hyperopt-master/hyperopt/pyll/base.py

# file is called AST to not collide with std lib module 'ast'
#
# It provides types to build ASTs in a simple lambda-notation style
#
from future import standard_library
import copy
import logging
import operator
import time
from collections import deque
import networkx as nx
# TODO: move things depending on numpy (among others too) to a library file
import numpy as np
from io import StringIO
standard_library.install_aliases()
logger = logging.getLogger(__name__)
np_versions = list(map(int, np.__version__.split(".")[:2]))
DEFAULT_MAX_PROGRAM_LEN = 100000
class PyllImportError(ImportError):
"""A pyll symbol was not defined in the scope"""
class MissingArgument:
"""Object to represent a missing argument to a function application"""
class SymbolTable:
"""
An object whose methods generally allocate Apply nodes.
_impls is a dictionary containing implementations for those nodes.
>>> self.add(a, b) # -- creates a new 'add' Apply node
    >>> self._impls['add'](a, b) # -- this computes a + b
"""
def __init__(self):
# -- list and dict are special because they are Python builtins
self._impls = {
"list": list,
"dict": dict,
"range": range,
"len": len,
"int": int,
"float": float,
"map": map,
"max": max,
"min": min,
"getattr": getattr,
}
def _new_apply(self, name, args, kwargs, o_len, pure):
pos_args = [as_apply(a) for a in args]
named_args = [(k, as_apply(v)) for (k, v) in list(kwargs.items())]
named_args.sort()
return Apply(
name, pos_args=pos_args, named_args=named_args, o_len=o_len, pure=pure
)
def dict(self, *args, **kwargs):
# XXX: figure out len
return self._new_apply("dict", args, kwargs, o_len=None, pure=True)
def int(self, arg):
return self._new_apply("int", [as_apply(arg)], {}, o_len=None, pure=True)
def float(self, arg):
return self._new_apply("float", [as_apply(arg)], {}, o_len=None, pure=True)
def len(self, obj):
return self._new_apply("len", [obj], {}, o_len=None, pure=True)
def list(self, init):
return self._new_apply("list", [as_apply(init)], {}, o_len=None, pure=True)
def map(self, fn, seq, pure=False):
"""
        pure - True asserts that fn does not modify seq[i]
"""
return self._new_apply(
"map", [as_apply(fn), as_apply(seq)], {}, o_len=seq.o_len, pure=pure
)
def range(self, *args):
return self._new_apply("range", args, {}, o_len=None, pure=True)
def max(self, *args):
"""return max of args"""
return self._new_apply(
"max", list(map(as_apply, args)), {}, o_len=None, pure=True
)
def min(self, *args):
"""return min of args"""
return self._new_apply(
"min", list(map(as_apply, args)), {}, o_len=None, pure=True
)
def getattr(self, obj, attr, *args):
return self._new_apply(
"getattr",
[as_apply(obj), as_apply(attr)] + list(map(as_apply, args)),
{},
o_len=None,
pure=True,
)
def _define(self, f, o_len, pure):
name = f.__name__
entry = SymbolTableEntry(self, name, o_len, pure)
setattr(self, name, entry)
self._impls[name] = f
return f
def define(self, f, o_len=None, pure=False):
"""Decorator for adding python functions to self"""
name = f.__name__
if hasattr(self, name):
raise ValueError("Cannot override existing symbol", name)
return self._define(f, o_len, pure)
def define_if_new(self, f, o_len=None, pure=False):
"""Pass silently if f matches the current implementation
for f.__name__"""
name = f.__name__
if hasattr(self, name) and self._impls[name] is not f:
raise ValueError("Cannot redefine existing symbol", name)
return self._define(f, o_len, pure)
def undefine(self, f):
if isinstance(f, str):
name = f
else:
name = f.__name__
del self._impls[name]
delattr(self, name)
def define_pure(self, f):
return self.define(f, o_len=None, pure=True)
def define_info(self, o_len=None, pure=False):
def wrapper(f):
return self.define(f, o_len=o_len, pure=pure)
return wrapper
def inject(self, *args, **kwargs):
"""
Add symbols from self into a dictionary and return the dict.
This is used for import-like syntax: see `import_`.
"""
rval = {}
for k in args:
try:
rval[k] = getattr(self, k)
except AttributeError:
raise PyllImportError(k)
for k, origk in list(kwargs.items()):
try:
rval[k] = getattr(self, origk)
except AttributeError:
raise PyllImportError(origk)
return rval
def import_(self, _globals, *args, **kwargs):
_globals.update(self.inject(*args, **kwargs))
class SymbolTableEntry:
"""A functools.partial-like class for adding symbol table entries."""
def __init__(self, symbol_table, apply_name, o_len, pure):
self.symbol_table = symbol_table
self.apply_name = apply_name
self.o_len = o_len
self.pure = pure
def __call__(self, *args, **kwargs):
return self.symbol_table._new_apply(
self.apply_name, args, kwargs, self.o_len, self.pure
)
scope = SymbolTable()
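# -- Editor's sketch (not part of the original source): how `scope` is used.
#    `scope.define` registers a plain function as a symbol; calling the
#    resulting scope attribute builds an Apply node rather than running the
#    function. `demo_add2` is a hypothetical name used only here; calling
#    this helper twice would raise, since symbols cannot be redefined.
def _editor_demo_scope():
    @scope.define
    def demo_add2(x):
        return x + 2

    node = scope.demo_add2(40)  # -- builds Apply('demo_add2', ...), not 42
    assert isinstance(node, Apply)
    assert node.eval() == 42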
def as_apply(obj):
"""Smart way of turning object into an Apply"""
if isinstance(obj, Apply):
rval = obj
elif isinstance(obj, tuple):
rval = Apply("pos_args", [as_apply(a) for a in obj], {}, len(obj))
elif isinstance(obj, list):
rval = Apply("pos_args", [as_apply(a) for a in obj], {}, None)
elif isinstance(obj, dict):
items = list(obj.items())
        # -- should be fine to allow numbers and simple things,
        #    but think about whether it's OK to allow Apply objects;
        #    they mess up sorting at the very least.
items.sort()
if all(isinstance(k, str) for k in obj):
named_args = [(k, as_apply(v)) for (k, v) in items]
rval = Apply("dict", [], named_args, len(named_args))
else:
new_items = [(k, as_apply(v)) for (k, v) in items]
rval = Apply("dict", [as_apply(new_items)], {}, o_len=None)
else:
rval = Literal(obj)
assert isinstance(rval, Apply)
return rval
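# -- Editor's sketch (not part of the original source): what as_apply does
#    with the common container types.
def _editor_demo_as_apply():
    t = as_apply((2, 3))    # -- tuple -> 'pos_args' node with o_len == 2
    d = as_apply({"a": 1})  # -- str-keyed dict -> 'dict' node
    lit = as_apply(7)       # -- anything else -> Literal
    assert t.name == "pos_args" and len(t) == 2
    assert d.name == "dict" and d.named_args[0][0] == "a"
    assert lit.obj == 7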
class Apply:
"""
Represent a symbolic application of a symbol to arguments.
o_len - None or int if the function is guaranteed to return a fixed number
`o_len` of outputs if it returns successfully
pure - True only if the function has no relevant side-effects
"""
def __init__(
self, name, pos_args, named_args, o_len=None, pure=False, define_params=None
):
self.name = name
# -- tuples or arrays -> lists
self.pos_args = list(pos_args)
self.named_args = [[kw, arg] for (kw, arg) in named_args]
# -- o_len is attached this early to support tuple unpacking and
# list coercion.
self.o_len = o_len
self.pure = pure
# -- define_params lets us cope with stuff that may be in the
# SymbolTable on the master but not on the worker.
self.define_params = define_params
assert all(isinstance(v, Apply) for v in pos_args)
assert all(isinstance(v, Apply) for k, v in named_args)
assert all(isinstance(k, str) for k, v in named_args)
def __setstate__(self, state):
self.__dict__.update(state)
# -- On deserialization, update scope if need be.
if self.define_params:
scope.define_if_new(**self.define_params)
def eval(self, memo=None):
"""
Recursively evaluate an expression graph.
This method operates directly on the graph of extended inputs to this
node, making no attempt to modify or optimize the expression graph.
Caveats:
* If there are nodes in the graph that do not represent expressions,
(e.g. nodes that correspond to statement blocks or assertions)
then it's not clear what this routine should do, and you should
probably not call it.
        * If there are Lambdas in the graph, this procedure will not evaluate
          them -- see rec_eval for that.
However, for many cases that are pure expression graphs, this
offers a quick and simple way to evaluate them.
"""
if memo is None:
memo = {}
if id(self) in memo:
return memo[id(self)]
else:
            args = [a.eval(memo) for a in self.pos_args]
            kwargs = {n: a.eval(memo) for (n, a) in self.named_args}
f = scope._impls[self.name]
memo[id(self)] = rval = f(*args, **kwargs)
return rval
def inputs(self):
# -- this function gets called a lot and it's not 100% safe to cache
# so the if/else is a small optimization
if self.named_args:
rval = self.pos_args + [v for (k, v) in self.named_args]
else:
rval = self.pos_args
return rval
@property
def arg(self):
# XXX: move this introspection to __init__, and change
# the basic data-structure to not use pos_args and named_args.
# XXX: think though... we want the binding to be updated if pos_args
# and named_args is modified... so maybe this is an ok way to do it?
#
# XXX: extend something to deal with Lambda objects instead of
# decorated python functions.
#
# http://docs.python.org/reference/expressions.html#calls
#
binding = {}
fn = scope._impls[self.name]
# XXX does not work for builtin functions
defaults = fn.__defaults__ # right-aligned default values for params
code = fn.__code__
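        # -- co_flags bit 0x04 is CO_VARARGS (a *args parameter);
        #    bit 0x08 is CO_VARKEYWORDS (a **kwargs parameter)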
extra_args_ok = bool(code.co_flags & 0x04)
extra_kwargs_ok = bool(code.co_flags & 0x08)
# -- assert that my understanding of calling protocol is correct
try:
if extra_args_ok and extra_kwargs_ok:
assert len(code.co_varnames) >= code.co_argcount + 2
param_names = code.co_varnames[: code.co_argcount + 2]
args_param = param_names[code.co_argcount]
kwargs_param = param_names[code.co_argcount + 1]
pos_params = param_names[: code.co_argcount]
elif extra_kwargs_ok:
assert len(code.co_varnames) >= code.co_argcount + 1
param_names = code.co_varnames[: code.co_argcount + 1]
kwargs_param = param_names[code.co_argcount]
pos_params = param_names[: code.co_argcount]
elif extra_args_ok:
assert len(code.co_varnames) >= code.co_argcount + 1
param_names = code.co_varnames[: code.co_argcount + 1]
args_param = param_names[code.co_argcount]
pos_params = param_names[: code.co_argcount]
else:
assert len(code.co_varnames) >= code.co_argcount
param_names = code.co_varnames[: code.co_argcount]
pos_params = param_names[: code.co_argcount]
except AssertionError:
print("YIKES: MISUNDERSTANDING OF CALL PROTOCOL:")
print(code.co_argcount)
print(code.co_varnames)
print("%x" % code.co_flags)
raise
        if extra_args_ok:
            binding[args_param] = []
        if extra_kwargs_ok:
            binding[kwargs_param] = {}
if len(self.pos_args) > code.co_argcount and not extra_args_ok:
raise TypeError("Argument count exceeds number of positional params")
# -- bind positional arguments
for param_i, arg_i in zip(param_names, self.pos_args):
binding[param_i] = arg_i
        if extra_args_ok:
            # -- bind surplus positional arguments to the *args parameter
            binding[args_param].extend(self.pos_args[code.co_argcount:])
# -- bind keyword arguments
for aname, aval in self.named_args:
try:
pos = pos_params.index(aname)
except ValueError:
if extra_kwargs_ok:
binding[kwargs_param][aname] = aval
continue
else:
raise TypeError("Unrecognized keyword argument", aname)
param = param_names[pos]
if param in binding:
raise TypeError("Duplicate argument for parameter", param)
binding[param] = aval
assert len(binding) <= len(param_names)
if len(binding) < len(param_names):
for p in param_names:
if p not in binding:
binding[p] = MissingArgument
return binding
def set_kwarg(self, name, value):
for ii, (key, val) in enumerate(self.named_args):
if key == name:
self.named_args[ii][1] = as_apply(value)
return
arg = self.arg
if name in arg and arg[name] != MissingArgument:
raise NotImplementedError("change pos arg to kw arg")
else:
self.named_args.append([name, as_apply(value)])
self.named_args.sort()
def clone_from_inputs(self, inputs, o_len="same"):
        if len(inputs) != len(self.inputs()):
            raise TypeError("wrong number of inputs")
L = len(self.pos_args)
pos_args = list(inputs[:L])
named_args = [
[kw, inputs[L + ii]] for ii, (kw, arg) in enumerate(self.named_args)
]
        # -- danger: cloning with new inputs can change the o_len
if o_len == "same":
o_len = self.o_len
return self.__class__(self.name, pos_args, named_args, o_len)
def replace_input(self, old_node, new_node):
rval = []
for ii, aa in enumerate(self.pos_args):
if aa is old_node:
self.pos_args[ii] = new_node
rval.append(ii)
for ii, (nn, aa) in enumerate(self.named_args):
if aa is old_node:
self.named_args[ii][1] = new_node
rval.append(ii + len(self.pos_args))
return rval
def pprint(self, ofile, lineno=None, indent=0, memo=None):
if memo is None:
memo = {}
if lineno is None:
lineno = [0]
if self in memo:
print(lineno[0], " " * indent + memo[self], file=ofile)
lineno[0] += 1
else:
memo[self] = self.name + (" [line:%i]" % lineno[0])
print(lineno[0], " " * indent + self.name, file=ofile)
lineno[0] += 1
for arg in self.pos_args:
arg.pprint(ofile, lineno, indent + 2, memo)
for name, arg in self.named_args:
print(lineno[0], " " * indent + " " + name + " =", file=ofile)
lineno[0] += 1
arg.pprint(ofile, lineno, indent + 2, memo)
def __str__(self):
sio = StringIO()
self.pprint(sio)
return sio.getvalue()[:-1] # remove trailing '\n'
def __add__(self, other):
return scope.add(self, other)
def __radd__(self, other):
return scope.add(other, self)
def __sub__(self, other):
return scope.sub(self, other)
def __rsub__(self, other):
return scope.sub(other, self)
def __neg__(self):
return scope.neg(self)
def __mul__(self, other):
return scope.mul(self, other)
def __rmul__(self, other):
return scope.mul(other, self)
def __div__(self, other):
return scope.div(self, other)
def __rdiv__(self, other):
return scope.div(other, self)
def __truediv__(self, other):
return scope.truediv(self, other)
def __rtruediv__(self, other):
return scope.truediv(other, self)
def __floordiv__(self, other):
return scope.floordiv(self, other)
def __rfloordiv__(self, other):
return scope.floordiv(other, self)
def __pow__(self, other):
return scope.pow(self, other)
def __rpow__(self, other):
return scope.pow(other, self)
def __gt__(self, other):
return scope.gt(self, other)
def __ge__(self, other):
return scope.ge(self, other)
def __lt__(self, other):
return scope.lt(self, other)
def __le__(self, other):
return scope.le(self, other)
def __getitem__(self, idx):
if self.o_len is not None and isinstance(idx, int):
if idx >= self.o_len:
# -- this IndexError is essential for supporting
# tuple-unpacking syntax or list coercion of self.
raise IndexError()
return scope.getitem(self, idx)
def __len__(self):
if self.o_len is None:
raise TypeError("len of pyll.Apply either undefined or unknown")
return self.o_len
def __call__(self, *args, **kwargs):
return scope.call(self, args, kwargs)
def apply(name, *args, **kwargs):
pos_args = [as_apply(a) for a in args]
named_args = [(k, as_apply(v)) for (k, v) in list(kwargs.items())]
named_args.sort()
return Apply(name, pos_args=pos_args, named_args=named_args, o_len=None)
class Literal(Apply):
def __init__(self, obj=None):
try:
o_len = len(obj)
except (AttributeError, TypeError):
# Note: AttributeError is raised on sklearn's
# RandomForestClassifier when used before fit
o_len = None
Apply.__init__(self, "literal", [], {}, o_len, pure=True)
self._obj = obj
def eval(self, memo=None):
if memo is None:
memo = {}
return memo.setdefault(id(self), self._obj)
@property
def obj(self):
return self._obj
@property
def arg(self):
return {}
def pprint(self, ofile, lineno=None, indent=0, memo=None):
if lineno is None:
lineno = [0]
if memo is None:
memo = {}
if self in memo:
print(lineno[0], " " * indent + memo[self], file=ofile)
else:
# TODO: set up a registry for this
if isinstance(self._obj, np.ndarray):
msg = "Literal{{np.ndarray,shape={},min={:f},max={:f}}}".format(
self._obj.shape,
self._obj.min(),
self._obj.max(),
)
else:
msg = "Literal{%s}" % str(self._obj)
memo[self] = "%s [line:%i]" % (msg, lineno[0])
print(lineno[0], " " * indent + msg, file=ofile)
lineno[0] += 1
def replace_input(self, old_node, new_node):
return []
def clone_from_inputs(self, inputs, o_len="same"):
return self.__class__(self._obj)
class Lambda:
# XXX: Extend Lambda objects to have a list of exception clauses.
# If the code of the expr() throws an error, these clauses convert
# that error to a return value.
def __init__(self, name, params, expr):
self.__name__ = name # like a python function
self.params = params # list of (name, symbol[, default_value]) tuples
self.expr = expr # pyll graph defining this Lambda
def __call__(self, *args, **kwargs):
# -- return `expr` cloned from given args and kwargs
if len(args) > len(self.params):
raise TypeError("too many arguments")
memo = {}
for arg, param in zip(args, self.params):
# print('applying with arg', param, arg)
memo[param[1]] = as_apply(arg)
if len(args) != len(self.params) or kwargs:
raise NotImplementedError("named / default arguments", (args, self.params))
rval = clone(self.expr, memo)
return rval
class UndefinedValue:
pass
# -- set up some convenience symbols to use as parameters in Lambda definitions
p0 = Literal(UndefinedValue)
p1 = Literal(UndefinedValue)
p2 = Literal(UndefinedValue)
p3 = Literal(UndefinedValue)
p4 = Literal(UndefinedValue)
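# -- Editor's sketch (not part of the original source): a Lambda binds the
#    parameter Literal p0 inside an expression graph; calling it clones the
#    graph with the argument substituted for p0 (cf. test_recursion in the
#    test suite).
def _editor_demo_lambda():
    double = Lambda("double", [("x", p0)], expr=p0 + p0)
    node = double(21)  # -- clones `p0 + p0` with p0 -> Literal(21)
    assert rec_eval(node) == 42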
@scope.define
def call(fn, args=(), kwargs={}):
"""call fn with given args and kwargs.
This is used to represent Apply.__call__
"""
return fn(*args, **kwargs)
@scope.define
def callpipe1(fn_list, arg):
"""
    fn_list: a list of lambdas that return either pyll expressions or python
             values
arg: the argument to the first function in the list
return: `fn_list[-1]( ... (fn_list[1](fn_list[0](arg))))`
"""
# XXX: in current implementation, if fs are `partial`, then
# this loop will expand all functions f at once, so that they
# will all be evaluated in the same scope/memo by rec_eval.
# Normally programming languages would evaluate each f in a private
# scope
for f in fn_list:
arg = f(arg)
return arg
@scope.define
def partial(name, *args, **kwargs):
# TODO: introspect the named instruction, to retrieve the
# list of parameters *not* accounted for by args and kwargs
# then delete these stupid functions and just have one `partial`
try:
name = name.apply_name # to retrieve name from scope.foo methods
except AttributeError:
pass
my_id = len(scope._impls)
    # -- create a function with this name
    #    the name is the string used to index into scope._impls
    temp_name = "partial_%s_id%i" % (name, my_id)
l = Lambda(temp_name, [("x", p0)], expr=apply(name, *(args + (p0,)), **kwargs))
scope.define(l)
    # assert that the next partial will get a different id
    # XXX: THIS ASSUMES THAT SCOPE ONLY GROWS
assert my_id < len(scope._impls)
rval = getattr(scope, temp_name)
return rval
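# -- Editor's sketch (not part of the original source): mirrors test_partial
#    in the test suite.
def _editor_demo_partial():
    add2 = scope.partial("add", 2)  # -- evaluates to a SymbolTableEntry
    assert rec_eval(add2(3)) == 5   # -- x + 2 with x = 3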
def dfs(aa, seq=None, seqset=None):
if seq is None:
assert seqset is None
seq = []
seqset = {}
    # -- seqset is the set of all nodes we have seen (which may still be
    #    on the stack)
# N.B. it used to be a stack, but now it's a dict mapping to inputs
# because that's an optimization saving us from having to call inputs
# so often.
if aa in seqset:
return
assert isinstance(aa, Apply)
seqset[aa] = aa.inputs()
for ii in seqset[aa]:
dfs(ii, seq, seqset)
seq.append(aa)
return seq
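# -- Editor's sketch (not part of the original source): dfs returns nodes in
#    depth-first post-order, so every node appears after its inputs.
def _editor_demo_dfs():
    a, b = as_apply(2), as_apply(3)
    expr = a + b
    order = dfs(expr)
    assert order.index(a) < order.index(expr)
    assert order.index(b) < order.index(expr)
    assert order[-1] is expr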
def toposort(expr):
"""
Return apply nodes of `expr` sub-tree as a list in topological order.
    Raises networkx.NetworkXUnfeasible if the subtree contains a cycle.
"""
G = nx.DiGraph()
for node in dfs(expr):
G.add_edges_from([(n_in, node) for n_in in node.inputs()])
order = list(nx.topological_sort(G))
assert order[-1] == expr
return order
def clone(expr, memo=None):
if memo is None:
memo = {}
nodes = dfs(expr)
for node in nodes:
if node not in memo:
new_inputs = [memo[arg] for arg in node.inputs()]
new_node = node.clone_from_inputs(new_inputs)
memo[node] = new_node
return memo[expr]
def clone_merge(expr, memo=None, merge_literals=False):
nodes = dfs(expr)
if memo is None:
memo = {}
# -- args are somewhat slow to construct, so cache them out front
# XXX node.arg does not always work (builtins, weird co_flags)
node_args = [(node.pos_args, node.named_args) for node in nodes]
    try:
        del node  # -- not bound in Python 3; comprehension vars don't leak
    except NameError:
        pass
for ii, node_ii in enumerate(nodes):
if node_ii in memo:
continue
new_ii = None
if node_ii.pure:
for jj in range(ii):
node_jj = nodes[jj]
if node_ii.name != node_jj.name:
continue
if node_ii.name == "literal":
if not merge_literals:
continue
if node_ii._obj != node_jj._obj:
continue
else:
if node_args[ii] != node_args[jj]:
continue
logger.debug("clone_merge %s %i <- %i" % (node_ii.name, jj, ii))
new_ii = node_jj
break
if new_ii is None:
new_inputs = [memo[arg] for arg in node_ii.inputs()]
new_ii = node_ii.clone_from_inputs(new_inputs)
memo[node_ii] = new_ii
return memo[expr]
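# -- Editor's sketch (not part of the original source): clone_merge collapses
#    duplicate pure subgraphs (cf. test_clone_merge in the test suite).
def _editor_demo_clone_merge():
    a, b, c = as_apply((2, 3, 2))
    d = (a + b) * (c + b)
    e = clone_merge(d, merge_literals=True)  # -- merges the two Literal{2}s
    assert len(dfs(e)) < len(dfs(d))
    assert e.eval() == d.eval() == 25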
##############################################################################
##############################################################################
class GarbageCollected:
"""Placeholder representing a garbage-collected value"""
def rec_eval(
expr,
deepcopy_inputs=False,
memo=None,
max_program_len=None,
memo_gc=True,
print_trace=False,
print_node_on_error=True,
):
"""
expr - pyll Apply instance to be evaluated
memo - optional dictionary of values to use for particular nodes
deepcopy_inputs - deepcopy inputs to every node prior to calling that
node's function on those inputs. If this leads to a different return
value, then some function (XXX add more complete DebugMode
functionality) in your graph is modifying its inputs and causing
mis-calculation. XXX: This is not a fully-functional DebugMode because
if the offender happens on account of the toposort order to be the last
user of said input, then it will not be detected as a potential
problem.
"""
    if max_program_len is None:
max_program_len = DEFAULT_MAX_PROGRAM_LEN
if deepcopy_inputs not in (0, 1, False, True):
# -- I've been calling rec_eval(expr, memo) by accident a few times
# this error would have been appreciated.
raise ValueError("deepcopy_inputs should be bool", deepcopy_inputs)
node = as_apply(expr)
topnode = node
if memo is None:
memo = {}
else:
memo = dict(memo)
# -- hack for speed
# since the inputs are constant during rec_eval
# but not constant in general
node_inputs = {}
node_list = []
dfs(node, node_list, seqset=node_inputs)
# TODO: optimize dfs to not recurse past the items in memo
# this is especially important for evaluating Lambdas
# which cause rec_eval to recurse
#
    # N.B. that Lambdas may expand the graph during the evaluation
    # so that this iteration may be incomplete
if memo_gc:
clients = {}
for aa in node_list:
clients.setdefault(aa, set())
for ii in node_inputs[aa]:
clients.setdefault(ii, set()).add(aa)
def set_memo(k, v):
assert v is not GarbageCollected
memo[k] = v
for ii in node_inputs[k]:
# -- if all clients of ii are already in the memo
# then we can free memo[ii] by replacing it
# with a dummy symbol
if all(iic in memo for iic in clients[ii]):
memo[ii] = GarbageCollected
else:
def set_memo(k, v):
memo[k] = v
todo = deque([topnode])
while todo:
if len(todo) > max_program_len:
raise RuntimeError("Probably infinite loop in document")
node = todo.pop()
if print_trace:
print("rec_eval:print_trace", len(todo), node.name)
if node in memo:
# -- we've already computed this, move on.
continue
# -- different kinds of nodes are treated differently:
if node.name == "switch":
# -- switch is the conditional evaluation node
switch_i_var = node.pos_args[0]
if switch_i_var in memo:
switch_i = memo[switch_i_var]
                try:
                    int(switch_i)
                except (TypeError, ValueError):
                    raise TypeError("switch argument was", switch_i)
                if switch_i != int(switch_i) or switch_i < 0:
                    raise ValueError("switch pos must be a non-negative int", switch_i)
rval_var = node.pos_args[int(switch_i) + 1]
if rval_var in memo:
set_memo(node, memo[rval_var])
continue
else:
waiting_on = [rval_var]
else:
waiting_on = [switch_i_var]
elif isinstance(node, Literal):
# -- constants go straight into the memo
set_memo(node, node.obj)
continue
else:
# -- normal instruction-type nodes have inputs
waiting_on = [v for v in node_inputs[node] if v not in memo]
if waiting_on:
# -- Necessary inputs have yet to be evaluated.
# push the node back in the queue, along with the
# inputs it still needs
todo.append(node)
todo.extend(waiting_on)
else:
# -- not waiting on anything;
# this instruction can be evaluated.
args = _args = [memo[v] for v in node.pos_args]
kwargs = _kwargs = {k: memo[v] for (k, v) in node.named_args}
if memo_gc:
for aa in args + list(kwargs.values()):
assert aa is not GarbageCollected
if deepcopy_inputs:
args = copy.deepcopy(_args)
kwargs = copy.deepcopy(_kwargs)
try:
rval = scope._impls[node.name](*args, **kwargs)
except Exception as e:
if print_node_on_error:
print("=" * 80)
print("ERROR in rec_eval")
print("EXCEPTION", type(e), str(e))
print("NODE")
print(node) # -- typically a multi-line string
print("=" * 80)
raise
if isinstance(rval, Apply):
# -- if an instruction returns a Pyll apply node
# it means evaluate that too. Lambdas do this.
#
# XXX: consider if it is desirable, efficient, buggy
# etc. to keep using the same memo dictionary
foo = rec_eval(rval, deepcopy_inputs, memo, memo_gc=memo_gc)
set_memo(node, foo)
else:
set_memo(node, rval)
return memo[topnode]
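# -- Editor's sketch (not part of the original source): rec_eval evaluates a
#    graph iteratively; a memo entry short-circuits evaluation of that node.
def _editor_demo_rec_eval():
    x = Literal(3)
    expr = x + 1
    assert rec_eval(expr) == 4
    assert rec_eval(expr, memo={x: 10}) == 11  # -- override the leaf value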
############################################################################
############################################################################
@scope.define_pure
def pos_args(*args):
return args
@scope.define_pure
def identity(obj):
return obj
# -- We used to define these as Python functions in this file, but the
#    operator module already provides them and is slightly more efficient
#    about it. Since searchspaces uses the same convention, we can more
#    easily map graphs back and forth and reduce the amount of code in both
#    codebases.
scope.define_pure(operator.getitem)
scope.define_pure(operator.add)
scope.define_pure(operator.sub)
scope.define_pure(operator.mul)
try:
scope.define_pure(operator.div)
except AttributeError:
pass # No more operator.div in Python3, but truediv also exists since Python2.2
scope.define_pure(operator.truediv)
scope.define_pure(operator.floordiv)
scope.define_pure(operator.neg)
scope.define_pure(operator.eq)
scope.define_pure(operator.lt)
scope.define_pure(operator.le)
scope.define_pure(operator.gt)
scope.define_pure(operator.ge)
@scope.define_pure
def exp(a):
return np.exp(a)
@scope.define_pure
def log(a):
return np.log(a)
@scope.define_pure
def pow(a, b):
return a**b
@scope.define_pure
def sin(a):
return np.sin(a)
@scope.define_pure
def cos(a):
return np.cos(a)
@scope.define_pure
def tan(a):
return np.tan(a)
@scope.define_pure
def sum(x, axis=None):
if axis is None:
return np.sum(x)
else:
return np.sum(x, axis=axis)
@scope.define_pure
def sqrt(x):
return np.sqrt(x)
@scope.define_pure
def minimum(x, y):
return np.minimum(x, y)
@scope.define_pure
def maximum(x, y):
return np.maximum(x, y)
@scope.define_pure
def array_union1(args):
s = set()
for a in args:
s.update(a)
return np.asarray(sorted(s))
@scope.define_pure
def array_union(*args):
return array_union1(args)
@scope.define_pure
def asarray(a, dtype=None):
if dtype is None:
return np.asarray(a)
else:
return np.asarray(a, dtype=dtype)
@scope.define_pure
def str_join(s, seq):
return s.join(seq)
@scope.define_pure
def bincount(x, offset=0, weights=None, minlength=None, p=None):
y = np.asarray(x, dtype="int")
# hack for pchoice, p is passed as [ np.repeat(p, obs.size) ],
# so scope.len(p) gives incorrect #dimensions, need to get just the first one
if p is not None and p.ndim == 2:
assert np.all(p == p[0])
minlength = len(p[0])
    # -- np.bincount does not accept minlength=None; treat None as 0
    return np.bincount(y - offset, weights, minlength or 0)
@scope.define_pure
def repeat(n_times, obj):
return [obj] * n_times
@scope.define
def call_method(obj, methodname, *args, **kwargs):
method = getattr(obj, methodname)
return method(*args, **kwargs)
@scope.define_pure
def call_method_pure(obj, methodname, *args, **kwargs):
method = getattr(obj, methodname)
return method(*args, **kwargs)
@scope.define_pure
def copy_call_method_pure(obj, methodname, *args, **kwargs):
    # -- this function copies the object before calling the method, so that
    #    when args and kwargs are not modified, the call can be done in a
    #    side-effect-free way.
    #
    #    It is a mistake to use this function when args or kwargs are
    #    modified by the call to method.
method = getattr(copy.copy(obj), methodname)
return method(*args, **kwargs)
@scope.define_pure
def switch(pos, *args):
# switch is an unusual expression, in that it affects control flow
# when executed with rec_eval. args are not all evaluated, only
# args[pos] is evaluated.
# raise RuntimeError('switch is not meant to be evaluated')
#
# .. However, in quick-evaluation schemes it is handy that this be defined
# as follows:
return args[pos]
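# -- Editor's sketch (not part of the original source): under rec_eval only
#    the selected branch of a switch is evaluated, so the Raise branch below
#    is never reached (cf. test_switch_and_Raise in the test suite).
def _editor_demo_switch():
    i = Literal()
    ab = scope.switch(i, "a", "b", scope.Raise(Exception))
    assert rec_eval(ab, memo={i: 0}) == "a"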
def _kwswitch(kw, **kwargs):
"""conditional evaluation according to string value"""
# Get the index of the string in kwargs to use switch
keys, values = list(zip(*sorted(kwargs.items())))
match_idx = scope.call_method_pure(keys, "index", kw)
return scope.switch(match_idx, *values)
scope.kwswitch = _kwswitch
@scope.define_pure
def Raise(etype, *args, **kwargs):
raise etype(*args, **kwargs)
@scope.define_info(o_len=2)
def curtime(obj):
return time.time(), obj
@scope.define
def pdb_settrace(obj):
import pdb
pdb.set_trace()
return obj

hyperopt-master/hyperopt/pyll/stochastic.py

"""
Constructs for annotating base graphs.
"""
from past.utils import old_div
import sys
import numpy as np
from .base import scope, as_apply, dfs, rec_eval, clone
################################################################################
################################################################################
def ERR(msg):
print(msg, file=sys.stderr)
implicit_stochastic_symbols = set()
def implicit_stochastic(f):
implicit_stochastic_symbols.add(f.__name__)
return f
@scope.define
def rng_from_seed(seed):
return np.random.default_rng(seed)
# -- UNIFORM
@implicit_stochastic
@scope.define
def uniform(low, high, rng=None, size=()):
return rng.uniform(low, high, size=size)
@implicit_stochastic
@scope.define
def loguniform(low, high, rng=None, size=()):
draw = rng.uniform(low, high, size=size)
return np.exp(draw)
@implicit_stochastic
@scope.define
def quniform(low, high, q, rng=None, size=()):
draw = rng.uniform(low, high, size=size)
return np.round(old_div(draw, q)) * q
@implicit_stochastic
@scope.define
def qloguniform(low, high, q, rng=None, size=()):
draw = np.exp(rng.uniform(low, high, size=size))
return np.round(old_div(draw, q)) * q
# -- NORMAL
@implicit_stochastic
@scope.define
def normal(mu, sigma, rng=None, size=()):
return rng.normal(mu, sigma, size=size)
@implicit_stochastic
@scope.define
def qnormal(mu, sigma, q, rng=None, size=()):
draw = rng.normal(mu, sigma, size=size)
return np.round(old_div(draw, q)) * q
@implicit_stochastic
@scope.define
def lognormal(mu, sigma, rng=None, size=()):
draw = rng.normal(mu, sigma, size=size)
return np.exp(draw)
@implicit_stochastic
@scope.define
def qlognormal(mu, sigma, q, rng=None, size=()):
draw = np.exp(rng.normal(mu, sigma, size=size))
return np.round(old_div(draw, q)) * q
# -- CATEGORICAL
@implicit_stochastic
@scope.define
def randint(low, high=None, rng=None, size=()):
"""
    See numpy.random.Generator.integers documentation.
    rng: a random number generator, typically an np.random.Generator instance
"""
return rng.integers(low, high, size)
@implicit_stochastic
@scope.define
def randint_via_categorical(p, rng=None, size=()):
"""
    Only used in tpe because of the chaotic API based on names.
    Ideally we would just use randint above, but to support priors this is
    a wrapper of categorical.
    rng: a random number generator, typically an np.random.Generator instance
"""
return scope.categorical(p, rng, size)
@implicit_stochastic
@scope.define
def categorical(p, rng=None, size=()):
"""Draws i with probability p[i]"""
if len(p) == 1 and isinstance(p[0], np.ndarray):
p = p[0]
p = np.asarray(p)
if size == ():
size = (1,)
elif isinstance(size, (int, np.number)):
size = (size,)
else:
size = tuple(size)
if size == (0,):
return np.asarray([])
assert len(size)
if p.ndim == 0:
raise NotImplementedError()
elif p.ndim == 1:
        sample = rng.multinomial(n=1, pvals=p, size=size)
        assert sample.shape == size + (len(p),)
rval = np.dot(sample, np.arange(len(p)))
rval.shape = size
return rval
elif p.ndim == 2:
n_draws_, n_choices = p.shape
(n_draws,) = size
assert n_draws == n_draws_
rval = [
np.where(rng.multinomial(pvals=p[ii], n=1))[0][0] for ii in range(n_draws)
]
rval = np.asarray(rval)
rval.shape = size
return rval
else:
raise NotImplementedError()
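# -- Editor's sketch (not part of the original source): categorical draws
#    integer indices i with probability p[i].
def _editor_demo_categorical():
    rng = np.random.default_rng(0)
    draws = categorical([0.0, 1.0], rng=rng, size=100)
    assert np.all(draws == 1)  # -- index 1 carries all the probability mass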
def choice(args):
return scope.one_of(*args)
scope.choice = choice
def one_of(*args):
ii = scope.randint(len(args))
return scope.switch(ii, *args)
scope.one_of = one_of
def recursive_set_rng_kwarg(expr, rng=None):
"""
Make all of the stochastic nodes in expr use the rng
uniform(0, 1) -> uniform(0, 1, rng=rng)
"""
if rng is None:
rng = np.random.default_rng()
lrng = as_apply(rng)
for node in dfs(expr):
if node.name in implicit_stochastic_symbols:
for ii, (name, arg) in enumerate(list(node.named_args)):
if name == "rng":
node.named_args[ii] = ("rng", lrng)
break
else:
node.named_args.append(("rng", lrng))
return expr
def sample(expr, rng=None, **kwargs):
"""
Parameters:
expr - a pyll expression to be evaluated
rng - a np.random.Generator instance
default: `np.random.default_rng()`
**kwargs - optional arguments passed along to
`hyperopt.pyll.rec_eval`
"""
if rng is None:
rng = np.random.default_rng()
foo = recursive_set_rng_kwarg(clone(as_apply(expr)), as_apply(rng))
return rec_eval(foo, **kwargs)
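# -- Editor's sketch (not part of the original source): sample() clones the
#    expression, attaches an rng to every implicit-stochastic node, and then
#    evaluates the result with rec_eval.
def _editor_demo_sample():
    u = scope.uniform(0, 1)
    val = sample({"u": u}, np.random.default_rng(0))
    assert 0 < val["u"] < 1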

hyperopt-master/hyperopt/pyll/__init__.py

from .base import Apply
from .base import Literal
from .base import as_apply
from .base import scope
from .base import rec_eval
from .base import clone
from .base import clone_merge
from .base import dfs
from .base import toposort
# -- adds symbols to scope
from . import stochastic

hyperopt-master/hyperopt/pyll/tests/test_stochastic.py

from past.utils import old_div
import numpy as np
from hyperopt.pyll import scope, as_apply, rec_eval
from hyperopt.pyll.stochastic import recursive_set_rng_kwarg, sample
def test_recursive_set_rng_kwarg():
uniform = scope.uniform
a = as_apply([uniform(0, 1), uniform(2, 3)])
rng = np.random.default_rng(234)
recursive_set_rng_kwarg(a, rng)
print(a)
val_a = rec_eval(a)
assert 0 < val_a[0] < 1
assert 2 < val_a[1] < 3
def test_lnorm():
G = scope
choice = G.choice
uniform = G.uniform
quantized_uniform = G.quniform
inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3
# -- test that it runs
lnorm = as_apply(
{
"kwargs": {
"inker_shape": (inker_size, inker_size),
"outker_shape": (inker_size, inker_size),
"remove_mean": choice([0, 1]),
"stretch": uniform(low=0, high=10),
"threshold": uniform(
low=old_div(0.1, np.sqrt(10.0)), high=10 * np.sqrt(10)
),
}
}
)
print(lnorm)
print(("len", len(str(lnorm))))
# not sure what to assert
# ... this is too fragile
# assert len(str(lnorm)) == 980
def test_sample_deterministic():
aa = as_apply([0, 1])
print(aa)
dd = sample(aa, np.random.default_rng(3))
assert dd == (0, 1)
def test_repeatable():
u = scope.uniform(0, 1)
aa = as_apply(dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
dd1 = sample(aa, np.random.default_rng(3))
dd2 = sample(aa, np.random.default_rng(3))
dd3 = sample(aa, np.random.default_rng(4))
assert dd1 == dd2
assert dd1 != dd3
def test_sample():
u = scope.uniform(0, 1)
aa = as_apply(dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
print(aa)
dd = sample(aa, np.random.default_rng(3))
assert 0 < dd["u"] < 1
assert 4 < dd["n"] < 6
assert dd["u"] == dd["l"][3]
assert dd["l"][:2] == (0, 1)
assert dd["l"][2] in (2, 3)

hyperopt-master/hyperopt/pyll/tests/__init__.py
(empty file)

hyperopt-master/hyperopt/pyll/tests/test_base.py

from hyperopt.pyll import base
from hyperopt.pyll.base import (
Literal,
as_apply,
Apply,
dfs,
scope,
rec_eval,
p0,
Lambda,
clone_merge,
)
from nose import SkipTest
from nose.tools import assert_raises
import numpy as np
def test_literal_pprint():
l = Literal(5)
print(str(l))
assert str(l) == "0 Literal{5}"
def test_literal_apply():
l0 = Literal([1, 2, 3])
print(str(l0))
assert str(l0) == "0 Literal{[1, 2, 3]}"
def test_literal_unpacking():
l0 = Literal([1, 2, 3])
a, b, c = l0
print(a)
assert c.name == "getitem"
assert c.pos_args[0] is l0
assert isinstance(c.pos_args[1], Literal)
assert c.pos_args[1]._obj == 2
def test_as_apply_passthrough():
a4 = as_apply(4)
assert a4 is as_apply(a4)
def test_as_apply_literal():
assert isinstance(as_apply(7), Literal)
def test_as_apply_list_of_literals():
l = [9, 3]
al = as_apply(l)
assert isinstance(al, Apply)
assert al.name == "pos_args"
assert isinstance(al.pos_args[0], Literal)
assert isinstance(al.pos_args[1], Literal)
    assert al.pos_args[0]._obj == 9
    assert al.pos_args[1]._obj == 3
def test_as_apply_tuple_of_literals():
l = (9, 3)
al = as_apply(l)
assert isinstance(al, Apply)
assert al.name == "pos_args"
assert isinstance(al.pos_args[0], Literal)
assert isinstance(al.pos_args[1], Literal)
    assert al.pos_args[0]._obj == 9
    assert al.pos_args[1]._obj == 3
assert len(al) == 2
def test_as_apply_list_of_applies():
alist = [as_apply(i) for i in range(5)]
al = as_apply(alist)
assert isinstance(al, Apply)
assert al.name == "pos_args"
# -- have to come back to this if Literal copies args
assert al.pos_args == alist
def test_as_apply_dict_of_literals():
d = {"a": 9, "b": 10}
ad = as_apply(d)
assert isinstance(ad, Apply)
assert ad.name == "dict"
assert len(ad) == 2
assert ad.named_args[0][0] == "a"
assert ad.named_args[0][1]._obj == 9
assert ad.named_args[1][0] == "b"
assert ad.named_args[1][1]._obj == 10
def test_as_apply_dict_of_applies():
d = {"a": as_apply(9), "b": as_apply(10)}
ad = as_apply(d)
assert isinstance(ad, Apply)
assert ad.name == "dict"
assert len(ad) == 2
assert ad.named_args[0][0] == "a"
assert ad.named_args[0][1]._obj == 9
assert ad.named_args[1][0] == "b"
assert ad.named_args[1][1]._obj == 10
def test_as_apply_nested_dict():
d = {"a": 9, "b": {"c": 11, "d": 12}}
ad = as_apply(d)
assert isinstance(ad, Apply)
assert ad.name == "dict"
assert len(ad) == 2
assert ad.named_args[0][0] == "a"
assert ad.named_args[0][1]._obj == 9
assert ad.named_args[1][0] == "b"
assert ad.named_args[1][1].name == "dict"
assert ad.named_args[1][1].named_args[0][0] == "c"
assert ad.named_args[1][1].named_args[0][1]._obj == 11
assert ad.named_args[1][1].named_args[1][0] == "d"
assert ad.named_args[1][1].named_args[1][1]._obj == 12
def test_dfs():
dd = as_apply({"c": 11, "d": 12})
d = {"a": 9, "b": dd, "y": dd, "z": dd + 1}
ad = as_apply(d)
order = dfs(ad)
print([str(o) for o in order])
assert order[0]._obj == 9
assert order[1]._obj == 11
assert order[2]._obj == 12
assert order[3].named_args[0][0] == "c"
assert order[4]._obj == 1
assert order[5].name == "add"
assert order[6].named_args[0][0] == "a"
assert len(order) == 7
@scope.define_info(o_len=2)
def _test_foo():
return 1, 2
def test_o_len():
obj = scope._test_foo()
x, y = obj
assert x.name == "getitem"
assert x.pos_args[1]._obj == 0
assert y.pos_args[1]._obj == 1
def test_eval_arithmetic():
a, b, c = as_apply((2, 3, 4))
assert (a + b).eval() == 5
assert (a + b + c).eval() == 9
assert (a + b + 1 + c).eval() == 10
assert (a * b).eval() == 6
assert (a * b * c * (-1)).eval() == -24
assert (a - b).eval() == -1
assert (a - b * c).eval() == -10
assert (a // b).eval() == 0 # int div
assert (b // a).eval() == 1 # int div
assert (c / a).eval() == 2
assert (4 / a).eval() == 2
assert (a / 4.0).eval() == 0.5
def test_bincount():
def test_f(f):
r = np.arange(10)
counts = f(r)
assert isinstance(counts, np.ndarray)
assert len(counts) == 10
assert np.all(counts == 1)
r = np.arange(10) + 3
counts = f(r)
assert isinstance(counts, np.ndarray)
assert len(counts) == 13
assert np.all(counts[3:] == 1)
assert np.all(counts[:3] == 0)
r = np.arange(10) + 3
counts = f(r, minlength=5) # -- ignore minlength
assert isinstance(counts, np.ndarray)
assert len(counts) == 13
assert np.all(counts[3:] == 1)
assert np.all(counts[:3] == 0)
r = np.arange(10) + 3
counts = f(r, minlength=15) # -- pad to minlength
assert isinstance(counts, np.ndarray)
assert len(counts) == 15
assert np.all(counts[:3] == 0)
assert np.all(counts[3:13] == 1)
assert np.all(counts[13:] == 0)
r = np.arange(10) % 3 + 3
counts = f(r, minlength=7) # -- pad to minlength
assert list(counts) == [0, 0, 0, 4, 3, 3, 0]
try:
test_f(base.bincount)
except TypeError as e:
if "function takes at most 2 arguments" in str(e):
raise SkipTest()
raise
def test_switch_and_Raise():
i = Literal()
ab = scope.switch(i, "a", "b", scope.Raise(Exception))
assert rec_eval(ab, memo={i: 0}) == "a"
assert rec_eval(ab, memo={i: 1}) == "b"
assert_raises(Exception, rec_eval, ab, memo={i: 2})
def test_kwswitch():
i = Literal()
ab = scope.kwswitch(i, k1="a", k2="b", err=scope.Raise(Exception))
assert rec_eval(ab, memo={i: "k1"}) == "a"
assert rec_eval(ab, memo={i: "k2"}) == "b"
assert_raises(Exception, rec_eval, ab, memo={i: "err"})
def test_recursion():
scope.define(
Lambda(
"Fact",
[("x", p0)],
expr=scope.switch(p0 > 1, 1, p0 * base.apply("Fact", p0 - 1)),
)
)
print(scope.Fact(3))
assert rec_eval(scope.Fact(3)) == 6
def test_partial():
add2 = scope.partial("add", 2)
print(add2)
assert len(str(add2).split("\n")) == 3
# add2 evaluates to a scope method
thing = rec_eval(add2)
print(thing)
assert "SymbolTableEntry" in str(thing)
# add2() evaluates to a failure because it's only a partial application
assert_raises(NotImplementedError, rec_eval, add2())
# add2(3) evaluates to 5 because we've filled in all the blanks
thing = rec_eval(add2(3))
print(thing)
assert thing == 5
def test_callpipe():
# -- set up some 1-variable functions
a2 = scope.partial("add", 2)
a3 = scope.partial("add", 3)
def s9(a):
return scope.sub(a, 9)
# x + 2 + 3 - 9 == x - 4
r = scope.callpipe1([a2, a3, s9], 5)
thing = rec_eval(r)
assert thing == 1
def test_clone_merge():
a, b, c = as_apply((2, 3, 2))
d = (a + b) * (c + b)
len_d = len(dfs(d))
e = clone_merge(d, merge_literals=True)
assert len_d == len(dfs(d))
assert len_d > len(dfs(e))
assert e.eval() == d.eval()
def test_clone_merge_no_merge_literals():
a, b, c = as_apply((2, 3, 2))
d = (a + b) * (c + b)
len_d = len(dfs(d))
e = clone_merge(d, merge_literals=False)
assert len_d == len(dfs(d))
assert len_d == len(dfs(e))
assert e.eval() == d.eval()
def test_len():
assert_raises(TypeError, len, scope.uniform(0, 1))

hyperopt-master/hyperopt/tests/__init__.py
(empty file)

hyperopt-master/hyperopt/tests/test_base.py

import copy
import unittest
import numpy as np
import bson
from hyperopt.pyll import scope
from hyperopt.base import JOB_STATE_DONE, JOB_STATE_NEW
from hyperopt.base import TRIAL_KEYS
from hyperopt.base import TRIAL_MISC_KEYS
from hyperopt.base import InvalidTrial
from hyperopt.base import miscs_to_idxs_vals
from hyperopt.base import SONify
from hyperopt.base import STATUS_OK
from hyperopt.base import Trials
from hyperopt.base import trials_from_docs
from hyperopt.exceptions import AllTrialsFailed
uniform = scope.uniform
normal = scope.normal
one_of = scope.one_of
def ok_trial(tid, *args, **kwargs):
return dict(
tid=tid,
result={"status": "algo, ok"},
spec={"a": 1, "foo": (args, kwargs)},
misc={
"tid": tid,
"cmd": ("some cmd",),
"idxs": {"z": [tid]},
"vals": {"z": [1]},
},
extra="extra", # -- more stuff here is ok
owner=None,
state=JOB_STATE_NEW,
version=0,
book_time=None,
refresh_time=None,
exp_key=None,
)
def create_fake_trial(tid, loss=None, status=STATUS_OK, state=JOB_STATE_DONE):
return dict(
tid=tid,
result={"status": status, "loss": loss}
if loss is not None
else {"status": status},
spec={"a": 1},
misc={
"tid": tid,
"cmd": ("some cmd",),
"idxs": {"z": [tid]},
"vals": {"z": [1]},
},
extra="extra", # -- more stuff here is ok
owner=None,
state=state,
version=0,
book_time=None,
refresh_time=None,
exp_key=None,
)
class Suggest_API:
"""
Run some generic sanity-checks of a suggest algorithm to make sure that
it respects the semantics expected by e.g. fmin.
Use it like this:
    TestRand = Suggest_API.make_tst_class(rand.suggest, domain, 'TestRand')
"""
@classmethod
def make_tst_class(cls, suggest, domain, name):
class Tester(unittest.TestCase, cls):
def suggest(self, *args, **kwargs):
print(args, kwargs)
return suggest(*args, **kwargs)
def setUp(self):
self.domain = domain
Tester.__name__ = name
return Tester
seed_randomizes = True
def idxs_vals_from_ids(self, ids, seed):
docs = self.suggest(ids, self.domain, Trials(), seed)
trials = trials_from_docs(docs)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
return idxs, vals
def test_arbitrary_ids(self):
# -- suggest implementations should work for arbitrary ID
# values (possibly assuming they are hashable), and the
# ID values should have no effect on the return values.
ids_1 = [-2, 0, 7, "a", "007", 66, "a3", "899", 23, 2333]
ids_2 = ["a", "b", "c", "d", 1, 2, 3, 0.1, 0.2, 0.3]
idxs_1, vals_1 = self.idxs_vals_from_ids(ids=ids_1, seed=45)
idxs_2, vals_2 = self.idxs_vals_from_ids(ids=ids_2, seed=45)
all_ids_1 = set()
for var, ids in list(idxs_1.items()):
all_ids_1.update(ids)
all_ids_2 = set()
for var, ids in list(idxs_2.items()):
all_ids_2.update(ids)
self.assertEqual(all_ids_1, set(ids_1))
self.assertEqual(all_ids_2, set(ids_2))
self.assertEqual(vals_1, vals_2)
def test_seed_randomizes(self):
#
# suggest() algorithms can be either stochastic (e.g. random search)
        # or deterministic (e.g. grid search). If a suggest implementation
# is stochastic, then changing the seed argument should change the
# return value.
#
if not self.seed_randomizes:
return
# -- sample 20 points to make sure we get some differences even
# for small search spaces (chance of false failure is 1/million).
idxs_1, vals_1 = self.idxs_vals_from_ids(ids=list(range(20)), seed=45)
idxs_2, vals_2 = self.idxs_vals_from_ids(ids=list(range(20)), seed=46)
self.assertNotEqual((idxs_1, vals_1), (idxs_2, vals_2))
class TestTrials(unittest.TestCase):
def setUp(self):
self.trials = Trials()
def test_valid(self):
trials = self.trials
f = trials.insert_trial_doc
fine = ok_trial("ID", 1, 2, 3)
# --original runs fine
f(fine)
# -- take out each mandatory root key
def knockout(key):
rval = copy.deepcopy(fine)
del rval[key]
return rval
for key in TRIAL_KEYS:
self.assertRaises(InvalidTrial, f, knockout(key))
# -- take out each mandatory misc key
def knockout2(key):
rval = copy.deepcopy(fine)
del rval["misc"][key]
return rval
for key in TRIAL_MISC_KEYS:
self.assertRaises(InvalidTrial, f, knockout2(key))
def test_insert_sync(self):
trials = self.trials
assert len(trials) == 0
trials.insert_trial_doc(ok_trial("a", 8))
assert len(trials) == 0
trials.insert_trial_doc(ok_trial(5, a=1, b=3))
assert len(trials) == 0
trials.insert_trial_docs([ok_trial(tid=4, a=2, b=3), ok_trial(tid=9, a=4, b=3)])
assert len(trials) == 0
trials.refresh()
assert len(trials) == 4, len(trials)
assert len(trials) == len(trials.specs)
assert len(trials) == len(trials.results)
assert len(trials) == len(trials.miscs)
trials.insert_trial_docs(
trials.new_trial_docs(
["id0", "id1"],
[dict(a=1), dict(a=2)],
[dict(status="new"), dict(status="new")],
[
dict(tid="id0", idxs={}, vals={}, cmd=None),
dict(tid="id1", idxs={}, vals={}, cmd=None),
],
)
)
assert len(trials) == 4
assert len(trials) == len(trials.specs)
assert len(trials) == len(trials.results)
assert len(trials) == len(trials.miscs)
trials.refresh()
assert len(trials) == 6
assert len(trials) == len(trials.specs)
assert len(trials) == len(trials.results)
assert len(trials) == len(trials.miscs)
def test_best_trial(self):
trials = self.trials
assert len(trials) == 0
# It should throw a reasonable error when no valid trials exist.
        trials.insert_trial_doc(create_fake_trial(0, loss=np.nan))
trials.refresh()
with self.assertRaises(AllTrialsFailed):
assert trials.best_trial is None
# It should work even with some trials with NaN losses.
trials.insert_trial_doc(create_fake_trial(1, loss=1.0))
        trials.insert_trial_doc(create_fake_trial(2, loss=np.nan))
trials.insert_trial_doc(create_fake_trial(3, loss=0.5))
trials.refresh()
best_trial = trials.best_trial
self.assertEqual(best_trial["tid"], 3)
class TestSONify(unittest.TestCase):
def SONify(self, foo):
rval = SONify(foo)
assert bson.BSON.encode(dict(a=rval))
return rval
def test_int(self):
assert self.SONify(1) == 1
def test_float(self):
assert self.SONify(1.1) == 1.1
def test_np_1d_int(self):
assert np.all(self.SONify(np.asarray([1, 2, 3])) == [1, 2, 3])
def test_np_1d_float(self):
assert np.all(self.SONify(np.asarray([1, 2, 3.4])) == [1, 2, 3.4])
def test_np_1d_str(self):
assert np.all(self.SONify(np.asarray(["a", "b", "ccc"])) == ["a", "b", "ccc"])
def test_np_2d_int(self):
assert np.all(self.SONify(np.asarray([[1, 2], [3, 4]])) == [[1, 2], [3, 4]])
def test_np_2d_float(self):
assert np.all(self.SONify(np.asarray([[1, 2], [3, 4.5]])) == [[1, 2], [3, 4.5]])
def test_nested_w_bool(self):
thing = dict(a=1, b="2", c=True, d=False, e=int(3), f=[1])
assert thing == SONify(thing)

hyperopt-master/hyperopt/tests/unit/test_tpe.py

from past.utils import old_div
from functools import partial
import os
import unittest
import nose
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
pass
from hyperopt import pyll
from hyperopt.pyll import scope
from hyperopt import Trials
from hyperopt.base import miscs_to_idxs_vals, STATUS_OK
from hyperopt import hp
from hyperopt.tpe import adaptive_parzen_normal_orig
from hyperopt.tpe import GMM1
from hyperopt.tpe import GMM1_lpdf
from hyperopt.tpe import LGMM1
from hyperopt.tpe import LGMM1_lpdf
import hyperopt.rand as rand
import hyperopt.tpe as tpe
import hyperopt.atpe as atpe
from hyperopt import fmin
from .test_domains import domain_constructor, CasePerDomain, NonCategoricalCasePerDomain
DO_SHOW = int(os.getenv("HYPEROPT_SHOW", "0"))
def passthrough(x):
return x
def test_adaptive_parzen_normal_orig():
rng = np.random.default_rng(123)
prior_mu = 7
prior_sigma = 2
mus = rng.standard_normal(10) + 5
weights2, mus2, sigmas2 = adaptive_parzen_normal_orig(
mus, 3.3, prior_mu, prior_sigma
)
print(weights2)
print(mus2)
print(sigmas2)
assert len(weights2) == len(mus2) == len(sigmas2) == 11
assert np.all(weights2[0] > weights2[1:])
assert mus2[0] == 7
assert np.all(mus2[1:] == mus)
assert sigmas2[0] == 2
class TestGMM1(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
def test_mu_is_used_correctly(self):
assert np.allclose(10, GMM1([1], [10.0], [0.0000001], rng=self.rng))
def test_sigma_is_used_correctly(self):
samples = GMM1([1], [0.0], [10.0], size=[1000], rng=self.rng)
assert 9 < np.std(samples) < 11
def test_mus_make_variance(self):
samples = GMM1(
[0.5, 0.5], [0.0, 1.0], [0.000001, 0.000001], rng=self.rng, size=[1000]
)
print(samples.shape)
# import matplotlib.pyplot as plt
# plt.hist(samples)
# plt.show()
assert 0.45 < np.mean(samples) < 0.55, np.mean(samples)
assert 0.2 < np.var(samples) < 0.3, np.var(samples)
def test_weights(self):
samples = GMM1(
[0.9999, 0.0001],
[0.0, 1.0],
[0.000001, 0.000001],
rng=self.rng,
size=[1000],
)
assert samples.shape == (1000,)
# import matplotlib.pyplot as plt
# plt.hist(samples)
# plt.show()
assert -0.001 < np.mean(samples) < 0.001, np.mean(samples)
assert np.var(samples) < 0.0001, np.var(samples)
def test_mat_output(self):
samples = GMM1(
[0.9999, 0.0001],
[0.0, 1.0],
[0.000001, 0.000001],
rng=self.rng,
size=[40, 20],
)
assert samples.shape == (40, 20)
assert -0.001 < np.mean(samples) < 0.001, np.mean(samples)
assert np.var(samples) < 0.0001, np.var(samples)
def test_lpdf_scalar_one_component(self):
# x # weights # mu # sigma
llval = GMM1_lpdf(1.0, [1.0], [1.0], [2.0])
assert llval.shape == ()
assert np.allclose(llval, np.log(old_div(1.0, np.sqrt(2 * np.pi * 2.0**2))))
def test_lpdf_scalar_N_components(self):
llval = GMM1_lpdf(
1.0, # x
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
print(llval)
a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0**2))
        a += (
            0.5
            / np.sqrt(2 * np.pi * 5.0**2)
            * np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
        )
        assert np.allclose(llval, np.log(a))
def test_lpdf_vector_N_components(self):
llval = GMM1_lpdf(
[1.0, 0.0], # x
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
# case x = 1.0
a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0**2))
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0**2)
* np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
)
assert llval.shape == (2,)
assert np.allclose(llval[0], np.log(a))
# case x = 0.0
a = old_div(0.25, np.sqrt(2 * np.pi * 1.0**2))
a += (
0.25
/ np.sqrt(2 * np.pi * 2.0**2)
* np.exp(-0.5 * (old_div(1.0, 2.0)) ** 2)
)
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0**2)
* np.exp(-0.5 * (old_div(2.0, 5.0)) ** 2)
)
assert np.allclose(llval[1], np.log(a))
def test_lpdf_matrix_N_components(self):
llval = GMM1_lpdf(
[[1.0, 0.0, 0.0], [0, 0, 1], [0, 0, 1000]],
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
print(llval)
assert llval.shape == (3, 3)
a = 0.25 / np.sqrt(2 * np.pi * 1.0**2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0**2))
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0**2)
* np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
)
assert np.allclose(llval[0, 0], np.log(a))
assert np.allclose(llval[1, 2], np.log(a))
# case x = 0.0
a = old_div(0.25, np.sqrt(2 * np.pi * 1.0**2))
a += (
0.25
/ np.sqrt(2 * np.pi * 2.0**2)
* np.exp(-0.5 * (old_div(1.0, 2.0)) ** 2)
)
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0**2)
* np.exp(-0.5 * (old_div(2.0, 5.0)) ** 2)
)
assert np.allclose(llval[0, 1], np.log(a))
assert np.allclose(llval[0, 2], np.log(a))
assert np.allclose(llval[1, 0], np.log(a))
assert np.allclose(llval[1, 1], np.log(a))
assert np.allclose(llval[2, 0], np.log(a))
assert np.allclose(llval[2, 1], np.log(a))
assert np.isfinite(llval[2, 2])
class TestGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.q = None
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 500
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self):
self.worked = True
kwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = GMM1(rng=self.rng, size=(self.n_samples,), **kwargs)
samples = np.sort(samples)
edges = samples[:: self.samples_per_bin]
# print samples
pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
if self.show:
plt.scatter(edges[:-1], y)
plt.plot(edges[:-1], pdf)
plt.show()
err = (pdf - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if not self.show:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic(self):
self.work()
def test_bounded(self):
self.low = 2.5
self.high = 3.5
self.work()
class TestQGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW # or put a string
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self, **kwargs):
self.__dict__.update(kwargs)
del kwargs
self.worked = True
gkwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = old_div(GMM1(rng=self.rng, size=(self.n_samples,), **gkwargs), self.q)
print("drew", len(samples), "samples")
assert np.all(samples == samples.astype("int"))
min_max = int(samples.min()), int(samples.max())
counts = np.bincount(samples.astype("int") - min_max[0])
print(counts)
xcoords = np.arange(min_max[0], min_max[1] + 1) * self.q
prob = np.exp(GMM1_lpdf(xcoords, **gkwargs))
assert counts.sum() == self.n_samples
y = old_div(counts, float(self.n_samples))
if self.show:
plt.scatter(xcoords, y, c="r", label="empirical")
plt.scatter(xcoords, prob, c="b", label="predicted")
plt.legend()
plt.title(str(self.show))
plt.show()
err = (prob - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
def test_bounded_3(self):
self.work(
weights=[0.14285714, 0.28571429, 0.28571429, 0.28571429],
mus=[5.505, 7.0, 2.0, 10.0],
sigmas=[8.99, 5.0, 8.0, 8.0],
q=1,
low=1.01,
high=10,
n_samples=10000,
# show='bounded_3',
)
def test_bounded_3b(self):
self.work(
weights=[0.33333333, 0.66666667],
mus=[5.505, 5.0],
sigmas=[8.99, 5.19],
q=1,
low=1.01,
high=10,
n_samples=10000,
# show='bounded_3b',
)
class TestLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [-2.0, 1.0, 0.0, 3.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 200
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def LGMM1_kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
)
def LGMM1_lpdf(self, samples):
        return LGMM1_lpdf(samples, **self.LGMM1_kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = LGMM1(rng=self.rng, size=(self.n_samples,), **self.LGMM1_kwargs)
samples = np.sort(samples)
edges = samples[:: self.samples_per_bin]
centers = 0.5 * edges[:-1] + 0.5 * edges[1:]
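        # -- same equal-count histogram check as in TestGMM1Math, except the
        #    pdf is evaluated at the bin midpoints rather than the edges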
print(edges)
pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
if self.show:
plt.scatter(centers, y)
plt.plot(centers, pdf)
plt.show()
err = (pdf - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if not self.show:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic(self):
self.work()
def test_bounded(self):
self.work(low=2, high=4)
class TestQLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [-2, 0.0, -3.0, 1.0]
self.sigmas = [2.1, 0.4, 0.8, 2.1]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
def QLGMM1_lpdf(self, samples):
        return LGMM1_lpdf(samples, **self.kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = old_div(
LGMM1(rng=self.rng, size=(self.n_samples,), **self.kwargs), self.q
)
# -- we've divided the LGMM1 by self.q to get ints here
assert np.all(samples == samples.astype("int"))
min_max = int(samples.min()), int(samples.max())
print("SAMPLES RANGE", min_max)
counts = np.bincount(samples.astype("int") - min_max[0])
# print samples
# print counts
xcoords = np.arange(min_max[0], min_max[1] + 0.5) * self.q
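        # -- the + 0.5 makes arange include min_max[1] itself before the
        #    integer grid is scaled back up by q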
prob = np.exp(LGMM1_lpdf(xcoords, **self.kwargs))
print(xcoords)
print(prob)
assert counts.sum() == self.n_samples
y = old_div(counts, float(self.n_samples))
if self.show:
plt.scatter(xcoords, y, c="r", label="empirical")
plt.scatter(xcoords, prob, c="b", label="predicted")
plt.legend()
plt.show()
# -- calculate errors on the low end, don't take a mean
# over all the range spanned by a few outliers.
err = ((prob - y) ** 2)[:20]
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_basic_pt125(self):
self.work(q=0.125)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
class TestSuggest(unittest.TestCase, CasePerDomain):
def work(self):
# -- smoke test that things simply run,
# for each type of several search spaces.
trials = Trials()
fmin(
passthrough,
space=self.bandit.expr,
algo=partial(tpe.suggest, n_EI_candidates=3),
trials=trials,
max_evals=10,
)
class TestSuggestAtpe(unittest.TestCase, NonCategoricalCasePerDomain):
def work(self):
trials = Trials()
fmin(
passthrough,
space=self.bandit.expr,
algo=atpe.suggest,
trials=trials,
max_evals=10,
)
class TestOpt(unittest.TestCase, CasePerDomain):
thresholds = dict(
quadratic1=1e-5,
q1_lognormal=0.01,
distractor=-1.96,
gauss_wave=-2.0,
gauss_wave2=-2.0,
n_arms=-2.5,
many_dists=0.0005,
branin=0.7,
)
LEN = dict(
# -- running a long way out tests overflow/underflow
# to some extent
quadratic1=1000,
many_dists=200,
distractor=100,
# XXX
q1_lognormal=250,
gauss_wave2=75, # -- boosted from 50 on Nov/2013 after new
# sampling order made thresh test fail.
branin=200,
)
gammas = dict(distractor=0.05)
prior_weights = dict(distractor=0.01)
n_EIs = dict(
# XXX
# -- this can be low in a few dimensions
quadratic1=5,
# -- lower number encourages exploration
# XXX: this is a damned finicky way to get TPE
# to solve the Distractor problem
distractor=15,
)
def setUp(self):
self.olderr = np.seterr("raise")
np.seterr(under="ignore")
def tearDown(self, *args):
np.seterr(**self.olderr)
def work(self):
bandit = self.bandit
assert bandit.name is not None
algo = partial(
tpe.suggest,
gamma=self.gammas.get(bandit.name, tpe._default_gamma),
prior_weight=self.prior_weights.get(bandit.name, tpe._default_prior_weight),
n_EI_candidates=self.n_EIs.get(bandit.name, tpe._default_n_EI_candidates),
)
LEN = self.LEN.get(bandit.name, 50)
trials = Trials()
fmin(
passthrough,
space=bandit.expr,
algo=algo,
trials=trials,
max_evals=LEN,
rstate=np.random.default_rng(np.random.PCG64(0)),
catch_eval_exceptions=False,
)
assert len(trials) == LEN
if 1:
rtrials = Trials()
fmin(
passthrough,
space=bandit.expr,
algo=rand.suggest,
trials=rtrials,
max_evals=LEN,
)
print("RANDOM MINS", list(sorted(rtrials.losses()))[:6])
if 0:
plt.subplot(2, 2, 1)
plt.scatter(list(range(LEN)), trials.losses())
plt.title("TPE losses")
plt.subplot(2, 2, 2)
plt.scatter(list(range(LEN)), ([s["x"] for s in trials.specs]))
plt.title("TPE x")
plt.subplot(2, 2, 3)
plt.title("RND losses")
plt.scatter(list(range(LEN)), rtrials.losses())
plt.subplot(2, 2, 4)
plt.title("RND x")
plt.scatter(list(range(LEN)), ([s["x"] for s in rtrials.specs]))
plt.show()
if 0:
plt.hist([t["x"] for t in self.experiment.trials], bins=20)
print("TPE MINS", list(sorted(trials.losses()))[:6])
thresh = self.thresholds[bandit.name]
print("Thresh", thresh)
assert min(trials.losses()) < thresh
@domain_constructor(loss_target=0)
def opt_q_uniform(target):
rng = np.random.default_rng(123)
x = hp.quniform("x", 1.01, 10, 1)
return {
"loss": (x - target) ** 2 + scope.normal(0, 1, rng=rng),
"status": STATUS_OK,
}
class TestOptQUniform:
show_steps = False
show_vars = DO_SHOW
LEN = 25
def work(self, **kwargs):
self.__dict__.update(kwargs)
bandit = opt_q_uniform(self.target)
prior_weight = 2.5
gamma = 0.20
algo = partial(
tpe.suggest,
prior_weight=prior_weight,
n_startup_jobs=2,
n_EI_candidates=128,
gamma=gamma,
)
trials = Trials()
fmin(
passthrough, space=bandit.expr, algo=algo, trials=trials, max_evals=self.LEN
)
if self.show_vars:
import hyperopt.plotting
hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
idxs = idxs["x"]
vals = vals["x"]
losses = trials.losses()
from hyperopt.tpe import ap_split_trials
from hyperopt.tpe import adaptive_parzen_samplers
qu = scope.quniform(1.01, 10, 1)
fn = adaptive_parzen_samplers["quniform"]
fn_kwargs = dict(size=(4,), rng=np.random)
s_below = pyll.Literal()
s_above = pyll.Literal()
b_args = [s_below, prior_weight] + qu.pos_args
b_post = fn(*b_args, **fn_kwargs)
a_args = [s_above, prior_weight] + qu.pos_args
a_post = fn(*a_args, **fn_kwargs)
# print b_post
# print a_post
fn_lpdf = getattr(scope, a_post.name + "_lpdf")
print(fn_lpdf)
# calculate the llik of b_post under both distributions
a_kwargs = {n: a for n, a in a_post.named_args if n not in ("rng", "size")}
b_kwargs = {n: a for n, a in b_post.named_args if n not in ("rng", "size")}
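        # -- rng/size are stripped so the lpdf nodes score the given samples
        #    instead of drawing new ones; below_llik/above_llik score the
        #    below-posterior samples under each density, and broadcast_best
        #    then picks the candidate with the best llik difference (the EI
        #    surrogate that TPE maximizes)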
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
print("=" * 80)
do_show = self.show_steps
for ii in range(2, 9):
if ii > len(idxs):
break
print("-" * 80)
print("ROUND", ii)
print("-" * 80)
all_vals = [2, 3, 4, 5, 6, 7, 8, 9, 10]
below, above = ap_split_trials(
idxs[:ii], vals[:ii], idxs[:ii], losses[:ii], gamma
)
below = below.astype("int")
above = above.astype("int")
print("BB0", below)
print("BB1", above)
# print 'BELOW', zip(range(100), np.bincount(below, minlength=11))
# print 'ABOVE', zip(range(100), np.bincount(above, minlength=11))
memo = {b_post: all_vals, s_below: below, s_above: above}
bl, al, nv = pyll.rec_eval([below_llik, above_llik, new_node], memo=memo)
# print bl - al
print("BB2", dict(list(zip(all_vals, bl - al))))
print("BB3", dict(list(zip(all_vals, bl))))
print("BB4", dict(list(zip(all_vals, al))))
print("ORIG PICKED", vals[ii])
print("PROPER OPT PICKS:", nv)
# assert np.allclose(below, [3, 3, 9])
# assert len(below) + len(above) == len(vals)
if do_show:
plt.subplot(8, 1, ii)
# plt.scatter(all_vals,
# np.bincount(below, minlength=11)[2:], c='b')
# plt.scatter(all_vals,
# np.bincount(above, minlength=11)[2:], c='c')
plt.scatter(all_vals, bl, c="g")
plt.scatter(all_vals, al, c="r")
if do_show:
plt.show()
def test4(self):
self.work(target=4, LEN=100)
def test2(self):
self.work(target=2, LEN=100)
def test6(self):
self.work(target=6, LEN=100)
def test10(self):
self.work(target=10, LEN=100)
| 23,101 | 28.429299 | 88 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_webpage.py |
def test_landing_screen():
# define an objective function
def objective(args):
case, val = args
if case == "case 1":
return val
else:
return val**2
# define a search space
from hyperopt import hp
space = hp.choice(
"a",
[
("case 1", 1 + hp.lognormal("c1", 0, 1)),
("case 2", hp.uniform("c2", -10, 10)),
],
)
# minimize the objective over the space
import hyperopt
best = hyperopt.fmin(objective, space, algo=hyperopt.tpe.suggest, max_evals=100)
print(best)
# -> {'a': 1, 'c2': 0.01420615366247227}
print(hyperopt.space_eval(space, best))
    # -> ('case 2', 0.01420615366247227)
| 729 | 22.548387 | 84 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_vectorize.py |
import numpy as np
from hyperopt.pyll import as_apply, scope, rec_eval, clone, dfs
from hyperopt.pyll.stochastic import recursive_set_rng_kwarg
from hyperopt import base, fmin, rand
from hyperopt.vectorize import VectorizeHelper
from hyperopt.vectorize import replace_repeat_stochastic
from hyperopt.pyll_utils import hp_choice
from hyperopt.pyll_utils import hp_uniform
from hyperopt.pyll_utils import hp_quniform
from hyperopt.pyll_utils import hp_loguniform
from hyperopt.pyll_utils import hp_qloguniform
def config0():
p0 = scope.uniform(0, 1)
p1 = scope.uniform(2, 3)
p2 = scope.one_of(-1, p0)
p3 = scope.one_of(-2, p1)
p4 = 1
p5 = [3, 4, p0]
p6 = scope.one_of(-3, p1)
d = locals()
d["p1"] = None # -- don't sample p1 all the time, only if p3 says so
s = as_apply(d)
return s
def test_clone():
config = config0()
config2 = clone(config)
nodeset = set(dfs(config))
assert not any(n in nodeset for n in dfs(config2))
foo = recursive_set_rng_kwarg(config, scope.rng_from_seed(5))
r = rec_eval(foo)
print(r)
r2 = rec_eval(recursive_set_rng_kwarg(config2, scope.rng_from_seed(5)))
print(r2)
assert r == r2
def test_vectorize_trivial():
N = as_apply(15)
p0 = hp_uniform("p0", 0, 1)
loss = p0
print(loss)
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.default_rng(1)))
# print new_vc
losses, idxs, vals = rec_eval(new_vc)
print("losses", losses)
print("idxs p0", idxs["p0"])
print("vals p0", vals["p0"])
p0dct = dict(list(zip(idxs["p0"], vals["p0"])))
for ii, li in enumerate(losses):
assert p0dct[ii] == li
def test_vectorize_simple():
N = as_apply(15)
p0 = hp_uniform("p0", 0, 1)
loss = p0**2
print(loss)
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.default_rng(1)))
# print new_vc
losses, idxs, vals = rec_eval(new_vc)
print("losses", losses)
print("idxs p0", idxs["p0"])
print("vals p0", vals["p0"])
p0dct = dict(list(zip(idxs["p0"], vals["p0"])))
for ii, li in enumerate(losses):
assert p0dct[ii] ** 2 == li
def test_vectorize_multipath():
N = as_apply(15)
p0 = hp_uniform("p0", 0, 1)
loss = hp_choice("p1", [1, p0, -p0]) ** 2
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
print(vloss)
full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
new_vc = recursive_set_rng_kwarg(full_output, as_apply(np.random.default_rng(1)))
losses, idxs, vals = rec_eval(new_vc)
print("losses", losses)
print("idxs p0", idxs["p0"])
print("vals p0", vals["p0"])
print("idxs p1", idxs["p1"])
print("vals p1", vals["p1"])
p0dct = dict(list(zip(idxs["p0"], vals["p0"])))
p1dct = dict(list(zip(idxs["p1"], vals["p1"])))
for ii, li in enumerate(losses):
print(ii, li)
if p1dct[ii] != 0:
assert li == p0dct[ii] ** 2
else:
assert li == 1
def test_vectorize_config0():
p0 = hp_uniform("p0", 0, 1)
p1 = hp_loguniform("p1", 2, 3)
p2 = hp_choice("p2", [-1, p0])
p3 = hp_choice("p3", [-2, p1])
p4 = 1
p5 = [3, 4, p0]
p6 = hp_choice("p6", [-3, p1])
d = locals()
d["p1"] = None # -- don't sample p1 all the time, only if p3 says so
config = as_apply(d)
N = as_apply("N:TBA")
expr = config
expr_idxs = scope.range(N)
vh = VectorizeHelper(expr, expr_idxs, build=True)
vconfig = vh.v_expr
full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])
if 1:
print("=" * 80)
print("VECTORIZED")
print(full_output)
print("\n" * 1)
fo2 = replace_repeat_stochastic(full_output)
if 0:
print("=" * 80)
print("VECTORIZED STOCHASTIC")
print(fo2)
print("\n" * 1)
new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.default_rng(1)))
if 0:
print("=" * 80)
print("VECTORIZED STOCHASTIC WITH RNGS")
print(new_vc)
Nval = 10
foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})
print("foo[0]", foo[0])
print("foo[1]", foo[1])
assert len(foo) == Nval
if 0: # XXX refresh these values to lock down sampler
assert foo[0] == {
"p0": 0.39676747423066994,
"p1": None,
"p2": 0.39676747423066994,
"p3": 2.1281244479293568,
"p4": 1,
"p5": (3, 4, 0.39676747423066994),
}
    # -- dict-view comparisons are identity-based in Python 3, so compare
    #    the sample dicts directly to check that consecutive draws differ
    assert foo[1] != foo[2]
print(idxs)
print(vals["p3"])
print(vals["p6"])
print(idxs["p1"])
print(vals["p1"])
assert len(vals["p3"]) == Nval
assert len(vals["p6"]) == Nval
assert len(idxs["p1"]) < Nval
p1d = dict(list(zip(idxs["p1"], vals["p1"])))
for ii, (p3v, p6v) in enumerate(zip(vals["p3"], vals["p6"])):
if p3v == p6v == 0:
assert ii not in idxs["p1"]
if p3v:
assert foo[ii]["p3"] == p1d[ii]
if p6v:
print("p6", foo[ii]["p6"], p1d[ii])
assert foo[ii]["p6"] == p1d[ii]
def test_distributions():
# test that the distributions come out right
# XXX: test more distributions
space = {
"loss": (
hp_loguniform("lu", -2, 2)
+ hp_qloguniform("qlu", np.log(1 + 0.01), np.log(20), 2)
+ hp_quniform("qu", -4.999, 5, 1)
+ hp_uniform("u", 0, 10)
),
"status": "ok",
}
trials = base.Trials()
N = 1000
fmin(
lambda x: x,
space=space,
algo=rand.suggest,
trials=trials,
max_evals=N,
rstate=np.random.default_rng(124),
catch_eval_exceptions=False,
)
assert len(trials) == N
idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
print(list(idxs.keys()))
COUNTMAX = 130
COUNTMIN = 70
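    # -- N = 1000 samples over np.histogram's default 10 bins gives roughly
    #    100 expected counts per bin; the bounds allow ~+/-30% deviation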
# -- loguniform
log_lu = np.log(vals["lu"])
assert len(log_lu) == N
assert -2 < np.min(log_lu)
assert np.max(log_lu) < 2
h = np.histogram(log_lu)[0]
print(h)
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
# -- quantized log uniform
qlu = vals["qlu"]
assert np.all(np.fmod(qlu, 2) == 0)
assert np.min(qlu) == 2
assert np.max(qlu) == 20
bc_qlu = np.bincount(qlu)
assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]
# -- quantized uniform
qu = vals["qu"]
assert np.min(qu) == -5
assert np.max(qu) == 5
assert np.all(np.fmod(qu, 1) == 0)
bc_qu = np.bincount(np.asarray(qu).astype("int") + 5)
assert np.all(40 < bc_qu), bc_qu # XXX: how to get the distribution flat
# with new rounding rule?
assert np.all(bc_qu < 125), bc_qu
assert np.all(bc_qu < COUNTMAX)
# -- uniform
u = vals["u"]
assert np.min(u) > 0
assert np.max(u) < 10
h = np.histogram(u)[0]
print(h)
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
# import matplotlib.pyplot as plt
# plt.hist(np.log(vals['node_2']))
# plt.show()
| 7,597 | 26.729927 | 85 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_plotting.py |
"""
Verify that the plotting routines can at least run.
If environment variable HYPEROPT_SHOW is defined and true,
then the plots actually appear.
"""
import unittest
import os
try:
import matplotlib
matplotlib.use("svg") # -- prevents trying to connect to X server
except ImportError:
import nose
raise nose.SkipTest()
from hyperopt import Trials
import hyperopt.plotting
from hyperopt import rand, fmin
from .test_domains import many_dists
def get_do_show():
rval = int(os.getenv("HYPEROPT_SHOW", "0"))
print("do_show =", rval)
return rval
class TestPlotting(unittest.TestCase):
def setUp(self):
domain = self.domain = many_dists()
trials = self.trials = Trials()
fmin(
lambda x: x,
space=domain.expr,
trials=trials,
algo=rand.suggest,
max_evals=200,
)
def test_plot_history(self):
hyperopt.plotting.main_plot_history(self.trials, do_show=get_do_show())
def test_plot_histogram(self):
hyperopt.plotting.main_plot_histogram(self.trials, do_show=get_do_show())
def test_plot_vars(self):
hyperopt.plotting.main_plot_vars(self.trials, self.domain)
| 1,220 | 22.480769 | 81 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_rand.py |
import unittest
from hyperopt.base import Trials, trials_from_docs, miscs_to_idxs_vals
from hyperopt import rand
from hyperopt.tests.test_base import Suggest_API
from .test_domains import gauss_wave2, coin_flip
import numpy as np
TestRand = Suggest_API.make_tst_class(rand.suggest, gauss_wave2(), "TestRand")
# -- named distinctly so it does not shadow the generated TestRand above
class TestRandSeeding(unittest.TestCase):
def test_seeding(self):
# -- assert that the seeding works a particular way
domain = coin_flip()
docs = rand.suggest(
list(range(10)), domain, Trials(), seed=np.random.PCG64(123)
)
trials = trials_from_docs(docs)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
# Passes Nov 8 / 2013
self.assertEqual(list(idxs["flip"]), list(range(10)))
self.assertEqual(list(vals["flip"]), [0, 1, 1, 0, 1, 0, 0, 0, 0, 0])
# -- TODO: put in a test that guarantees that
# stochastic nodes are sampled in a particular order.
| 959 | 33.285714 | 78 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_anneal.py |
from functools import partial
import unittest
import numpy as np
from hyperopt import anneal
from hyperopt import rand
from hyperopt import Trials, fmin
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from .test_domains import CasePerDomain
def passthrough(x):
return x
class TestItJustRuns(unittest.TestCase, CasePerDomain):
def work(self):
trials = Trials()
space = self.bandit.expr
fmin(
fn=passthrough,
space=space,
trials=trials,
algo=anneal.suggest,
max_evals=10,
)
class TestItAtLeastSortOfWorks(unittest.TestCase, CasePerDomain):
thresholds = dict(
quadratic1=1e-5,
q1_lognormal=0.01,
distractor=-0.96, # -- anneal is a strategy that can really
# get tricked by the distractor.
gauss_wave=-2.0,
gauss_wave2=-2.0,
n_arms=-2.5,
many_dists=0.0005,
branin=0.7,
)
iters_thresholds = dict(
# -- running a long way out tests overflow/underflow
# to some extent
quadratic1=1000,
many_dists=200,
# -- anneal is pretty bad at this kind of function
distractor=150,
branin=200,
)
def setUp(self):
self.olderr = np.seterr("raise")
np.seterr(under="ignore")
def tearDown(self, *args):
np.seterr(**self.olderr)
def work(self):
bandit = self.bandit
assert bandit.name is not None
algo = partial(anneal.suggest)
iters_thresholds = self.iters_thresholds.get(bandit.name, 50)
trials = Trials()
fmin(
fn=passthrough,
space=self.bandit.expr,
trials=trials,
algo=algo,
max_evals=iters_thresholds,
rstate=np.random.default_rng(8),
)
assert len(trials) == iters_thresholds
rtrials = Trials()
fmin(
fn=passthrough,
space=self.bandit.expr,
trials=rtrials,
algo=rand.suggest,
max_evals=iters_thresholds,
rstate=np.random.default_rng(8),
)
thresh = self.thresholds[bandit.name]
assert min(trials.losses()) < thresh
| 2,273 | 23.451613 | 69 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_atpe_basic.py |
import random
import numpy as np
from hyperopt import hp, fmin, atpe, space_eval
random.seed(1)
np.random.seed(1)
def test_run_basic_search():
def objective(args):
case, val = args
if case == "case 1":
return val
else:
return val**2
# define a search space
space = hp.choice(
"a",
[
("case 1", 1 + hp.lognormal("c1", 0, 1)),
("case 2", hp.uniform("c2", -10, 10)),
],
)
# minimize the objective over the space
# NOTE: Max evals should be greater than 10, as the first 10 runs are only the initialization rounds
best = fmin(
objective,
space,
algo=atpe.suggest,
max_evals=20,
rstate=np.random.default_rng(1),
)
assert best["a"] == 1
assert space_eval(space, best)[0] == "case 2"
| 859 | 21.631579 | 104 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_ipy.py |
"""
To use this test script, there should be a cluster of ipython parallel engines
instantiated already. Their working directory should be the current
directory: hyperopt/tests
To start the engines in hyperopt/hyperopt/tests/
use: $ ipcluster start --n=2
"""
import sys
from nose import SkipTest
try:
from IPython.parallel import Client
except ImportError:
print("Skipping IPython Tests (IPython not found)", file=sys.stderr)
raise SkipTest("IPython not present")
from hyperopt.ipy import IPythonTrials
import hyperopt.hp
import hyperopt.tpe
import hyperopt
def test0():
try:
client = Client(debug=True)
except OSError:
raise SkipTest()
client[:].use_cloudpickle()
trials = IPythonTrials(client, "log")
def simple_objective(args):
# -- why are these imports here !?
# -- is it because they need to be imported on the client?
#
# Yes, the client namespace is empty, so some imports may be
# needed here. Errors on the engines can be found by
# using debug=True when instantiating the Client.
import hyperopt
return {"loss": args**2, "status": hyperopt.STATUS_OK}
space = hyperopt.hp.uniform("x", 0, 1)
minval = trials.fmin(
simple_objective,
space=space,
algo=hyperopt.tpe.suggest,
max_evals=25,
verbose=True,
)
print(minval)
assert minval["x"] < 0.2
def test_fmin_fn():
try:
client = Client()
except OSError:
raise SkipTest()
client[:].use_cloudpickle()
trials = IPythonTrials(client, "log")
assert not trials._testing_fmin_was_called
def simple_objective(args):
import hyperopt
return {"loss": args**2, "status": hyperopt.STATUS_OK}
space = hyperopt.hp.uniform("x", 0, 1)
minval = hyperopt.fmin(
simple_objective,
space=space,
algo=hyperopt.tpe.suggest,
max_evals=25,
trials=trials,
)
assert minval["x"] < 0.2
assert trials._testing_fmin_was_called
| 2,061 | 22.701149 | 78 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_randint.py |
import unittest
from functools import partial
import numpy as np
from hyperopt import hp, Trials, fmin, rand, tpe
import hyperopt.pyll.stochastic
def test_basic():
space = hp.randint("a", 5)
x = np.zeros(5)
rng = np.random.default_rng(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
x[nesto] += 1
print(x)
for i in x:
assert 100 < i < 300
def test_basic2():
space = hp.randint("a", 5, 15)
x = np.zeros(15)
rng = np.random.default_rng(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
x[nesto] += 1
print(x)
for i in range(5):
assert x[i] == 0
for i in range(5, 15):
assert 80 < x[i] < 120
class TestSimpleFMin(unittest.TestCase):
    # test that a space with a randint in it is
# (a) accepted for each algo (random, tpe)
# and
# (b) handled correctly in fmin, finding the solution in the constrained space
#
def setUp(self):
self.space = hp.randint("t", 2, 100)
self.trials = Trials()
def objective(self, a):
# an objective function with roots at 3, 10, 50
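        # -- np.poly1d([1, -63, 680, -1500]) is the expansion of
        #    (a - 3) * (a - 10) * (a - 50)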
return abs(np.poly1d([1, -63, 680, -1500])(a))
def test_random_runs(self):
max_evals = 150
fmin(
self.objective,
space=self.space,
trials=self.trials,
algo=rand.suggest,
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
values = [t["misc"]["vals"]["t"][0] for t in self.trials.trials]
counts = np.bincount(values, minlength=100)
assert counts[:2].sum() == 0
def test_tpe_runs(self):
max_evals = 100
fmin(
self.objective,
space=self.space,
trials=self.trials,
algo=partial(tpe.suggest, n_startup_jobs=10),
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
values = [t["misc"]["vals"]["t"][0] for t in self.trials.trials]
counts = np.bincount(values, minlength=100)
assert counts[:2].sum() == 0
def test_random_finds_constrained_solution(self):
max_evals = 150
# (2, 7), (2, 30), (2, 100), (5, 30), (5, 100), (20, 100)
for lower, upper in zip([2, 2, 2, 5, 5, 20], [7, 30, 100, 30, 100, 100]):
best = fmin(
self.objective,
space=hp.randint("t", lower, upper),
algo=rand.suggest,
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
expected = [i for i in [3, 10, 50] if lower <= i < upper]
assert best["t"] in expected
def test_tpe_finds_constrained_solution(self):
max_evals = 150
# (2, 7), (2, 30), (2, 100), (5, 30), (5, 100), (20, 100)
for lower, upper in zip([2, 2, 2, 5, 5, 20], [7, 30, 100, 30, 100, 100]):
best = fmin(
self.objective,
space=hp.randint("t", lower, upper),
algo=tpe.suggest,
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
expected = [i for i in [3, 10, 50] if lower <= i < upper]
assert best["t"] in expected
| 3,332 | 29.027027 | 82 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_rdists.py |
from past.utils import old_div
from collections import defaultdict
import unittest
import numpy as np
import numpy.testing as npt
from hyperopt.rdists import (
loguniform_gen,
lognorm_gen,
quniform_gen,
qloguniform_gen,
qnormal_gen,
qlognormal_gen,
)
from scipy import stats
from scipy.stats.tests.test_continuous_basic import (
check_cdf_logcdf,
check_pdf_logpdf,
check_pdf,
check_cdf_ppf,
)
class TestLogUniform(unittest.TestCase):
def test_cdf_logcdf(self):
check_cdf_logcdf(loguniform_gen(0, 1), (0, 1), "")
check_cdf_logcdf(loguniform_gen(0, 1), (-5, 5), "")
def test_cdf_ppf(self):
check_cdf_ppf(loguniform_gen(0, 1), (0, 1), "")
check_cdf_ppf(loguniform_gen(-2, 1), (-5, 5), "")
def test_pdf_logpdf(self):
check_pdf_logpdf(loguniform_gen(0, 1), (0, 1), "")
check_pdf_logpdf(loguniform_gen(low=-4, high=-0.5), (-2, 1), "")
def test_pdf(self):
check_pdf(loguniform_gen(0, 1), (0, 1), "")
check_pdf(loguniform_gen(low=-4, high=-2), (-3, 2), "")
def test_distribution_rvs(self):
alpha = 0.01
loc = 0
scale = 1
arg = (loc, scale)
distfn = loguniform_gen(0, 1)
D, pval = stats.kstest(distfn.rvs, distfn.cdf, args=arg, N=1000)
if pval < alpha:
npt.assert_(
pval > alpha,
f"D = {D:f}; pval = {pval:f}; alpha = {alpha:f}; args={arg}",
)
class TestLogNormal(unittest.TestCase):
def test_cdf_logcdf(self):
check_cdf_logcdf(lognorm_gen(0, 1), (), "")
        check_cdf_logcdf(lognorm_gen(-2, 1), (), "")
def test_cdf_ppf(self):
check_cdf_ppf(lognorm_gen(0, 1), (), "")
check_cdf_ppf(lognorm_gen(-2, 1), (), "")
def test_pdf_logpdf(self):
check_pdf_logpdf(lognorm_gen(0, 1), args=(), msg="base case")
check_pdf_logpdf(
lognorm_gen(mu=-4, sigma=0.5), args=(), msg="non-default mu, sigma"
)
def test_pdf(self):
check_pdf(lognorm_gen(0, 1), (), "")
check_pdf(lognorm_gen(mu=-4, sigma=2), (), "")
def test_distribution_rvs(self):
import warnings
warnings.warn("test_distribution_rvs is being skipped!")
return # XXX
alpha = 0.01
loc = 0
scale = 1
arg = (loc, scale)
distfn = lognorm_gen(0, 1)
D, pval = stats.kstest(distfn.rvs, distfn.cdf, args=arg, N=1000)
if pval < alpha:
npt.assert_(
pval > alpha,
f"D = {D:f}; pval = {pval:f}; alpha = {alpha:f}; args={arg}",
)
def check_d_samples(dfn, n, rtol=1e-2, atol=1e-2):
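    # -- draw n samples, convert them to empirical frequencies, and check
    #    each observed value's frequency against the distribution's pmf
    #    within rtol/atol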
counts = defaultdict(lambda: 0)
# print 'sample', dfn.rvs(size=n)
inc = old_div(1.0, n)
for s in dfn.rvs(size=n):
counts[s] += inc
for ii, p in sorted(counts.items()):
t = np.allclose(dfn.pmf(ii), p, rtol=rtol, atol=atol)
if not t:
print(("Error in sampling frequencies", ii))
print("value\tpmf\tfreq")
for jj in sorted(counts):
print(f"{jj:.2f}\t{dfn.pmf(jj):.3f}\t{counts[jj]:.4f}")
npt.assert_(t, "n = %i; pmf = %f; p = %f" % (n, dfn.pmf(ii), p))
class TestQUniform(unittest.TestCase):
def test_smallq(self):
low, high, q = (0, 1, 0.1)
qu = quniform_gen(low, high, q)
check_d_samples(qu, n=10000)
def test_bigq(self):
low, high, q = (-20, -1, 3)
qu = quniform_gen(low, high, q)
check_d_samples(qu, n=10000)
def test_offgrid_int(self):
qn = quniform_gen(0, 2, 2)
assert qn.pmf(0) > 0.0
assert qn.pmf(1) == 0.0
assert qn.pmf(2) > 0.0
assert qn.pmf(3) == 0.0
assert qn.pmf(-1) == 0.0
def test_offgrid_float(self):
qn = quniform_gen(0, 1, 0.2)
assert qn.pmf(0) > 0.0
assert qn.pmf(0.1) == 0.0
assert qn.pmf(0.2) > 0.0
assert qn.pmf(0.4) > 0.0
assert qn.pmf(0.8) > 0.0
assert qn.pmf(-0.2) == 0.0
assert qn.pmf(0.99) == 0.0
assert qn.pmf(-0.99) == 0.0
def test_output_type_int(self):
result = quniform_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(quniform_gen(0, 10, 1.0).rvs())
class TestQLogUniform(unittest.TestCase):
def logp(self, x, low, high, q):
return qloguniform_gen(low, high, q).logpmf(x)
def test_smallq(self):
low, high, q = (0, 1, 0.1)
qlu = qloguniform_gen(low, high, q)
check_d_samples(qlu, n=10000)
def test_bigq(self):
low, high, q = (-20, 4, 3)
qlu = qloguniform_gen(low, high, q)
check_d_samples(qlu, n=10000)
def test_point(self):
low, high, q = (np.log(0.05), np.log(0.15), 0.5)
qlu = qloguniform_gen(low, high, q)
check_d_samples(qlu, n=10000)
def test_2points(self):
low, high, q = (np.log(0.05), np.log(0.75), 0.5)
qlu = qloguniform_gen(low, high, q)
check_d_samples(qlu, n=10000)
def test_point_logpmf(self):
assert np.allclose(self.logp(0, np.log(0.25), np.log(0.5), 1), 0.0)
def test_rounding_logpmf(self):
assert self.logp(0, np.log(0.25), np.log(0.75), 1) > self.logp(
1, np.log(0.25), np.log(0.75), 1
)
assert (
self.logp(-1, np.log(0.25), np.log(0.75), 1)
== self.logp(2, np.log(0.25), np.log(0.75), 1)
== -np.inf
)
def test_smallq_logpmf(self):
assert (
self.logp(0.2, np.log(0.16), np.log(0.55), 0.1)
> self.logp(0.3, np.log(0.16), np.log(0.55), 0.1)
> self.logp(0.4, np.log(0.16), np.log(0.55), 0.1)
> self.logp(0.5, np.log(0.16), np.log(0.55), 0.1)
> -10
)
assert (
self.logp(0.1, np.log(0.16), np.log(0.55), 1)
== self.logp(0.6, np.log(0.16), np.log(0.55), 1)
== -np.inf
)
def test_output_type_int(self):
result = qloguniform_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(qloguniform_gen(0, 10, 1.0).rvs())
class TestQNormal(unittest.TestCase):
def test_smallq(self):
mu, sigma, q = (0, 1, 0.1)
qn = qnormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_bigq(self):
mu, sigma, q = (-20, 4, 3)
qn = qnormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_offgrid_int(self):
qn = qnormal_gen(0, 1, 2)
assert qn.pmf(0) > 0.0
assert qn.pmf(1) == 0.0
assert qn.pmf(2) > 0.0
def test_offgrid_float(self):
qn = qnormal_gen(0, 1, 0.2)
assert qn.pmf(0) > 0.0
assert qn.pmf(0.1) == 0.0
assert qn.pmf(0.2) > 0.0
assert qn.pmf(0.4) > 0.0
assert qn.pmf(-0.2) > 0.0
assert qn.pmf(-0.4) > 0.0
assert qn.pmf(0.99) == 0.0
assert qn.pmf(-0.99) == 0.0
def test_numeric(self):
qn = qnormal_gen(0, 1, 1)
assert qn.pmf(500) > -np.inf
def test_output_type_int(self):
result = qnormal_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(qnormal_gen(0, 10, 1.0).rvs())
class TestQLogNormal(unittest.TestCase):
def test_smallq(self):
mu, sigma, q = (0, 1, 0.1)
qn = qlognormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_bigq(self):
mu, sigma, q = (-20, 4, 3)
qn = qlognormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_offgrid_int(self):
mu, sigma, q = (1, 2, 2)
qn = qlognormal_gen(mu, sigma, q)
assert qn.pmf(0) > qn.pmf(2) > qn.pmf(20) > 0
assert qn.pmf(1) == qn.pmf(2 - 0.001) == qn.pmf(-1) == 0
def test_offgrid_float(self):
mu, sigma, q = (-0.5, 2, 0.2)
qn = qlognormal_gen(mu, sigma, q)
assert qn.pmf(0) > qn.pmf(0.2) > qn.pmf(2) > 0
assert qn.pmf(0.1) == qn.pmf(0.2 - 0.001) == qn.pmf(-0.2) == 0
def test_numeric(self):
# XXX we don't have a numerically accurate computation for this guy
# qn = qlognormal_gen(0, 1, 1)
# assert -np.inf < qn.logpmf(1e-20) < -50
# assert -np.inf < qn.logpmf(1e20) < -50
pass
def test_output_type_int(self):
result = qlognormal_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(qlognormal_gen(0, 10, 1.0).rvs())
# -- non-empty last line for flake8
| 8,714 | 29.578947 | 79 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/__init__.py |  | 0 | 0 | 0 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_domains.py |
from past.utils import old_div
import unittest
import numpy as np
from hyperopt import Trials, Domain, fmin, hp, base
from hyperopt.rand import suggest
from hyperopt.pyll import as_apply
from hyperopt.pyll import scope
# -- define this bandit here too for completeness' sake
def domain_constructor(**b_kwargs):
"""
Decorate a function that returns a pyll expressions so that
it becomes a Domain instance instead of a function
Example:
@domain_constructor(loss_target=0)
def f(low, high):
return {'loss': hp.uniform('x', low, high) ** 2 }
"""
def deco(f):
def wrapper(*args, **kwargs):
if "name" in b_kwargs:
_b_kwargs = b_kwargs
else:
_b_kwargs = dict(b_kwargs, name=f.__name__)
f_rval = f(*args, **kwargs)
domain = Domain(lambda x: x, f_rval, **_b_kwargs)
return domain
wrapper.__name__ = f.__name__
return wrapper
return deco
@domain_constructor()
def coin_flip():
"""Possibly the simplest possible Bandit implementation"""
return {"loss": hp.choice("flip", [0.0, 1.0]), "status": base.STATUS_OK}
@domain_constructor(loss_target=0)
def quadratic1():
"""
About the simplest problem you could ask for:
optimize a one-variable quadratic function.
"""
return {"loss": (hp.uniform("x", -5, 5) - 3) ** 2, "status": base.STATUS_OK}
@domain_constructor(loss_target=0)
def q1_choice():
o_x = hp.choice(
"o_x", [(-3, hp.uniform("x_neg", -5, 5)), (3, hp.uniform("x_pos", -5, 5))]
)
return {"loss": (o_x[0] - o_x[1]) ** 2, "status": base.STATUS_OK}
@domain_constructor(loss_target=0)
def q1_lognormal():
"""
About the simplest problem you could ask for:
optimize a one-variable quadratic function.
"""
return {
"loss": scope.min(0.1 * (hp.lognormal("x", 0, 2) - 10) ** 2, 10),
"status": base.STATUS_OK,
}
@domain_constructor(loss_target=-2)
def n_arms(N=2):
"""
Each arm yields a reward from a different Gaussian.
The correct arm is arm 0.
"""
rng = np.random.default_rng(123)
x = hp.choice("x", [0, 1])
reward_mus = as_apply([-1] + [0] * (N - 1))
reward_sigmas = as_apply([1] * N)
return {
"loss": scope.normal(reward_mus[x], reward_sigmas[x], rng=rng),
"loss_variance": 1.0,
"status": base.STATUS_OK,
}
@domain_constructor(loss_target=-2)
def distractor():
"""
This is a nasty function: it has a max in a spike near -10, and a long
asymptote that is easy to find, but guides hill-climbing approaches away
from the true max.
The second peak is at x=-10.
The prior mean is 0.
"""
x = hp.uniform("x", -15, 15)
# climbs rightward from 0.0 to 1.0
f1 = old_div(1.0, (1.0 + scope.exp(-x)))
f2 = 2 * scope.exp(-((x + 10) ** 2)) # bump with height 2 at (x=-10)
return {"loss": -f1 - f2, "status": base.STATUS_OK}
@domain_constructor(loss_target=-1)
def gauss_wave():
"""
Essentially, this is a high-frequency sinusoidal function plus a broad quadratic.
One variable controls the position along the curve.
The binary variable determines whether the sinusoidal is shifted by pi.
So there are actually two maxima in this problem, it's just one is more
probable. The tricky thing here is dealing with the fact that there are two
variables and one is discrete.
"""
x = hp.uniform("x", -20, 20)
t = hp.choice("curve", [x, x + np.pi])
f1 = scope.sin(t)
f2 = 2 * scope.exp(-((old_div(t, 5.0)) ** 2))
return {"loss": -(f1 + f2), "status": base.STATUS_OK}
@domain_constructor(loss_target=-2.5)
def gauss_wave2():
"""
Variant of the GaussWave problem in which noise is added to the score
function, and there is an option to either have no sinusoidal variation, or
a negative cosine with variable amplitude.
Immediate local max is to sample x from spec and turn off the neg cos.
Better solution is to move x a bit to the side, turn on the neg cos and turn
up the amp to 1.
"""
rng = np.random.default_rng(123)
var = 0.1
x = hp.uniform("x", -20, 20)
amp = hp.uniform("amp", 0, 1)
t = scope.normal(0, var, rng=rng) + 2 * scope.exp(-((old_div(x, 5.0)) ** 2))
return {
"loss": -hp.choice("hf", [t, t + scope.sin(x) * amp]),
"loss_variance": var,
"status": base.STATUS_OK,
}
@domain_constructor(loss_target=0)
def many_dists():
a = hp.choice("a", [0, 1, 2])
b = hp.randint("b", 10)
bb = hp.randint("bb", 12, 25)
c = hp.uniform("c", 4, 7)
d = hp.loguniform("d", -2, 0)
e = hp.quniform("e", 0, 10, 3)
f = hp.qloguniform("f", 0, 3, 2)
g = hp.normal("g", 4, 7)
h = hp.lognormal("h", -2, 2)
i = hp.qnormal("i", 0, 10, 2)
j = hp.qlognormal("j", 0, 2, 1)
k = hp.pchoice("k", [(0.1, 0), (0.9, 1)])
z = a + b + bb + c + d + e + f + g + h + i + j + k
return {"loss": scope.float(scope.log(1e-12 + z**2)), "status": base.STATUS_OK}
@domain_constructor(loss_target=0.398)
def branin():
"""
The Branin, or Branin-Hoo, function has three global minima,
and is roughly an angular trough across a 2D input space.
f(x, y) = a (y - b x ** 2 + c x - r ) ** 2 + s (1 - t) cos(x) + s
The recommended values of a, b, c, r, s and t are:
a = 1
b = 5.1 / (4 pi ** 2)
c = 5 / pi
r = 6
s = 10
t = 1 / (8 * pi)
Global Minima:
[(-pi, 12.275),
(pi, 2.275),
(9.42478, 2.475)]
Source: http://www.sfu.ca/~ssurjano/branin.html
"""
x = hp.uniform("x", -5.0, 10.0)
y = hp.uniform("y", 0.0, 15.0)
pi = float(np.pi)
loss = (
(y - (old_div(5.1, (4 * pi**2))) * x**2 + 5 * x / pi - 6) ** 2
+ 10 * (1 - old_div(1, (8 * pi))) * scope.cos(x)
+ 10
)
return {"loss": loss, "loss_variance": 0, "status": base.STATUS_OK}
class DomainExperimentMixin:
def test_basic(self):
domain = self._domain_cls()
# print 'domain params', domain.params, domain
# print 'algo params', algo.vh.params
trials = Trials()
fmin(
lambda x: x,
domain.expr,
trials=trials,
algo=suggest,
rstate=np.random.default_rng(4),
max_evals=self._n_steps,
)
assert trials.average_best_error(domain) - domain.loss_target < 0.2
@classmethod
def make(cls, domain_cls, n_steps=500):
class Tester(unittest.TestCase, cls):
def setUp(self):
self._n_steps = n_steps
self._domain_cls = domain_cls
Tester.__name__ = domain_cls.__name__ + "Tester"
return Tester
quadratic1Tester = DomainExperimentMixin.make(quadratic1)
q1_lognormalTester = DomainExperimentMixin.make(q1_lognormal)
q1_choiceTester = DomainExperimentMixin.make(q1_choice)
n_armsTester = DomainExperimentMixin.make(n_arms)
distractorTester = DomainExperimentMixin.make(distractor)
gauss_waveTester = DomainExperimentMixin.make(gauss_wave)
gauss_wave2Tester = DomainExperimentMixin.make(gauss_wave2, n_steps=5000)
many_distsTester = DomainExperimentMixin.make(many_dists)
braninTester = DomainExperimentMixin.make(branin)
class CasePerDomain:
# -- this is a mixin
# -- Override self.work to execute a test for each kind of self.bandit
def test_quadratic1(self):
self.bandit = quadratic1()
self.work()
def test_q1lognormal(self):
self.bandit = q1_lognormal()
self.work()
def test_twoarms(self):
self.bandit = n_arms()
self.work()
def test_distractor(self):
self.bandit = distractor()
self.work()
def test_gausswave(self):
self.bandit = gauss_wave()
self.work()
def test_gausswave2(self):
self.bandit = gauss_wave2()
self.work()
def test_many_dists(self):
self.bandit = many_dists()
self.work()
def test_branin(self):
self.bandit = branin()
self.work()
class NonCategoricalCasePerDomain:
# -- this is a mixin
# -- Override self.work to execute a test for each kind of self.bandit
def test_quadratic1(self):
self.bandit = quadratic1()
self.work()
def test_q1lognormal(self):
self.bandit = q1_lognormal()
self.work()
def test_twoarms(self):
self.bandit = n_arms()
self.work()
def test_distractor(self):
self.bandit = distractor()
self.work()
def test_branin(self):
self.bandit = branin()
self.work()
# -- non-blank last line for flake8
| 8,728 | 27.067524 | 85 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_pyll_utils.py |
from hyperopt import pyll_utils
from hyperopt.pyll_utils import EQ
from hyperopt.pyll_utils import expr_to_config
from hyperopt import hp
from hyperopt.pyll import as_apply
from hyperopt.pyll.stochastic import sample
import unittest
import numpy as np
import pytest
def test_expr_to_config():
z = hp.randint("z", 10)
a = hp.choice(
"a",
[
hp.uniform("b", -1, 1) + z,
{
"c": 1,
"d": hp.choice(
"d", [3 + hp.loguniform("c", 0, 1), 1 + hp.loguniform("e", 0, 1)]
),
},
],
)
expr = as_apply((a, z))
hps = {}
expr_to_config(expr, (True,), hps)
for label, dct in list(hps.items()):
print(label)
print(
" dist: %s(%s)"
% (
dct["node"].name,
", ".join(map(str, [ii.eval() for ii in dct["node"].inputs()])),
)
)
if len(dct["conditions"]) > 1:
print(" conditions (OR):")
for condseq in dct["conditions"]:
print(" ", " AND ".join(map(str, condseq)))
elif dct["conditions"]:
for condseq in dct["conditions"]:
print(" conditions :", " AND ".join(map(str, condseq)))
assert hps["a"]["node"].name == "randint"
assert hps["b"]["node"].name == "uniform"
assert hps["c"]["node"].name == "loguniform"
assert hps["d"]["node"].name == "randint"
assert hps["e"]["node"].name == "loguniform"
assert hps["z"]["node"].name == "randint"
assert {(True, EQ("a", 0))} == {(True, EQ("a", 0))}
assert hps["a"]["conditions"] == {(True,)}
assert hps["b"]["conditions"] == {(True, EQ("a", 0))}, hps["b"]["conditions"]
assert hps["c"]["conditions"] == {(True, EQ("a", 1), EQ("d", 0))}
assert hps["d"]["conditions"] == {(True, EQ("a", 1))}
assert hps["e"]["conditions"] == {(True, EQ("a", 1), EQ("d", 1))}
assert hps["z"]["conditions"] == {(True,), (True, EQ("a", 0))}
def test_remove_allpaths():
z = hp.uniform("z", 0, 10)
a = hp.choice("a", [z + 1, z - 1])
hps = {}
expr_to_config(a, (True,), hps)
aconds = hps["a"]["conditions"]
zconds = hps["z"]["conditions"]
assert aconds == {(True,)}, aconds
assert zconds == {(True,)}, zconds
def test_remove_allpaths_int():
z = hp.uniformint("z", 0, 10)
a = hp.choice("a", [z + 1, z - 1])
hps = {}
expr_to_config(a, (True,), hps)
aconds = hps["a"]["conditions"]
zconds = hps["z"]["conditions"]
assert aconds == {(True,)}, aconds
assert zconds == {(True,)}, zconds
@pyll_utils.validate_distribution_range
def stub_pyll_fn(label, low, high):
"""
Stub function to test distribution range validation fn
"""
pass
@pytest.mark.parametrize(
"arguments", [["z", 0, 10], {"label": "z", "low": 0, "high": 10}]
)
def test_uniformint_arguments(arguments):
"""
Test whether uniformint can accept both positional and keyword arguments.
Related to PR #704.
"""
if isinstance(arguments, list):
space = hp.uniformint(*arguments)
if isinstance(arguments, dict):
space = hp.uniformint(**arguments)
rng = np.random.default_rng(np.random.PCG64(123))
values = [sample(space, rng=rng) for _ in range(10)]
assert values == [7, 1, 2, 2, 2, 8, 9, 3, 8, 9]
class TestValidateDistributionRange(unittest.TestCase):
"""
    We can't test low passed as a kwarg while high is passed positionally,
    because that is not a valid function call in Python.
"""
def test_raises_error_for_low_arg_high_arg(self):
self.assertRaises(ValueError, stub_pyll_fn, "stub", 1, 1)
def test_raises_error_for_low_arg_high_kwarg(self):
self.assertRaises(ValueError, stub_pyll_fn, "stub", 1, high=1)
def test_raises_error_for_low_kwarg_high_kwarg(self):
self.assertRaises(ValueError, stub_pyll_fn, "stub", low=1, high=1)
class TestDistributionsWithRangeValidateBoundries(unittest.TestCase):
def test_hp_uniform_raises_error_when_range_is_zero(self):
self.assertRaises(ValueError, hp.uniform, "stub", 10, 10)
def test_hp_quniform_raises_error_when_range_is_zero(self):
self.assertRaises(ValueError, hp.quniform, "stub", 10, 10, 1)
def test_hp_loguniform_raises_error_when_range_is_zero(self):
self.assertRaises(ValueError, hp.loguniform, "stub", 10, 10, 1)
def test_hp_qloguniform_raises_error_when_range_is_zero(self):
self.assertRaises(ValueError, hp.qloguniform, "stub", 10, 10, 1)
| 4,559 | 31.571429 | 85 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_criteria.py |
from past.utils import old_div
import numpy as np
import hyperopt.criteria as crit
def test_ei():
rng = np.random.default_rng(123)
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 5, 0.25) * np.sqrt(var) + mean
v_n = [
crit.EI_gaussian_empirical(mean, var, thresh, rng, 10000)
for thresh in thresholds
]
v_a = [crit.EI_gaussian(mean, var, thresh) for thresh in thresholds]
# import matplotlib.pyplot as plt
# plt.plot(thresholds, v_n)
# plt.plot(thresholds, v_a)
# plt.show()
if not np.allclose(v_n, v_a, atol=0.03, rtol=0.03):
for t, n, a in zip(thresholds, v_n, v_a):
print((t, n, a, abs(n - a), old_div(abs(n - a), (abs(n) + abs(a)))))
assert 0
# mean, var, thresh, v_n, v_a)
def test_log_ei():
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 30, 0.25) * np.sqrt(var) + mean
ei = np.asarray([crit.EI_gaussian(mean, var, thresh) for thresh in thresholds])
nlei = np.asarray(
[crit.logEI_gaussian(mean, var, thresh) for thresh in thresholds]
)
naive = np.log(ei)
# import matplotlib.pyplot as plt
# plt.plot(thresholds, ei, label='ei')
# plt.plot(thresholds, nlei, label='nlei')
# plt.plot(thresholds, naive, label='naive')
# plt.legend()
# plt.show()
# -- assert that they match when the threshold isn't too high
assert np.allclose(nlei, naive)
def test_log_ei_range():
assert np.all(
np.isfinite(
[
crit.logEI_gaussian(0, 1, thresh)
for thresh in [-500, 0, 50, 100, 500, 5000]
]
)
)
def test_ucb():
assert np.allclose(crit.UCB(0, 1, 1), 1)
assert np.allclose(crit.UCB(0, 1, 2), 2)
assert np.allclose(crit.UCB(0, 4, 1), 2)
assert np.allclose(crit.UCB(1, 4, 1), 3)
# -- flake8
| 1,994 | 28.338235 | 87 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_fmin.py |
import unittest
import numpy as np
from timeit import default_timer as timer
import time
from hyperopt.early_stop import no_progress_loss
from hyperopt.fmin import generate_trials_to_calculate
import pytest
from hyperopt import (
fmin,
rand,
tpe,
hp,
Trials,
exceptions,
space_eval,
STATUS_FAIL,
STATUS_OK,
)
from hyperopt.base import JOB_STATE_ERROR
def test_quadratic1_rand():
trials = Trials()
argmin = fmin(
fn=lambda x: (x - 3) ** 2,
space=hp.uniform("x", -5, 5),
algo=rand.suggest,
max_evals=500,
trials=trials,
rstate=np.random.default_rng(np.random.PCG64(123)),
)
assert len(trials) == 500
assert abs(argmin["x"] - 3.0) < 0.25
def test_quadratic1_tpe(trials=Trials()):
argmin = fmin(
fn=lambda x: (x - 3) ** 2,
space=hp.uniform("x", -5, 5),
algo=tpe.suggest,
max_evals=50,
trials=trials,
rstate=np.random.default_rng(np.random.PCG64(123)),
)
assert len(trials) == 50, len(trials)
assert abs(argmin["x"] - 3.0) < 0.25, argmin
def test_quadratic1_anneal():
trials = Trials()
import hyperopt.anneal
N = 30
def fn(x):
return (x - 3) ** 2
argmin = fmin(
fn=fn,
space=hp.uniform("x", -5, 5),
algo=hyperopt.anneal.suggest,
max_evals=N,
trials=trials,
rstate=np.random.default_rng(np.random.PCG64(123)),
)
print(argmin)
assert len(trials) == N
assert abs(argmin["x"] - 3.0) < 0.25
def test_duplicate_label_is_error():
trials = Trials()
def fn(xy):
x, y = xy
return x**2 + y**2
with pytest.raises(exceptions.DuplicateLabel):
fmin(
fn=fn,
space=[hp.uniform("x", -5, 5), hp.uniform("x", -5, 5)],
algo=rand.suggest,
max_evals=500,
trials=trials,
rstate=np.random.default_rng(0),
)
def test_space_eval():
space = hp.choice(
"a",
[
("case 1", 1 + hp.lognormal("c1", 0, 1)),
("case 2", hp.uniform("c2", -10, 10)),
],
)
assert space_eval(space, {"a": 0, "c1": 1.0}) == ("case 1", 2.0)
assert space_eval(space, {"a": 1, "c2": 3.5}) == ("case 2", 3.5)
def test_set_fmin_rstate():
def lossfn(x):
return (x - 3) ** 2
trials_seed0 = Trials()
argmin_seed0 = fmin(
fn=lossfn,
space=hp.uniform("x", -5, 5),
algo=rand.suggest,
max_evals=1,
trials=trials_seed0,
rstate=np.random.default_rng(0),
)
assert len(trials_seed0) == 1
trials_seed1 = Trials()
argmin_seed1 = fmin(
fn=lossfn,
space=hp.uniform("x", -5, 5),
algo=rand.suggest,
max_evals=1,
trials=trials_seed1,
rstate=np.random.default_rng(1),
)
assert len(trials_seed1) == 1
assert argmin_seed0 != argmin_seed1
def test_fmin_return_argmin():
def fn(x):
return x
space = hp.choice("x", [100, 5, 10])
# With return_argmin=False it should return the
# best parameter values
best_parameter = fmin(
fn=fn,
space=space,
max_evals=10,
algo=rand.suggest,
return_argmin=False,
rstate=np.random.default_rng(0),
)
assert best_parameter == 5
# With return_argmin=True it should return the
# optimal point in the sample space
best_args = fmin(
fn=fn,
space=space,
max_evals=10,
algo=rand.suggest,
return_argmin=True,
rstate=np.random.default_rng(0),
)
assert best_args["x"] == 1
class TestFmin(unittest.TestCase):
class SomeError(Exception):
# XXX also test domain.exceptions mechanism that actually catches this
pass
def eval_fn(self, space):
raise TestFmin.SomeError()
def setUp(self):
self.trials = Trials()
def test_catch_eval_exceptions_True(self):
# -- should go to max_evals, catching all exceptions, so all jobs
# should have JOB_STATE_ERROR
fmin(
self.eval_fn,
space=hp.uniform("x", 0, 1),
algo=rand.suggest,
trials=self.trials,
max_evals=2,
catch_eval_exceptions=True,
return_argmin=False,
)
trials = self.trials
assert len(trials) == 0
assert len(trials._dynamic_trials) == 2
        assert trials._dynamic_trials[0]["state"] == JOB_STATE_ERROR
        assert trials._dynamic_trials[0]["misc"]["error"] is not None
        assert trials._dynamic_trials[1]["state"] == JOB_STATE_ERROR
        assert trials._dynamic_trials[1]["misc"]["error"] is not None
def test_catch_eval_exceptions_False(self):
with self.assertRaises(TestFmin.SomeError):
fmin(
self.eval_fn,
space=hp.uniform("x", 0, 1),
algo=rand.suggest,
trials=self.trials,
max_evals=2,
catch_eval_exceptions=False,
)
print(len(self.trials))
assert len(self.trials) == 0
assert len(self.trials._dynamic_trials) == 1
def test_status_fail_tpe():
trials = Trials()
argmin = fmin(
fn=lambda x: (
{"loss": (x - 3) ** 2, "status": STATUS_OK}
if (x < 0)
else {"status": STATUS_FAIL}
),
space=hp.uniform("x", -5, 5),
algo=tpe.suggest,
max_evals=50,
trials=trials,
)
assert len(trials) == 50, len(trials)
assert argmin["x"] < 0, argmin
assert "loss" in trials.best_trial["result"], "loss" in trials.best_trial["result"]
assert trials.best_trial["result"]["loss"] >= 9, trials.best_trial["result"]["loss"]
class TestGenerateTrialsToCalculate(unittest.TestCase):
def test_generate_trials_to_calculate(self):
points = [{"x": 0.0, "y": 0.0}, {"x": 1.0, "y": 1.0}]
best = fmin(
fn=lambda space: space["x"] ** 2 + space["y"] ** 2,
space={"x": hp.uniform("x", -10, 10), "y": hp.uniform("y", -10, 10)},
algo=tpe.suggest,
max_evals=10,
points_to_evaluate=points,
)
assert best["x"] == 0.0
assert best["y"] == 0.0
def test_timeout():
def fn(x):
return [time.sleep(1), x][1]
space = hp.choice("x", range(20))
start_time_1 = timer()
fmin(
fn=fn,
space=space,
max_evals=10,
timeout=1,
algo=rand.suggest,
return_argmin=False,
rstate=np.random.default_rng(0),
)
end_time_1 = timer()
assert (end_time_1 - start_time_1) < 2
assert (end_time_1 - start_time_1) > 0.9
start_time_5 = timer()
fmin(
fn=fn,
space=space,
max_evals=10,
timeout=5,
algo=rand.suggest,
return_argmin=False,
rstate=np.random.default_rng(0),
)
end_time_5 = timer()
assert (end_time_5 - start_time_5) < 6
assert (end_time_5 - start_time_5) > 4.9
def test_invalid_timeout():
def fn(x):
return [time.sleep(1), x][1]
space = hp.choice("x", range(20))
for wrong_timeout in [-1, True]:
expected_message = "The timeout argument should be None or a positive value. Given value: {m}".format(
m=wrong_timeout
)
try:
fmin(
fn=fn,
space=space,
max_evals=10,
timeout=wrong_timeout,
algo=rand.suggest,
return_argmin=False,
rstate=np.random.default_rng(0),
)
except Exception as e:
assert str(e) == expected_message
def test_loss_threshold():
loss_threshold = 0.001
hypopt_trials = Trials()
fmin(
fn=lambda x: x**2,
space=hp.uniform("x", -10, 10),
loss_threshold=loss_threshold,
algo=rand.suggest,
trials=hypopt_trials,
rstate=np.random.default_rng(0),
)
best_loss = hypopt_trials.best_trial["result"]["loss"]
assert best_loss <= loss_threshold
assert len(hypopt_trials) > 0
def test_invalid_loss_threshold():
def fn(x):
return [time.sleep(1), x][1]
space = hp.choice("x", range(20))
for wrong_loss_threshold in ["a", True]:
expected_message = (
"The loss_threshold argument should be None "
"or a numeric value. Given value: {m}".format(m=wrong_loss_threshold)
)
try:
fmin(
fn=fn,
space=space,
max_evals=10,
loss_threshold=wrong_loss_threshold,
algo=rand.suggest,
return_argmin=False,
rstate=np.random.default_rng(0),
)
except Exception as e:
assert str(e) == expected_message
def test_early_stop():
trials = Trials()
# basic stop after 100 trials
def stop(trial, count=0):
return count + 1 >= 100, [count + 1]
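    # -- an early_stop_fn returns (stop_now, new_args); the returned list is
    #    passed back as extra positional arguments on the next call, so
    #    `count` accumulates across trials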
fmin(
fn=lambda x: x,
space=hp.uniform("x", -5, 5),
algo=rand.suggest,
max_evals=500,
trials=trials,
early_stop_fn=stop,
)
assert len(trials) == 100
def test_early_stop_no_progress_loss():
trials = generate_trials_to_calculate([{"x": -100}])
fmin(
fn=lambda x: x,
space=hp.uniform("x", -5, 5),
algo=rand.suggest,
max_evals=500,
trials=trials,
early_stop_fn=no_progress_loss(10),
)
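    # -- the injected point x=-100 is unbeatable by draws from U(-5, 5), so
    #    no_progress_loss(10) stops the run after 10 trials without
    #    improvement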
assert len(trials) == 10
def test_annotated_params_space():
def objective(x: hp.uniform("x", -10, 10), y: hp.uniform("y", -10, 10)):
return (x * y) ** 2
trials = Trials()
fmin(objective, space="annotated", algo=tpe.suggest, max_evals=10, trials=trials)
assert len(trials) == 10
def test_invalid_annotated_params_space():
def objective(x: hp.uniform("x", -10, 10), y: float):
return (x * y) ** 2
with pytest.raises(exceptions.InvalidAnnotatedParameter):
fmin(objective, space="annotated", algo=tpe.suggest, max_evals=10)
| 10,176 | 24.764557 | 110 | py |
| hyperopt | hyperopt-master/hyperopt/tests/unit/test_pchoice.py |
from functools import partial
import numpy as np
import unittest
from hyperopt import hp, Trials, fmin, tpe, anneal, rand
import hyperopt.pyll.stochastic
class TestPChoice(unittest.TestCase):
def test_basic(self):
space = hp.pchoice(
"naive_type",
[(0.14, "gaussian"), (0.02, "multinomial"), (0.84, "bernoulli")],
)
a, b, c = 0, 0, 0
rng = np.random.default_rng(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == "gaussian":
a += 1
elif nesto == "multinomial":
b += 1
elif nesto == "bernoulli":
c += 1
print((a, b, c))
assert a + b + c == 1000
assert 120 < a < 160
assert 0 < b < 40
assert 800 < c < 900
def test_basic2(self):
space = hp.choice(
"normal_choice",
[
hp.pchoice("fsd", [(0.1, "first"), (0.8, "second"), (0.1, 2)]),
hp.choice("something_else", [10, 20]),
],
)
a, b, c = 0, 0, 0
rng = np.random.default_rng(123)
for i in range(0, 1000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == "first":
a += 1
elif nesto == "second":
b += 1
elif nesto == 2:
c += 1
elif nesto in (10, 20):
pass
else:
assert 0, nesto
print((a, b, c))
assert b > 2 * a
assert b > 2 * c
def test_basic3(self):
space = hp.pchoice(
"something",
[
(0.2, hp.pchoice("number", [(0.8, 2), (0.2, 1)])),
(0.8, hp.pchoice("number1", [(0.7, 5), (0.3, 6)])),
],
)
a, b, c, d = 0, 0, 0, 0
rng = np.random.default_rng(123)
for i in range(0, 2000):
nesto = hyperopt.pyll.stochastic.sample(space, rng=rng)
if nesto == 2:
a += 1
elif nesto == 1:
b += 1
elif nesto == 5:
c += 1
elif nesto == 6:
d += 1
else:
assert 0, nesto
print((a, b, c, d))
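        # -- the outer pchoice routes ~0.2 * 2000 = 400 draws through
        #    'number' and ~0.8 * 2000 = 1600 through 'number1'; within each
        #    branch the counts should follow the nested pchoice weights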
assert a + b + c + d == 2000
assert 300 < a + b < 500
assert 1500 < c + d < 1700
assert a * 0.3 > b # a * 1.2 > 4 * b
assert c * 3 * 1.2 > d * 7
class TestSimpleFMin(unittest.TestCase):
    # test that a space with a pchoice in it is
# (a) accepted for each algo (random, tpe, anneal)
# and
# (b) handled correctly.
#
def setUp(self):
self.space = hp.pchoice("a", [(0.1, 0), (0.2, 1), (0.3, 2), (0.4, 3)])
self.trials = Trials()
def objective(self, a):
return [1, 1, 1, 0][a]
def test_random(self):
max_evals = 150
fmin(
self.objective,
space=self.space,
trials=self.trials,
algo=rand.suggest,
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
a_vals = [t["misc"]["vals"]["a"][0] for t in self.trials.trials]
counts = np.bincount(a_vals)
assert counts[3] > max_evals * 0.35
assert counts[3] < max_evals * 0.60
def test_tpe(self):
max_evals = 100
fmin(
self.objective,
space=self.space,
trials=self.trials,
algo=partial(tpe.suggest, n_startup_jobs=10),
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
a_vals = [t["misc"]["vals"]["a"][0] for t in self.trials.trials]
counts = np.bincount(a_vals)
assert counts[3] > max_evals * 0.6
def test_anneal(self):
max_evals = 100
fmin(
self.objective,
space=self.space,
trials=self.trials,
algo=partial(anneal.suggest),
rstate=np.random.default_rng(4),
max_evals=max_evals,
)
a_vals = [t["misc"]["vals"]["a"][0] for t in self.trials.trials]
counts = np.bincount(a_vals)
assert counts[3] > max_evals * 0.6
def test_constant_fn_rand():
space = hp.choice(
"preprocess_choice",
[
{"pwhiten": hp.pchoice("whiten_randomPCA", [(0.3, False), (0.7, True)])},
{"palgo": False},
{"pthree": 7},
],
)
fmin(fn=lambda x: 1, space=space, algo=rand.suggest, max_evals=50)
def test_constant_fn_tpe():
space = hp.choice(
"preprocess_choice",
[
{"pwhiten": hp.pchoice("whiten_randomPCA", [(0.3, False), (0.7, True)])},
{"palgo": False},
{"pthree": 7},
],
)
fmin(
fn=lambda x: 1,
space=space,
algo=tpe.suggest,
max_evals=50,
rstate=np.random.default_rng(44),
)
def test_constant_fn_anneal():
space = hp.choice(
"preprocess_choice",
[
{"pwhiten": hp.pchoice("whiten_randomPCA", [(0.3, False), (0.7, True)])},
{"palgo": False},
{"pthree": 7},
],
)
fmin(fn=lambda x: 1, space=space, algo=anneal.suggest, max_evals=50)
| 5,341 | 27.566845 | 85 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/unit/test_progress.py
|
import sys
from hyperopt.progress import tqdm_progress_callback
def test_tqdm_progress_callback_restores_stdout():
real_stdout = sys.stdout
with tqdm_progress_callback(initial=0, total=100) as ctx:
assert sys.stdout != real_stdout
ctx.postfix = "best loss: 4711"
ctx.update(42)
assert sys.stdout == real_stdout
| 350 | 26 | 61 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/unit/test_utils.py
|
import numpy as np
from nose.tools import raises, eq_
import shutil
import os
from hyperopt.utils import fast_isin
from hyperopt.utils import get_most_recent_inds
from hyperopt.utils import temp_dir, working_dir, get_closest_dir, path_split_all
def test_fast_isin():
Y = np.random.randint(0, 10000, size=(100,))
X = np.arange(10000)
Z = fast_isin(X, Y)
D = np.unique(Y)
D.sort()
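    # X covers 0..9999 in ascending order, so X[Z] must equal the sorted
    # unique values of Y.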
T1 = (X[Z] == D).all()
X = np.array(list(range(10000)) + list(range(10000)))
Z = fast_isin(X, Y)
T2 = (X[Z] == np.append(D, D.copy())).all()
X = np.random.randint(0, 100, size=(40,))
X.sort()
Y = np.random.randint(0, 100, size=(60,))
Y.sort()
XinY = np.array([ind for ind in range(len(X)) if X[ind] in Y])
YinX = np.array([ind for ind in range(len(Y)) if Y[ind] in X])
T3 = (fast_isin(X, Y).nonzero()[0] == XinY).all()
T4 = (fast_isin(Y, X).nonzero()[0] == YinX).all()
assert T1 & T2 & T3 & T4
def test_get_most_recent_inds():
test_data = []
most_recent_data = []
for ind in range(300):
k = np.random.randint(1, 6)
for _ind in range(k):
test_data.append({"_id": ind, "version": _ind})
most_recent_data.append({"_id": ind, "version": _ind})
rng = np.random.default_rng(0)
p = rng.permutation(len(test_data))
test_data_rearranged = [test_data[_p] for _p in p]
rind = get_most_recent_inds(test_data_rearranged)
test_data_rearranged_most_recent = [test_data_rearranged[idx] for idx in rind]
assert all([t in most_recent_data for t in test_data_rearranged_most_recent])
assert len(test_data_rearranged_most_recent) == len(most_recent_data)
test_data = [{"_id": 0, "version": 1}]
assert get_most_recent_inds(test_data).tolist() == [0]
test_data = [{"_id": 0, "version": 1}, {"_id": 0, "version": 2}]
assert get_most_recent_inds(test_data).tolist() == [1]
test_data = [
{"_id": 0, "version": 1},
{"_id": 0, "version": 2},
{"_id": 1, "version": 1},
]
assert get_most_recent_inds(test_data).tolist() == [1, 2]
test_data = [
{"_id": -1, "version": 1},
{"_id": 0, "version": 1},
{"_id": 0, "version": 2},
{"_id": 1, "version": 1},
]
assert get_most_recent_inds(test_data).tolist() == [0, 2, 3]
test_data = [
{"_id": -1, "version": 1},
{"_id": 0, "version": 1},
{"_id": 0, "version": 2},
{"_id": 0, "version": 2},
]
assert get_most_recent_inds(test_data).tolist() == [0, 3]
@raises(RuntimeError)
def test_temp_dir_pardir():
with temp_dir("../test_temp_dir"):
pass
def test_temp_dir():
fn = "test_temp_dir"
if os.path.exists(fn):
print("Path %s exists, not running test_temp_dir()" % fn)
return
try:
assert not os.path.exists(fn)
with temp_dir(fn):
assert os.path.exists(fn)
assert os.path.exists(fn)
os.rmdir(fn)
assert not os.path.exists(fn)
with temp_dir(fn, erase_after=True):
assert os.path.exists(fn)
assert not os.path.exists(fn)
finally:
if os.path.isdir(fn):
os.rmdir(fn)
def test_path_split_all():
ll = "foo bar baz".split()
path = os.path.join(*ll)
eq_(list(path_split_all(path)), ll)
def test_temp_dir_sentinel():
from os.path import join, isdir, exists
basedir = "test_temp_dir_sentinel"
fn = join(basedir, "foo", "bar")
if exists(basedir):
print("Path %s exists, not running test_temp_dir_sentinel()" % basedir)
return
os.makedirs(basedir)
eq_(get_closest_dir(fn)[0], basedir)
eq_(get_closest_dir(fn)[1], "foo")
sentinel = join(basedir, "foo.inuse")
try:
with temp_dir(fn, erase_after=True, with_sentinel=True):
assert isdir(fn)
assert exists(sentinel)
# simulate work
open(join(fn, "dummy.txt"), "w").close()
# work file should be deleted together with directory
assert not exists(fn)
assert not exists(join(basedir, "foo"))
# basedir should still exist, though!
assert isdir(basedir)
finally:
if isdir(basedir):
shutil.rmtree(basedir)
def test_workdir():
fn = "test_work_dir"
os.makedirs(fn)
try:
assert fn not in os.getcwd()
with working_dir(fn):
assert fn in os.getcwd()
assert fn not in os.getcwd()
finally:
if os.path.isdir(fn):
os.rmdir(fn)
| 4,546 | 27.778481 | 82 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/integration/test_mongoexp.py
|
import pickle as pickle
import os
import signal
import subprocess
import sys
import traceback
import threading
import time
import unittest
import numpy as np
import nose
import nose.plugins.skip
from hyperopt.base import JOB_STATE_DONE, STATUS_OK
from hyperopt.mongoexp import parse_url
from hyperopt.mongoexp import MongoTrials
from hyperopt.mongoexp import MongoWorker
from hyperopt.mongoexp import ReserveTimeout
from hyperopt.mongoexp import as_mongo_str
from hyperopt.mongoexp import main_worker_helper
from hyperopt.mongoexp import MongoJobs
from hyperopt.fmin import fmin
from hyperopt import hp, rand
import hyperopt.tests.test_base
from hyperopt.tests.unit.test_domains import gauss_wave2
def skiptest(f):
def wrapper(*args, **kwargs):
raise nose.plugins.skip.SkipTest()
wrapper.__name__ = f.__name__
return wrapper
class TempMongo:
"""
Context manager for tests requiring a live database.
with TempMongo() as foo:
mj = foo.mongo_jobs('test1')
"""
def __init__(self, workdir="/tmp/hyperopt_test"):
self.workdir = workdir
def __enter__(self):
try:
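            # opening a missing path (or a directory) raises OSError, so
            # `assert 0` only fires if workdir already exists as a file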
open(self.workdir)
assert 0
except OSError:
subprocess.call(["mkdir", "-p", "%s/db" % self.workdir])
proc_args = [
"mongod",
"--dbpath=%s/db" % self.workdir,
"--noprealloc",
"--port=22334",
]
print("starting mongod", proc_args)
self.mongo_proc = subprocess.Popen(
proc_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.workdir, # this prevented mongod assertion fail
)
try:
interval = 0.125
while interval <= 2:
if interval > 0.125:
print("Waiting for mongo to come up")
time.sleep(interval)
interval *= 2
if self.db_up():
break
if self.db_up():
return self
else:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
out, err = self.mongo_proc.communicate()
print(out, file=sys.stderr)
print(err, file=sys.stderr)
raise RuntimeError("No database connection", proc_args)
except Exception as e:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
raise e
def __exit__(self, *args):
os.kill(self.mongo_proc.pid, signal.SIGTERM)
self.mongo_proc.wait()
subprocess.call(["rm", "-Rf", self.workdir])
@staticmethod
def connection_string(dbname):
return as_mongo_str(f"localhost:22334/{dbname}/jobs")
@staticmethod
def mongo_jobs(dbname):
return MongoJobs.new_from_connection_str(TempMongo.connection_string(dbname))
def db_up(self):
try:
self.mongo_jobs("__test_db")
return True
        except Exception:  # XXX: don't know what exceptions to put here
return False
def test_parse_url():
uris = [
"mongo://hyperopt:[email protected]:27017/hyperoptdb/jobs",
"mongo://hyperopt:[email protected]:27017/hyperoptdb/jobs?authSource=db1",
]
expected = [
("mongo", "hyperopt", "foobar", "127.0.0.1", 27017, "hyperoptdb", "jobs", None),
(
"mongo",
"hyperopt",
"foobar",
"127.0.0.1",
27017,
"hyperoptdb",
"jobs",
"db1",
),
]
for i, uri in enumerate(uris):
assert parse_url(uri) == expected[i]
# -- If we can't create a TempMongo instance, then simply print what
# happened and skip all the mongo tests.
try:
with TempMongo() as temp_mongo:
pass
except OSError as e:
print(e, file=sys.stderr)
print(
("Failed to create a TempMongo context," " skipping all mongo tests."),
file=sys.stderr,
)
if "such file" in str(e):
print("Hint: is mongod executable on path?", file=sys.stderr)
raise nose.SkipTest()
class TestMongoTrials(hyperopt.tests.test_base.TestTrials):
def setUp(self):
self.temp_mongo = TempMongo()
self.temp_mongo.__enter__()
self.trials = MongoTrials(
self.temp_mongo.connection_string("foo"), exp_key=None
)
def tearDown(self, *args):
self.temp_mongo.__exit__(*args)
def with_mongo_trials(f, exp_key=None):
def wrapper():
with TempMongo() as temp_mongo:
trials = MongoTrials(temp_mongo.connection_string("foo"), exp_key=exp_key)
print("Length of trials: ", len(trials.results))
f(trials)
wrapper.__name__ = f.__name__
return wrapper
def _worker_thread_fn(host_id, n_jobs, timeout, dbname="foo", logfilename=None):
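    """Run up to n_jobs reserved jobs on a MongoWorker, stopping early if a
    reservation times out."""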
mw = MongoWorker(
mj=TempMongo.mongo_jobs(dbname),
logfilename=logfilename,
workdir="mongoexp_test_dir",
)
try:
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
except ReserveTimeout:
print("worker timed out:", host_id)
pass
def with_worker_threads(n_threads, dbname="foo", n_jobs=sys.maxsize, timeout=10.0):
"""
Decorator that will run a test with some MongoWorker threads in flight
"""
def newth(ii):
return threading.Thread(
target=_worker_thread_fn, args=(("hostname", ii), n_jobs, timeout, dbname)
)
def deco(f):
def wrapper(*args, **kwargs):
# --start some threads
threads = list(map(newth, list(range(n_threads))))
[th.start() for th in threads]
try:
return f(*args, **kwargs)
finally:
[th.join() for th in threads]
wrapper.__name__ = f.__name__ # -- nose requires test in name
return wrapper
return deco
@with_mongo_trials
def test_with_temp_mongo(trials):
pass # -- just verify that the decorator can run
@with_mongo_trials
def test_new_trial_ids(trials):
a = trials.new_trial_ids(1)
b = trials.new_trial_ids(2)
c = trials.new_trial_ids(3)
assert len(a) == 1
assert len(b) == 2
assert len(c) == 3
s = set()
s.update(a)
s.update(b)
s.update(c)
assert len(s) == 6
@with_mongo_trials
def test_attachments(trials):
blob = b"abcde"
assert "aname" not in trials.attachments
trials.attachments["aname"] = blob
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob
assert trials.attachments["aname"] == blob
blob2 = b"zzz"
trials.attachments["aname"] = blob2
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob2
assert trials.attachments["aname"] == blob2
del trials.attachments["aname"]
assert "aname" not in trials.attachments
@with_mongo_trials
def test_delete_all_on_attachments(trials):
trials.attachments["aname"] = "a"
trials.attachments["aname2"] = "b"
assert "aname2" in trials.attachments
trials.delete_all()
assert "aname" not in trials.attachments
assert "aname2" not in trials.attachments
def test_handles_are_independent():
with TempMongo() as tm:
t1 = tm.mongo_jobs("t1")
t2 = tm.mongo_jobs("t2")
assert len(t1) == 0
assert len(t2) == 0
# test that inserting into t1 doesn't affect t2
t1.insert({"a": 7})
assert len(t1) == 1
assert len(t2) == 0
def passthrough(x):
assert os.path.split(os.getcwd()).count("mongoexp_test_dir") == 1, (
"cwd is %s" % os.getcwd()
)
return x
class TestExperimentWithThreads(unittest.TestCase):
@staticmethod
def worker_thread_fn(host_id, n_jobs, timeout):
mw = MongoWorker(
mj=TempMongo.mongo_jobs("foodb"),
logfilename=None,
workdir="mongoexp_test_dir",
)
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
@staticmethod
def fmin_thread_fn(space, trials, max_evals, seed):
fmin(
fn=passthrough,
space=space,
algo=rand.suggest,
trials=trials,
rstate=np.random.default_rng(seed),
max_evals=max_evals,
return_argmin=False,
)
def test_seeds_AAB(self):
# launch 3 simultaneous experiments with seeds A, A, B.
# Verify all experiments run to completion.
# Verify first two experiments run identically.
# Verify third experiment runs differently.
exp_keys = ["A0", "A1", "B"]
seeds = [1, 1, 2]
n_workers = 2
jobs_per_thread = 6
# -- total jobs = 2 * 6 = 12
# -- divided by 3 experiments: 4 jobs per fmin
max_evals = (n_workers * jobs_per_thread) // len(exp_keys)
# -- should not matter which domain is used here
domain = gauss_wave2()
pickle.dumps(domain.expr)
pickle.dumps(passthrough)
worker_threads = [
threading.Thread(
target=TestExperimentWithThreads.worker_thread_fn,
args=(("hostname", ii), jobs_per_thread, 30.0),
)
for ii in range(n_workers)
]
with TempMongo() as tm:
mj = tm.mongo_jobs("foodb")
print(mj)
trials_list = [
MongoTrials(tm.connection_string("foodb"), key) for key in exp_keys
]
fmin_threads = [
threading.Thread(
target=TestExperimentWithThreads.fmin_thread_fn,
args=(domain.expr, trials, max_evals, seed),
)
for seed, trials in zip(seeds, trials_list)
]
try:
[th.start() for th in worker_threads + fmin_threads]
finally:
print("joining worker threads...")
[th.join() for th in worker_threads + fmin_threads]
# -- not using an exp_key gives a handle to all the trials
# in foodb
all_trials = MongoTrials(tm.connection_string("foodb"))
self.assertEqual(len(all_trials), n_workers * jobs_per_thread)
# Verify that the fmin calls terminated correctly:
for trials in trials_list:
self.assertEqual(
trials.count_by_state_synced(JOB_STATE_DONE), max_evals
)
self.assertEqual(
trials.count_by_state_unsynced(JOB_STATE_DONE), max_evals
)
self.assertEqual(len(trials), max_evals)
# Verify that the first two experiments match.
# (Do these need sorting by trial id?)
trials_A0, trials_A1, trials_B0 = trials_list
self.assertEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_A1.trials],
)
# Verify that the last experiment does not match.
# (Do these need sorting by trial id?)
self.assertNotEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_B0.trials],
)
def objective_with_attachments(x: float):
"""Objective function that includes extra information as attachments and
dictionary attributes."""
return {
"loss": x**2,
"status": STATUS_OK,
"extra_stuff": {"type": None, "value": [0, 1, 2]},
"attachments": {"time": pickle.dumps(time.time)},
}
def fmin_thread_fn(space, mongo_trials: MongoTrials, max_evals: int):
fmin(
fn=objective_with_attachments,
space=space,
algo=rand.suggest,
trials=mongo_trials,
rstate=np.random.default_rng(),
max_evals=max_evals,
return_argmin=False,
)
def test_trial_attachments():
exp_key = "A"
with TempMongo() as tm:
mj = tm.mongo_jobs("foo")
trials = MongoTrials(tm.connection_string("foo"), exp_key=exp_key)
space = hp.uniform("x", -10, 10)
max_evals = 3
fmin_thread = threading.Thread(
target=fmin_thread_fn, args=(space, trials, max_evals)
)
fmin_thread.start()
mw = MongoWorker(mj=mj, logfilename=None, workdir="mongoexp_test_dir")
n_jobs = max_evals
while n_jobs:
try:
mw.run_one("hostname", 10.0, erase_created_workdir=True)
print("worker: ran job")
except Exception as exc:
print(f"worker: encountered error : {str(exc)}")
traceback.print_exc()
n_jobs -= 1
fmin_thread.join()
all_trials = MongoTrials(tm.connection_string("foo"))
assert len(all_trials) == max_evals
assert trials.count_by_state_synced(JOB_STATE_DONE) == max_evals
assert trials.count_by_state_unsynced(JOB_STATE_DONE) == max_evals
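# -- minimal stand-in for the parsed command-line options expected by
# main_worker_helper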
class FakeOptions:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# -- assert that the test raises a ReserveTimeout within 10 seconds
@nose.tools.timed(10.0) # XXX: this needs a suspiciously long timeout
@nose.tools.raises(ReserveTimeout)
@with_mongo_trials
def test_main_worker(trials):
options = FakeOptions(
max_jobs=1,
# XXX: sync this with TempMongo
mongo=as_mongo_str("localhost:22334/foodb"),
reserve_timeout=1,
poll_interval=0.5,
workdir=None,
exp_key="foo",
last_job_timeout=None,
)
# -- check that it runs
# and that the reserve timeout is respected
main_worker_helper(options, ())
| 14,327 | 29.355932 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/integration/test_sklearn.py
|
"""Test scikit-learn integration."""
import pytest
from sklearn.linear_model import Ridge, RidgeClassifier
from sklearn.utils.estimator_checks import check_estimator
from hyperopt import hp
from hyperopt.sklearn import HyperoptSearchCV
@pytest.mark.parametrize(
"estimator,check",
list(
check_estimator(
HyperoptSearchCV(
estimator=Ridge(),
space={"alpha": hp.uniform("alpha", 0, 1)},
max_evals=10,
random_state=42,
),
generate_only=True,
)
),
)
def test_estimator_regression(estimator, check):
"""Test compatibility with the scikit-learn API for regressors."""
if "predict" in check.func.__name__:
# Predict methods do a simple passthrough to the underlying best estimator
# https://github.com/scikit-learn/scikit-learn/blob/1.0.2/sklearn/model_selection/_search.py#L493
pytest.skip("Skipping tests that leverage passthrough to underlying estimator.")
elif "nan" in check.func.__name__:
pytest.skip(
"Skipping tests that check for compatiblity with nulls. Underlying estimator should check."
)
else:
check(estimator)
@pytest.mark.parametrize(
"estimator,check",
list(
check_estimator(
HyperoptSearchCV(
estimator=RidgeClassifier(),
space={"alpha": hp.uniform("alpha", 0, 1)},
max_evals=10,
random_state=42,
),
generate_only=True,
)
),
)
def test_estimator_classification(estimator, check):
"""Test compatibility with the scikit-learn API for classifiers."""
if "predict" in check.func.__name__:
# Predict methods do a simple passthrough to the underlying best estimator
# https://github.com/scikit-learn/scikit-learn/blob/1.0.2/sklearn/model_selection/_search.py#L493
pytest.skip("Skipping tests that leverage passthrough to underlying estimator.")
elif "nan" in check.func.__name__:
pytest.skip(
"Skipping tests that check for compatiblity with nulls. Underlying estimator should check."
)
else:
check(estimator)
| 2,229 | 33.307692 | 105 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/integration/test_spark.py
|
import contextlib
import logging
import os
import shutil
import tempfile
import time
import timeit
import unittest
import numpy as np
from pyspark.sql import SparkSession
from io import StringIO
from hyperopt import SparkTrials, anneal, base, fmin, hp, rand
from hyperopt.tests.unit.test_fmin import test_quadratic1_tpe
from py4j.clientserver import ClientServer
@contextlib.contextmanager
def patch_logger(name, level=logging.INFO):
"""patch logger and give an output"""
io_out = StringIO()
log = logging.getLogger(name)
log.setLevel(level)
log.handlers = []
handler = logging.StreamHandler(io_out)
log.addHandler(handler)
try:
yield io_out
finally:
log.removeHandler(handler)
class TestTempDir:
@classmethod
def make_tempdir(cls, dir="/tmp"):
"""
:param dir: Root directory in which to create the temp directory
"""
cls.tempdir = tempfile.mkdtemp(prefix="hyperopt_tests_", dir=dir)
@classmethod
def remove_tempdir(cls):
shutil.rmtree(cls.tempdir)
class BaseSparkContext:
"""
Mixin which sets up a SparkContext for tests
"""
NUM_SPARK_EXECUTORS = 4
@classmethod
def setup_spark(cls):
cls._spark = (
SparkSession.builder.master(
f"local[{BaseSparkContext.NUM_SPARK_EXECUTORS}]"
)
.appName(cls.__name__)
.getOrCreate()
)
cls._sc = cls._spark.sparkContext
cls._pin_mode_enabled = isinstance(cls._sc._gateway, ClientServer)
cls.checkpointDir = tempfile.mkdtemp()
cls._sc.setCheckpointDir(cls.checkpointDir)
# Small tests run much faster with spark.sql.shuffle.partitions=4
cls._spark.conf.set("spark.sql.shuffle.partitions", "4")
@classmethod
def teardown_spark(cls):
cls._spark.stop()
cls._sc = None
shutil.rmtree(cls.checkpointDir)
@property
def spark(self):
return self._spark
@property
def sc(self):
return self._sc
class TestSparkContext(unittest.TestCase, BaseSparkContext):
@classmethod
def setUpClass(cls):
cls.setup_spark()
@classmethod
def tearDownClass(cls):
cls.teardown_spark()
def test_spark_context(self):
rdd1 = self.sc.parallelize(range(10), 10)
rdd2 = rdd1.map(lambda x: x + 1)
sum2 = rdd2.sum()
assert sum2 == 55
def fn_succeed_within_range(x):
"""
    Test function for exercising how `fmin` handles failures. When `fmin` is
    run with `max_evals=8`, some trials succeed and some fail (with the seed
    used in test_trial_run_info, 6 succeed and 2 fail).
:param x:
:return: 1 when -3 < x < 3, and RuntimeError otherwise
"""
if -3 < x < 3:
return 1
else:
raise RuntimeError(f"{x} is out of range")
class FMinTestCase(unittest.TestCase, BaseSparkContext):
@classmethod
def setUpClass(cls):
cls.setup_spark()
cls._sc.setLogLevel("OFF")
@classmethod
def tearDownClass(cls):
cls.teardown_spark()
def sparkSupportsJobCancelling(self):
return hasattr(self.sc.parallelize([1]), "collectWithJobGroup")
def check_run_status(
self, spark_trials, output, num_total, num_success, num_failure
):
self.assertEqual(
spark_trials.count_total_trials(),
num_total,
"Wrong number of total trial runs: Expected {e} but got {r}.".format(
e=num_total, r=spark_trials.count_total_trials()
),
)
self.assertEqual(
spark_trials.count_successful_trials(),
num_success,
"Wrong number of successful trial runs: Expected {e} but got {r}.".format(
e=num_success, r=spark_trials.count_successful_trials()
),
)
self.assertEqual(
spark_trials.count_failed_trials(),
num_failure,
"Wrong number of failed trial runs: Expected {e} but got {r}.".format(
e=num_failure, r=spark_trials.count_failed_trials()
),
)
log_output = output.getvalue().strip()
self.assertIn(
"Total Trials: " + str(num_total),
log_output,
"""Logging "Total Trials: {num}" missing from the log: {log}""".format(
num=str(num_total), log=log_output
),
)
self.assertIn(
str(num_success) + " succeeded",
log_output,
"""Logging "{num} succeeded " missing from the log: {log}""".format(
num=str(num_success), log=log_output
),
)
self.assertIn(
str(num_failure) + " failed",
log_output,
""" Logging "{num} failed " missing from the log: {log}""".format(
num=str(num_failure), log=log_output
),
)
def assert_task_succeeded(self, log_output, task):
self.assertIn(
f"trial {task} task thread exits normally",
log_output,
"""Debug info "trial {task} task thread exits normally" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def assert_task_failed(self, log_output, task):
self.assertIn(
f"trial {task} task thread catches an exception",
log_output,
"""Debug info "trial {task} task thread catches an exception" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def test_quadratic1_tpe(self):
# TODO: Speed this up or remove it since it is slow (1 minute on laptop)
spark_trials = SparkTrials(parallelism=4)
test_quadratic1_tpe(spark_trials)
def test_trial_run_info(self):
spark_trials = SparkTrials(parallelism=4)
with patch_logger("hyperopt-spark") as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -5, 5),
algo=anneal.suggest,
max_evals=8,
return_argmin=False,
trials=spark_trials,
rstate=np.random.default_rng(94),
)
self.check_run_status(
spark_trials, output, num_total=8, num_success=6, num_failure=2
)
expected_result = {"loss": 1.0, "status": "ok"}
for trial in spark_trials._dynamic_trials:
if trial["state"] == base.JOB_STATE_DONE:
self.assertEqual(
trial["result"],
expected_result,
f"Wrong result has been saved: Expected {expected_result} but got {trial['result']}.",
)
elif trial["state"] == base.JOB_STATE_ERROR:
err_message = trial["misc"]["error"][1]
self.assertIn(
"RuntimeError",
err_message,
"Missing {e} in {r}.".format(e="RuntimeError", r=err_message),
)
self.assertIn(
"Traceback (most recent call last)",
err_message,
"Missing {e} in {r}.".format(e="Traceback", r=err_message),
)
num_success = spark_trials.count_by_state_unsynced(base.JOB_STATE_DONE)
self.assertEqual(
num_success,
6,
f"Wrong number of successful trial runs: Expected 6 but got {num_success}.",
)
num_failure = spark_trials.count_by_state_unsynced(base.JOB_STATE_ERROR)
self.assertEqual(
num_failure,
2,
f"Wrong number of failed trial runs: Expected 2 but got {num_failure}.",
)
def test_accepting_sparksession(self):
spark_trials = SparkTrials(
parallelism=2, spark_session=SparkSession.builder.getOrCreate()
)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_parallelism_arg(self):
default_parallelism = 2
# Test requested_parallelism is None or negative values.
for requested_parallelism in [None, -1]:
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=requested_parallelism,
spark_default_parallelism=default_parallelism,
)
self.assertEqual(
parallelism,
default_parallelism,
"Failed to set parallelism to be default parallelism ({p})"
" ({e})".format(p=parallelism, e=default_parallelism),
)
log_output = output.getvalue().strip()
self.assertIn(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d})".format(d=default_parallelism),
log_output,
"""set to default parallelism missing from log: {log_output}""".format(
log_output=log_output
),
)
# Test requested_parallelism exceeds hard cap
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED + 1,
spark_default_parallelism=default_parallelism,
)
self.assertEqual(
parallelism,
SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED,
"Failed to limit parallelism ({p}) to MAX_CONCURRENT_JOBS_ALLOWED ({e})".format(
p=parallelism, e=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
)
log_output = output.getvalue().strip()
self.assertIn(
"SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c})".format(
c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
log_output,
"""MAX_CONCURRENT_JOBS_ALLOWED value missing from log: {log_output}""".format(
log_output=log_output
),
)
def test_all_successful_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_successful_trials(), 1)
self.assertIn(
"fmin thread exits normally",
log_output,
"""Debug info "fmin thread exits normally" missing from
log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_all_failed_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 10),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_failed_trials(), 1)
self.assert_task_failed(log_output, 0)
spark_trials = SparkTrials(parallelism=4)
# Here return_argmin is True (by default) and an exception should be thrown
with self.assertRaisesRegex(Exception, "There are no evaluation tasks"):
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_timeout_without_job_cancellation(self):
timeout = 4
spark_trials = SparkTrials(parallelism=1, timeout=timeout)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(0.5)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_without_job_cancellation_fmin_timeout(self):
timeout = 5
spark_trials = SparkTrials(parallelism=1)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(1)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
timeout=timeout,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.default_rng(99),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_with_job_cancellation(self):
if not self.sparkSupportsJobCancelling():
print(
"Skipping timeout test since this Apache PySpark version does not "
"support cancelling jobs by job group ID."
)
return
timeout = 2
spark_trials = SparkTrials(parallelism=4, timeout=timeout)
def fn(x):
if x < 0:
time.sleep(timeout + 20)
raise Exception("Task should have been cancelled")
else:
time.sleep(1)
return x
# Test 1 cancelled trial. Examine logs.
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -2, 0),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.default_rng(4),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertEqual(spark_trials.count_cancelled_trials(), 1)
self.assertIn(
"Cancelling all running jobs",
log_output,
""" "Cancelling all running jobs" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"trial task 0 cancelled",
log_output,
""" "trial task 0 cancelled" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_failed(log_output, 0)
# Test mix of successful and cancelled trials.
spark_trials = SparkTrials(parallelism=4, timeout=4)
fmin(
fn=fn,
space=hp.uniform("x", -0.25, 5),
algo=anneal.suggest,
max_evals=6,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=True,
rstate=np.random.default_rng(4),
)
time.sleep(2)
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
# There are 2 finished trials, 1 cancelled running trial and 1 cancelled
# new trial. We do not need to check the new trial since it is not started yet.
self.assertGreaterEqual(
spark_trials.count_successful_trials(),
1,
"Expected at least 1 successful trial but found none.",
)
self.assertGreaterEqual(
spark_trials.count_cancelled_trials(),
1,
"Expected at least 1 cancelled trial but found none.",
)
def test_invalid_timeout(self):
with self.assertRaisesRegex(
Exception,
"timeout argument should be None or a positive value. Given value: -1",
):
SparkTrials(parallelism=4, timeout=-1)
with self.assertRaisesRegex(
Exception,
"timeout argument should be None or a positive value. Given value: True",
):
SparkTrials(parallelism=4, timeout=True)
def test_exception_when_spark_not_available(self):
import hyperopt
orig_have_spark = hyperopt.spark._have_spark
hyperopt.spark._have_spark = False
try:
with self.assertRaisesRegex(Exception, "cannot import pyspark"):
SparkTrials(parallelism=4)
finally:
hyperopt.spark._have_spark = orig_have_spark
def test_no_retry_for_long_tasks(self):
NUM_TRIALS = 2
output_dir = tempfile.mkdtemp()
def fn(_):
with open(os.path.join(output_dir, str(timeit.default_timer())), "w") as f:
f.write("1")
raise Exception("Failed!")
spark_trials = SparkTrials(parallelism=2)
try:
fmin(
fn=fn,
space=hp.uniform("x", 0, 1),
algo=anneal.suggest,
max_evals=NUM_TRIALS,
trials=spark_trials,
show_progressbar=False,
return_argmin=False,
)
except BaseException as e:
self.assertEqual(
"There are no evaluation tasks, cannot return argmin of task losses.",
str(e),
)
call_count = len(os.listdir(output_dir))
self.assertEqual(NUM_TRIALS, call_count)
def test_pin_thread_off(self):
if self._pin_mode_enabled:
raise unittest.SkipTest()
spark_trials = SparkTrials(parallelism=2)
self.assertFalse(spark_trials._spark_pinned_threads_enabled)
self.assertTrue(spark_trials._spark_supports_job_cancelling)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", -1, 1),
algo=rand.suggest,
max_evals=5,
trials=spark_trials,
)
self.assertEqual(spark_trials.count_successful_trials(), 5)
def test_pin_thread_on(self):
if not self._pin_mode_enabled:
raise unittest.SkipTest()
spark_trials = SparkTrials(parallelism=2)
self.assertTrue(spark_trials._spark_pinned_threads_enabled)
self.assertTrue(spark_trials._spark_supports_job_cancelling)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", -1, 1),
algo=rand.suggest,
max_evals=5,
trials=spark_trials,
)
self.assertEqual(spark_trials.count_successful_trials(), 5)
| 21,662 | 33.996769 | 106 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tests/integration/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/learn_model.py
|
#!/usr/local/bin/python3
# encoding: utf-8
import os
import numpy as np
import time
import csv
import math
from src.lattice import *
from itertools import product
from src.utils import parse_files
from src.agent import Agent
from src.model_drift import plot
from src.model_drift import PATuple
from src.model_drift import ObservationGenerator
from src.model_drift import ValidModesInference
from src.model_drift import AgentInterrogationInterface
from src.utils.translate import pddl_parser
from src import generate_random_states
from src.config import *
import generate_random_init_domains
import config
class ModelEstimator(object):
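    """Driver for the model-drift experiments: reads agent models, generates
    observations, and learns the drifted model both from scratch and with
    prior knowledge."""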
def __init__(self, base_dir, domain_name, drifted_domain_file, start_time, flag_init_domains_type, flag_approach):
self.domain_name = domain_name
self.drifted_domain_file = drifted_domain_file
self.example_dir = base_dir+"domains/"+self.domain_name+"/"
self.domains_dir = self.example_dir+"domains/"
self.observation_dir = self.example_dir+"observations/"
self.observation_dir_drifted = self.observation_dir+"observations_drifted/"
self.problem_dir_drifted = self.example_dir+"instances/instances/"
self.problem_file = "instance-1.pddl"
self.result_dir = base_dir+"results/"
self.plot_dir = self.result_dir+self.domain_name+"/"
if not os.path.exists(self.plot_dir):
os.makedirs(self.plot_dir)
if not os.path.exists(self.observation_dir):
os.makedirs(self.observation_dir)
if not os.path.exists(self.observation_dir_drifted):
os.makedirs(self.observation_dir_drifted)
if not os.path.exists(final_result_dir):
os.makedirs(final_result_dir)
if not os.path.exists(TEMP_FOLDER):
os.makedirs(TEMP_FOLDER)
self.start_time = start_time
self.csvfile = open(self.plot_dir+str(domain_name)+str(start_time)+"_"+str(flag_init_domains_type)+'.csv', 'w')
csvwriter = csv.writer(self.csvfile)
fields = ["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PAsDropped", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect", \
"#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)CompleteActionsDropped","(#)FinalActionsIncorrect", \
"InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
csvwriter.writerow(fields)
self.predicates = list()
self.actions = list()
self.action_to_statepair_set_dict = dict()
self.lifted_action_to_relevant_parameterized_statepair_set_dict = dict()
self.type_to_objects = dict()
self.num_total_pals = None
self.PAtuple_to_ModeTuple_set_dict = dict()
self.action_to_relevant_predicate_args = dict()
self.total_scratch = None
self.unique_scratch = None
self.failed_scratch = None
self.repeated_scratch = None
self.valid_models_scratch = None
self.total = None
self.unique = None
self.failed = None
self.repeated = None
self.valid_models = None
self.data = dict()
self.data["marked_changed_actions"] = set()
self.data["query_info"] = list()
self.data["flag_init_domains_type"] = flag_init_domains_type
self.data["flag_approach"] = flag_approach
self.results = dict()
self.results["initial_accuracy"] = list()
self.results["final_avg_accuracy"] = list()
self.results["queries_scratch"] = None
self.results["queries"] = list()
def read_model(self, domains_dir, domain_file):
"""
Return model and PAtuple to ModeTuple map
"""
print("\nReading model from ",domain_file)
action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions, \
objects, types, init_state, domain_name = parse_files.generate_ds(domains_dir+domain_file, self.problem_dir_drifted+self.problem_file)
agent = Agent(domain_name, pred_type_mapping, agent_model_actions)
model = agent.agent_model
model_PAtuple_to_ModeTuple_dict = dict()
for action, predicate_modepair_dict in model.actions.items():
for predicate, modepair in predicate_modepair_dict.items():
PAtuple = PATuple(predicate, action)
model_PAtuple_to_ModeTuple_dict[PAtuple] = tuple(modepair)
return model, model_PAtuple_to_ModeTuple_dict
def get_all_predicate_args(self, action, predicates):
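        """Enumerate every parameterization of each predicate over the
        action's parameters, skipping sequences with repeated arguments."""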
type_to_param_list = dict()
for arg in action.parameters:
if arg.type_name not in type_to_param_list.keys():
type_to_param_list[arg.type_name] = list()
type_to_param_list[arg.type_name].append(arg.name)
predicates_params = set()
for predicate in predicates:
pred_param_list_list = list()
for arg in predicate.arguments:
pred_param_list_list.append(type_to_param_list[arg.type_name])
sequences = list(product(*pred_param_list_list))
valid_sequences = [s for s in sequences if len(s) == len(set(s))]
for item in valid_sequences:
string = ""
for arg in item:
arg = str(arg).replace(",","")
arg = str(arg).replace("'","")
arg = str(arg).replace("(","")
arg = str(arg).replace(")","")
string += " "+arg
predicates_params.add("("+predicate.name+string+")")
return predicates_params
def find_relevant_predicates_for_action(self, action, predicates):
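        """Return the predicates whose argument types all occur among the
        action's parameter types."""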
action_types = set()
for type_ in action.type_map.values():
action_types.add(type_)
relevant_predicates = set()
for predicate in predicates:
relevant = True
for arg in predicate.arguments:
if arg.type_name not in action_types:
relevant = False
break
if relevant:
relevant_predicates.add(predicate)
return relevant_predicates
def generate_observations_for_drifted_model(self, max_obs):
"""
Generate observations for drifted model
"""
obs_generator = ObservationGenerator(self.example_dir, self.domains_dir, self.data)
print("Generating observations from ",self.drifted_domain_file)
self.action_to_statepair_set_dict, self.lifted_action_to_relevant_parameterized_statepair_set_dict, \
self.type_to_objects, self.predicates, self.actions, self.data = obs_generator.generate_optimal_observations(self.drifted_domain_file, self.problem_dir_drifted, self.observation_dir_drifted, None, max_obs)
def learn_drifted_model_from_scratch(self):
"""
Learn drifted model using just querying
"""
print("Learning drifted model ",drifted_domain_file, " from scratch")
interrogation = AgentInterrogationInterface(self.domains_dir+self.drifted_domain_file, self.problem_dir_drifted+self.problem_file)
self.total_scratch, self.unique_scratch, self.failed_scratch, self.repeated_scratch, self.valid_models_scratch = interrogation.learn_model_from_scratch()
self.results["queries_scratch"] = self.unique_scratch
self.data["drifted_valid_models"] = self.valid_models_scratch
def generate_observations_for_init_model(self, init_domain_file, domains_dir_init, problem_dir_init, observation_dir_init):
"""
Generate observations for init model
"""
obs_generator = ObservationGenerator(self.example_dir, self.domains_dir, self.data)
self.data = obs_generator.generate_observations(domains_dir_init, init_domain_file, self.problem_dir_drifted)
if self.data["flag_approach"]==1 or self.data["flag_approach"]==2:
print("Generating negative examples..")
self.data = obs_generator.get_negative_examples(domains_dir_init, init_domain_file, problem_dir_init, observation_dir_init)
def learn_drifted_model_with_knowledge(self, init_model, init_PAtuple_to_ModeTuple_dict):
"""
Learn drifted model using examples and querying
"""
# Compute PAtuple to set of possible ModeTuples for drifted model
print("Computing possible modes from observations for ",self.drifted_domain_file)
valid_modes_inference = ValidModesInference(self.lifted_action_to_relevant_parameterized_statepair_set_dict, self.predicates, self.actions)
valid_modes_inference.compute_valid_modes()
self.PAtuple_to_ModeTuple_set_dict = valid_modes_inference.PAtuple_to_ModeTuple_set_dict
# Learn drifted model with AIA
agent_interrogation = AgentInterrogationInterface(self.domains_dir+self.drifted_domain_file, self.problem_dir_drifted+self.problem_file)
self.data = agent_interrogation.compute_abstract_model(init_model, self.PAtuple_to_ModeTuple_set_dict, init_PAtuple_to_ModeTuple_dict, self.data)
print("Learning drifted model with AIA")
if len(self.data["PALtuples_dropped"])>0 or len(self.data["marked_changed_actions"])>0:
self.total, self.unique, self.failed, self.repeated, self.valid_models, iaa_main = agent_interrogation.learn_model_with_prior()
else:
self.total, self.unique, self.failed, self.repeated, self.valid_models = 0, 0, 0, 0, [init_model]
# print(iaa_main.pal_tuple_dict)
def analyze_difference(self, init_PAtuple_modepair_dict, PAtuple_to_ModeTuple_set_dict):
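        """Report the PATuple keys present in only one of the two mode maps."""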
print("Keys in Init domain but not in learned Drifted domain:")
for key in init_PAtuple_modepair_dict.keys():
if key not in PAtuple_to_ModeTuple_set_dict.keys():
print("\t",key)
print("Keys in learned Drifted domain but not in Init domain:")
for key in PAtuple_to_ModeTuple_set_dict.keys():
if key not in init_PAtuple_modepair_dict.keys():
print("\t",key)
print("PATuples learned in Init model:",len(init_PAtuple_modepair_dict))
print("PATuples learned in Drifted model:",len(PAtuple_to_ModeTuple_set_dict))
def get_model_difference(self, model1, model2, flag_print=True):
"""
Get difference between two models
"""
pals_diff_count = 0
pals_diff_set = set()
for action in model1.actions:
for pred in model1.actions[action]:
if model1.actions[action][pred][0]!=model2.actions[action][pred][0]:
pals_diff_set.add((action,pred,Location.PRECOND))
pals_diff_count += 1
if model1.actions[action][pred][1]!=model2.actions[action][pred][1]:
pals_diff_set.add((action,pred,Location.EFFECTS))
pals_diff_count += 1
for pred in model2.actions[action]:
if pred not in model1.actions[action]:
if model2.actions[action][pred][0]!=Literal.ABS:
pals_diff_set.add((action,pred,Location.PRECOND))
pals_diff_count += 1
if model2.actions[action][pred][1]!=Literal.ABS:
pals_diff_set.add((action,pred,Location.EFFECTS))
pals_diff_count += 1
actions_diff_set = set()
for pal in pals_diff_set:
actions_diff_set.add(pal[0])
actions_diff_count = len(actions_diff_set)
if flag_print:
for tup in pals_diff_set:
print(tup)
print("Number PALs different :",pals_diff_count,"/",self.num_total_pals)
return pals_diff_count, pals_diff_set, actions_diff_count, actions_diff_set
def print_analysis(self, init_domain_file, init_model, drifted_model):
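        """Summarize model accuracy and query counts, then append a row to the
        results CSV and the per-run text log."""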
init_domain_filename = init_domain_file.split("/")[-1].split(".")[0]
print("\nDiff betn init and drifted models:")
initial_num_pals_drifted, initial_pals_drifted_set, initial_num_actions_drifted, initial_actions_drifted_set = self.get_model_difference(init_model, drifted_model)
initial_accuracy = (self.num_total_pals-initial_num_pals_drifted)/self.num_total_pals
print("\n PALs dropped:")
for pal in self.data["PALtuples_dropped"]:
print(pal)
print("\nDiff betn drifted and learnt models:")
final_avg_num_pals_incorrect = 0.0
final_avg_num_actions_incorrect = 0.0
for learned_model in self.valid_models:
incorrect_pals, incorrect_pals_set, incorrect_actions, incorrect_actions_set = self.get_model_difference(drifted_model, learned_model, True)
final_avg_num_pals_incorrect += incorrect_pals
final_avg_num_actions_incorrect += incorrect_actions
final_avg_num_pals_incorrect = final_avg_num_pals_incorrect/len(self.valid_models)
final_avg_num_actions_incorrect = final_avg_num_actions_incorrect/len(self.valid_models)
final_avg_accuracy = (self.num_total_pals-final_avg_num_pals_incorrect)/self.num_total_pals
print("Initial model estimate accuracy:",initial_accuracy)
print("Final model estimate accuracy:",final_avg_accuracy)
print("\ntotal queries: AIA:",self.total_scratch, ", ours:",self.total)
print("unique queries: AIA:",self.unique_scratch, ", ours:",self.unique)
print("failed queries: AIA:",self.failed_scratch, "failed:",self.failed)
print("repeated queries: AIA:",self.repeated_scratch, "repeated:",self.repeated)
self.results["initial_accuracy"].append(initial_accuracy)
self.results["final_avg_accuracy"].append(final_avg_accuracy)
self.results["queries"].append(self.unique)
csvwriter = csv.writer(self.csvfile)
#["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PAsDropped", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect",
# "#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)CompleteActionsDropped","(#)FinalActionsIncorrect",
# "InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
csvwriter.writerow([init_domain_filename, self.num_total_pals, "("+str(initial_num_pals_drifted)+") "+str(initial_pals_drifted_set), "("+str(len(self.data["PALtuples_dropped"]))+") "+str(self.data["PALtuples_dropped"]), "("+str(len(self.data["PALtuples_dropped_no_obs"]))+") "+str(self.data["PALtuples_dropped_no_obs"]), "("+str(final_avg_num_pals_incorrect)+") "+str(incorrect_pals_set), \
len(self.actions), "("+str(initial_num_actions_drifted)+") "+str(initial_actions_drifted_set), "("+str(len(self.actions)-len(self.data["actions_with_no_obs"]))+") "+str(set(self.actions)-set(self.data["actions_with_no_obs"])), "("+str(len(self.data["marked_changed_actions"]))+") "+str(self.data["marked_changed_actions"]), "("+str(final_avg_num_actions_incorrect)+") "+str(incorrect_pals_set), \
initial_accuracy, final_avg_accuracy, self.unique_scratch, self.unique, len(self.valid_models)])
with open(self.plot_dir+str(self.domain_name)+str(self.start_time)+"_"+str(flag_init_domains_type)+'.txt', 'a') as f:
string = "\n\n\n"+str(init_domain_filename)+":\n"
for tup in self.data["query_info"]:
string += "Initial state: "+str(tup[0])+"\n\n"
string += "Plan: "+str(tup[1])+"\n\nModels:\n"
for v in tup[2]:
for action, predicates in v.actions.items():
string += str(action)+str(predicates)+"\n"
string+= "\n"
f.write(string)
f.close()
def get_init_dirs(base_dir, domain_to_total_pals, domain_name, generate_init_domains_type, num_pals, num_files_each_pal, interval):
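    """Resolve the init domain/instance/observation directories for the chosen
    generation type and build the list of init domain files to evaluate."""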
example_dir = base_dir+"domains/"+domain_name+"/"
domains_dir_init = example_dir+"domains/"
init_domains_random_dir = domains_dir_init+"init_domains_random/"
init_domains_increased_applicability_dir = domains_dir_init+"init_domains_increased_applicability/"
init_domains_mix_dir = domains_dir_init+"init_domains_mix/"
init_problem_random_dir = example_dir+"instances/init_instances_random/"
init_problem_increased_applicability_dir = example_dir+"instances/init_instances_increased_applicability/"
init_problem_mix_dir = example_dir+"instances/init_instances_mix/"
init_observation_random_dir = example_dir+"observations/init_observations_random/"
init_observation_increased_applicability_dir = example_dir+"observations/init_observations_increased_applicability/"
init_observation_mix_dir = example_dir+"observations/init_observations_mix/"
if generate_init_domains_type == 0:
if not os.path.exists(init_problem_random_dir):
os.makedirs(init_problem_random_dir)
if not os.path.exists(init_observation_random_dir):
os.makedirs(init_observation_random_dir)
elif generate_init_domains_type == 1:
if not os.path.exists(init_problem_increased_applicability_dir):
os.makedirs(init_problem_increased_applicability_dir)
if not os.path.exists(init_observation_increased_applicability_dir):
os.makedirs(init_observation_increased_applicability_dir)
elif generate_init_domains_type == 2:
if not os.path.exists(init_problem_mix_dir):
os.makedirs(init_problem_mix_dir)
if not os.path.exists(init_observation_mix_dir):
os.makedirs(init_observation_mix_dir)
init_domains_dir, init_problems_dir, init_observations_dir = None, None, None
init_domain_files = list()
num_incorrect_pals_list = list()
flag_init_domains_type_list = list()
last_reduced_capability_num_dropped_pals = None
if generate_init_domains_type == 0:
init_domains_dir = init_domains_random_dir
init_problems_dir = init_problem_random_dir
init_observations_dir = init_observation_random_dir
interval = math.floor(domain_to_total_pals[domain_name]/num_pals)
for i in range(1,domain_to_total_pals[domain_name],interval):
num_incorrect_pals_list.append(i)
for j in range(num_files_each_pal):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(flag_init_domains_type)
elif generate_init_domains_type == 1:
init_domains_dir = init_domains_increased_applicability_dir
init_problems_dir = init_problem_increased_applicability_dir
init_observations_dir = init_observation_increased_applicability_dir
interval = 1
for i in range(1,domain_to_total_pals_increased_applicability[domain_name], interval):
num_incorrect_pals_list.append(i)
for j in range(num_files_each_pal):
num_incorrect_pals_list.append(i)
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(flag_init_domains_type)
else:
init_domains_dir = init_domains_mix_dir
init_problems_dir = init_problem_mix_dir
init_observations_dir = init_observation_mix_dir
for i in range(1,domain_to_total_pals[domain_name],interval):
num_incorrect_pals_list.append(i)
if i < domain_to_total_pals_increased_applicability[domain_name]:
for j in range(int(num_files_each_pal/2)):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(0)
for j in range(int(num_files_each_pal/2),int(num_files_each_pal)):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(1)
last_reduced_capability_num_dropped_pals = i
else:
for j in range(num_files_each_pal):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(0)
return init_domains_dir, init_problems_dir, init_observations_dir, init_domain_files, num_incorrect_pals_list, flag_init_domains_type_list, last_reduced_capability_num_dropped_pals
def set_paths_generate_random_states(domain_name):
base_dir = os.getcwd()+"/"
domains_path = base_dir+"domains/"+domain_name+"/"
domain_file = "domain_init.pddl"
problem_dir = domains_path+"instances"
random_state_folder = base_dir+"random_states/"
gen_result_file = domains_path+"gen_res.txt"
generate_random_states.main(domain_name, domains_path, domain_file, problem_dir, random_state_folder, gen_result_file)
def learn_model_with_increasing_observations():
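    """Repeat the drift experiment while increasing the number of observations
    available from the drifted model, then plot the results."""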
domains = config.domains
drifted_domain_file = config.drifted_domain_file
flag_init_domains_type = config.flag_init_domains_type
flag_approach = config.flag_approach
num_affected_pals = config.num_affected_pals
domain_to_num_files_each_pal = config.domain_to_num_files_each_pal
domain_to_total_pals = config.domain_to_total_pals
domain_to_total_pals_increased_applicability = config.domain_to_total_pals_increased_applicability
domains_mix_intervals = config.domains_mix_intervals
domain_to_num_sas = config.domain_to_num_sas
base_dir = os.getcwd()+"/"
start_time = time.time()
for domain_name in domains:
final_results = dict()
for num_obs in range(1,domain_to_num_sas[domain_name],2):
num_files_each_pal = domain_to_num_files_each_pal[domain_name]
final_results[num_obs] = dict()
final_results[num_obs]["initial_model_accuracy"] = list()
final_results[num_obs]["final_model_accuracy"] = list()
final_results[num_obs]["queries_scratch"] = list()
final_results[num_obs]["queries"] = list()
final_results[num_obs]["acc_std_dev"] = list()
final_results[num_obs]["queries_std_dev"] = list()
estimator = ModelEstimator(base_dir, domain_name, drifted_domain_file, start_time, flag_init_domains_type, flag_approach)
estimator.compute_total_number_pal()
domain_to_total_pals[domain_name] = estimator.num_total_pals
print(domain_to_total_pals[domain_name])
drifted_model, drifted_PAtuple_to_ModeTuple_dict = estimator.read_model(estimator.domains_dir, drifted_domain_file)
estimator.generate_observations_for_drifted_model(num_obs)
estimator.learn_drifted_model_from_scratch()
domains_dir_init, problem_dir_init, observation_dir_init, init_domain_files, num_incorrect_pals_list, _, _ = \
get_init_dirs(base_dir, domain_to_total_pals, domain_name, flag_init_domains_type, num_affected_pals, num_files_each_pal, domains_mix_intervals[domain_name])
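            # NOTE: the generated list is overridden with a single fixed init domain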
init_domain_files = ["domain_201_0.pddl"]
for init_domain_file in init_domain_files:
init_model, init_PAtuple_to_ModeTuple_dict = estimator.read_model(domains_dir_init, init_domain_file)
estimator.analyze_difference(init_PAtuple_to_ModeTuple_dict, drifted_PAtuple_to_ModeTuple_dict)
estimator.generate_observations_for_init_model(init_domain_file, domains_dir_init, problem_dir_init, observation_dir_init)
estimator.learn_drifted_model_with_knowledge(init_model, init_PAtuple_to_ModeTuple_dict)
estimator.print_analysis(init_domain_file, init_model, drifted_model)
i = 0
num_files_each_pal = len(init_domain_files)
final_results[num_obs]["initial_model_accuracy"].append(np.sum(estimator.results["initial_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results[num_obs]["final_model_accuracy"].append(np.sum(estimator.results["final_avg_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results[num_obs]["queries_scratch"].append(estimator.results["queries_scratch"])
final_results[num_obs]["queries"].append(np.sum(estimator.results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
final_results[num_obs]["acc_std_dev"].append(np.std(estimator.results["final_avg_accuracy"][i:i+num_files_each_pal]))
final_results[num_obs]["queries_std_dev"].append(np.std(estimator.results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
plot.plot_for_increasing_observations(final_results, estimator.plot_dir+"Observations_experiment_"+str(start_time)+"_"+str(flag_approach)+".png", domain_name +" (#Pals = "+str(domain_to_total_pals[domain_name])+", drift = 50%)")
for num_obs,item in final_results.items():
print(num_obs,":",item)
print("All experiments took ",str(time.time()-start_time)," s")
if __name__=="__main__":
domains = config.domains
drifted_domain_file = config.drifted_domain_file
flag_init_domains_type = config.flag_init_domains_type
flag_approach = config.flag_approach
num_affected_pals = config.num_affected_pals
domain_to_num_files_each_pal = config.domain_to_num_files_each_pal
domain_to_total_pals = config.domain_to_total_pals
domain_to_total_pals_increased_applicability = config.domain_to_total_pals_increased_applicability
domains_mix_intervals = config.domains_mix_intervals
domain_to_num_sas = config.domain_to_num_sas
base_dir = os.getcwd()+"/"
start_time = time.time()
for domain_name in domains:
if config.regenerate_random_states:
set_paths_generate_random_states(domain_name)
num_files_each_pal = domain_to_num_files_each_pal[domain_name]
final_results = dict()
final_results["num_pals_incorrect"] = list()
final_results["initial_model_accuracy"] = list()
final_results["final_model_accuracy"] = list()
final_results["acc_std_dev"] = list()
final_results["queries_scratch"] = list()
final_results["queries"] = list()
final_results["queries_std_dev"] = list()
estimator = ModelEstimator(base_dir, domain_name, drifted_domain_file, start_time, flag_init_domains_type, flag_approach)
estimator.actions, estimator.predicates, _, _, _, \
estimator.num_total_pals, num_total_pals_increased_applicability, estimator.action_to_relevant_predicate_args = generate_random_init_domains.parse_domain(estimator.domains_dir+estimator.drifted_domain_file)
domain_to_total_pals[domain_name] = estimator.num_total_pals
domain_to_total_pals_increased_applicability[domain_name] = num_total_pals_increased_applicability
final_results["domain_to_total_pals"] = domain_to_total_pals[domain_name]
print("Total PALs in the domain:",domain_to_total_pals[domain_name])
drifted_model, drifted_PAtuple_to_ModeTuple_dict = estimator.read_model(estimator.domains_dir, drifted_domain_file)
estimator.generate_observations_for_drifted_model(domain_to_num_sas[domain_name])
estimator.learn_drifted_model_from_scratch()
domains_dir_init, problem_dir_init, observation_dir_init, init_domain_files, num_incorrect_pals_list, _, _ = \
get_init_dirs(base_dir, domain_to_total_pals, domain_name, flag_init_domains_type, num_affected_pals, num_files_each_pal, domains_mix_intervals[domain_name])
final_results["num_pals_incorrect"] = num_incorrect_pals_list
for init_domain_file in init_domain_files:
init_model, init_PAtuple_to_ModeTuple_dict = estimator.read_model(domains_dir_init, init_domain_file)
estimator.analyze_difference(init_PAtuple_to_ModeTuple_dict, drifted_PAtuple_to_ModeTuple_dict)
# Generate observations for init domain to compare with the observations of drifted domain
estimator.generate_observations_for_init_model(init_domain_file, domains_dir_init, problem_dir_init, observation_dir_init)
# Use inferences and query the drifted agent to learn its updated domain
estimator.learn_drifted_model_with_knowledge(init_model, init_PAtuple_to_ModeTuple_dict)
estimator.print_analysis(init_domain_file, init_model, drifted_model)
i = 0
for k in range(len(num_incorrect_pals_list)):
final_results["initial_model_accuracy"].append(np.sum(estimator.results["initial_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["final_model_accuracy"].append(np.sum(estimator.results["final_avg_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["acc_std_dev"].append(np.std(estimator.results["final_avg_accuracy"][i:i+num_files_each_pal]))
final_results["queries_scratch"].append(estimator.results["queries_scratch"])
final_results["queries"].append(np.sum(estimator.results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["queries_std_dev"].append(np.std(estimator.results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
i += num_files_each_pal
plot.plot_1(final_results, estimator.plot_dir+"plot"+str(start_time)+"_"+str(flag_init_domains_type)+".png", domain_name +" (#Pals = "+str(domain_to_total_pals[domain_name])+",#Obs = "+str(domain_to_num_sas[domain_name])+")")
print("All experiments took ",str(time.time()-start_time)," s")
| 29,709 | 58.658635 | 405 |
py
|
DAAISy
|
DAAISy-main/config.py
|
#!/usr/local/bin/python3
# encoding: utf-8
domains = ["blocksworld","gripper","miconic","satellite","rovers","termes"]
drifted_domain_file = "domain.pddl" # IPC domain
regenerate_random_states = False
# Computed automatically in generate_random_init_domains.py and learn_model.py
domain_to_total_pals, domain_to_total_pals_increased_applicability = dict(), dict()
# Type of init domains to be generated. Each init_domain_file has filename format: domainname_#PALsAffected_#file.pddl
flag_init_domains_type_dict = {0: "randomly generated init domains", 1: "only increased applicability init domains", 2: "both types of domain generation"}
flag_init_domains_type = 0 # 0 for randomly generated init domains
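# Example (hypothetical filename): "domain_3_0.pddl" would be the 0th init domain
# generated with 3 affected PALs for the current domain.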
flag_approach = 2 # always set to 2, as the approach does not know the type (increased/reduced functionality) of the domains
# Number of init domains generated for each #PALsAffected being considered
domain_to_num_files_each_pal = {"blocksworld":6,"gripper":6,"miconic":6,"satellite":6,"rovers":6,"termes":2}
num_affected_pals = 20 # total_pals/num_affected_pals determines the interval between the #PALsAffected values used for init domains
domains_mix_intervals = {'blocksworld': 2, 'gripper':1, 'miconic':1, 'satellite': 2, 'rovers':4, 'parking':2, 'termes':10, 'freecell':50, 'logistics':10}
# Number of observations (i.e. s-a-s) used (should always be <= the number of obs in the plan generated by the input instance)
# Delete intermediate instances and observation files if you change the init_domains for any flag_init_domains_type
domain_to_num_sas = {'blocksworld': 10, 'gripper':10, 'miconic':10, 'satellite': 10, 'rovers':10, 'termes':10}
| 1,654 | 67.958333 | 157 |
py
|
DAAISy
|
DAAISy-main/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/generate_random_init_domains.py
|
#!/usr/local/bin/python3
# encoding: utf-8
import random
import copy
import math
import time
from itertools import product
import sys
from src.config import *
from src.utils.translate import pddl_parser
import config
seed = int(time.time())
random.seed(seed)
print("Using {} as random seed..".format(seed))
INFO_STRING = ""
def parse_pre(pre):
"""
parses action pre to predicate list string
"""
# TODO: handle negative preconditions
string = ""
if pre.parts:
for atom in pre.parts:
string += "("+atom.predicate
string_ = ""
for key in atom.key[1:]:
key = str(key)
key = key.replace("(","")
key = key.replace(")","")
key = key.replace(",","")
key = key.replace("'","")
string_ += key
if string_!="":
string += " "
string += string_
string += ")\n\t\t"
else:
string += "("+pre.predicate
string_ = ""
for key in pre.key[1:]:
key = str(key)
key = key.replace("(","")
key = key.replace(")","")
key = key.replace(",","")
key = key.replace("'","")
string_ += key
if string_!="":
string += " "
string += string_
string += ")\n\t\t"
return string
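# Illustrative sketch for parse_pre (hypothetical atom): a precondition atom
# on(?x, ?y) is rendered as "(on ?x ?y)" followed by "\n\t\t", one such entry
# per conjunct, ready to be spliced into a PDDL domain template.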
def parse_eff(eff_list):
"""
parses action eff to predicate list string
"""
string = ""
for eff in eff_list:
negated = eff.literal.negated
if negated:
string += "(not "
string += "("
string += eff.literal.predicate
string_ = ""
for key in eff.literal.key[1:]:
key = str(key)
key = key.replace("(","")
key = key.replace(")","")
key = key.replace(",","")
key = key.replace("'","")
string_ += key
if string_!="":
string += " "
string += string_+")"
if negated:
string += ")"
string += "\n\t\t"
return string
def find_relevant_predicates_for_action(action, predicates):
action_types = set()
for type_ in action.type_map.values():
action_types.add(type_)
relevant_predicates = set()
for predicate in predicates:
relevant = True
for arg in predicate.arguments:
if arg.type_name not in action_types:
relevant = False
break
if relevant:
relevant_predicates.add(predicate)
return relevant_predicates
def get_all_predicate_args(action, predicates):
type_to_param_list = dict()
for arg in action.parameters:
if arg.type_name not in type_to_param_list.keys():
type_to_param_list[arg.type_name] = list()
type_to_param_list[arg.type_name].append(arg.name)
predicates_params = set()
for predicate in predicates:
pred_param_list_list = list()
for arg in predicate.arguments:
pred_param_list_list.append(type_to_param_list[arg.type_name])
sequences = list(product(*pred_param_list_list))
valid_sequences = [s for s in sequences if len(s) == len(set(s))]
for item in valid_sequences:
string = ""
for arg in item:
arg = str(arg).replace(",", "").replace("'", "").replace("(", "").replace(")", "")
string += " " + arg
predicates_params.add("("+predicate.name+string+")")
return predicates_params
def parse_domain(domain_path):
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_path)
domain_name, domain_requirements, types, type_dict, constants, \
predicates, predicate_dict, functions, actions, axioms = pddl_parser.parsing_functions.parse_domain_pddl(fd_domain)
action_relevant_predicates = dict()
action_relevant_predicates_args = dict()
PAL_list = list()
action_to_PALs = dict()
num_total_pals = 0
for action in actions:
action_relevant_predicates[action] = find_relevant_predicates_for_action(action, predicates)
action_relevant_predicates_args[action] = get_all_predicate_args(action, action_relevant_predicates[action])
num_total_pals += len(action_relevant_predicates_args[action])*2
for predicate in action_relevant_predicates_args[action]:
PAL_list.append((predicate, action.name, Location.PRECOND))
PAL_list.append((predicate, action.name, Location.EFFECTS))
if action not in action_to_PALs:
action_to_PALs[action] = list()
action_to_PALs[action].append((predicate, action.name, Location.PRECOND))
action_to_PALs[action].append((predicate, action.name, Location.EFFECTS))
action_to_pre_eff_string = dict()
PA_to_ModeTuple_dict = dict()
for action in actions:
action_to_pre_eff_string[action.name] = dict()
action_to_pre_eff_string[action.name]["pre"] = parse_pre(action.precondition)
action_to_pre_eff_string[action.name]["eff"] = parse_eff(action.effects)
for PAL in action_to_PALs[action]:
if "(not "+PAL[0]+")" in action_to_pre_eff_string[action.name]["pre"]:
pre = Literal.NEG
elif PAL[0] in action_to_pre_eff_string[action.name]["pre"]:
pre = Literal.POS
else:
pre = Literal.ABS
if "(not "+PAL[0]+")" in action_to_pre_eff_string[action.name]["eff"]:
eff = Literal.NEG
elif PAL[0] in action_to_pre_eff_string[action.name]["eff"]:
eff = Literal.POS
else:
eff = Literal.ABS
PA_to_ModeTuple_dict[(PAL[0],PAL[1])] = (pre, eff)
# Add pal-tuples if pre mode is +/-
PAL_list_increased_applicability = list()
for PAL in PAL_list:
if PAL[2]==Location.PRECOND:
mode_tuple = PA_to_ModeTuple_dict[(PAL[0],PAL[1])]
if mode_tuple[0]==Literal.POS or mode_tuple[0]==Literal.NEG:
PAL_list_increased_applicability.append(PAL)
# else:
# mode_tuple = PA_to_ModeTuple_dict[(PAL[0],PAL[1])]
# if mode_tuple[1]==Literal.NEG:
# PAL_list_increased_applicability.append(PAL)
num_total_pals_increased_applicability = len(PAL_list_increased_applicability)-len(actions)
return actions, predicates, PAL_list, PAL_list_increased_applicability, PA_to_ModeTuple_dict, num_total_pals, num_total_pals_increased_applicability, action_relevant_predicates_args
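# Sketch of parse_domain's bookkeeping (hypothetical blocksworld entry):
# PA_to_ModeTuple_dict[("(on ?o1 ?o2)", "stack")] == (Literal.ABS, Literal.POS)
# would mean the predicate is absent from stack's precondition and added by its
# effects; PAL_list enumerates every (predicate, action, location) tuple.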
def get_INFO_STRING(sampled_PALs, num_incorrect_pals):
action_to_PAL_changed = dict()
INFO_STRING = "\n"
for PAL in sampled_PALs:
predicate = PAL[0]
action = PAL[1]
if action not in action_to_PAL_changed:
action_to_PAL_changed[action] = list()
action_to_PAL_changed[action].append(PAL)
for action_name, PALs in action_to_PAL_changed.items():
INFO_STRING += ";; "+str(action_name)+" action has changed: "
for PAL in PALs:
INFO_STRING += str(PAL[0])+", "
INFO_STRING += "\n"
return INFO_STRING
def get_random_model(actions, predicates, PAL_list, PA_to_ModeTuple_dict, count):
"""
randomly samples a pal-tuple (location could be precondition or effect) and
changes its mode ensuring that (+,+) and (-,-) are not possible (pre,eff) modes.
This is ensured for pal-tuples with l=pre by selecting - or ABS in pre if eff is +, and
selecting + or ABS in pre if eff is - and similarly for pal-tuples with l=eff.
"""
sampled = random.sample(PAL_list, count)
empty_flag = False
list_ = list()
new_PA_to_ModeTuple_dict = copy.deepcopy(PA_to_ModeTuple_dict)
for PAL in PAL_list:
if PAL in sampled:
possibilities = [Literal.POS, Literal.NEG, Literal.ABS]
current_pre_mode = new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])][0]
current_eff_mode = new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])][1]
if PAL[2]==Location.PRECOND:
possibilities.remove(current_pre_mode)
if current_eff_mode in [Literal.POS,Literal.NEG]:
if current_eff_mode in possibilities:
possibilities.remove(current_eff_mode)
selected_mode = random.choice(possibilities)
new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])] = (selected_mode, current_eff_mode)
else:
possibilities.remove(current_eff_mode)
if current_pre_mode in [Literal.POS,Literal.NEG]:
if current_pre_mode in possibilities:
possibilities.remove(current_pre_mode)
selected_mode = random.choice(possibilities)
new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])] = (current_pre_mode, selected_mode)
new_action_to_pre_eff_string = dict()
for PA, ModeTuple in new_PA_to_ModeTuple_dict.items():
pre_predicate_string = ""
eff_predicate_string = ""
if ModeTuple[0]==Literal.POS:
pre_predicate_string += PA[0]+"\n\t\t\t"
elif ModeTuple[0]==Literal.NEG:
pre_predicate_string += "(not "+PA[0]+")\n\t\t\t"
if ModeTuple[1]==Literal.POS:
eff_predicate_string += PA[0]+"\n\t\t\t"
elif ModeTuple[1]==Literal.NEG:
eff_predicate_string += "(not "+PA[0]+")\n\t\t\t"
if PA[1] not in new_action_to_pre_eff_string:
new_action_to_pre_eff_string[PA[1]] = dict()
new_action_to_pre_eff_string[PA[1]]["pre"] = ""
new_action_to_pre_eff_string[PA[1]]["eff"] = ""
new_action_to_pre_eff_string[PA[1]]["pre"] += pre_predicate_string
new_action_to_pre_eff_string[PA[1]]["eff"] += eff_predicate_string
for action_name, predicate_string_dict in new_action_to_pre_eff_string.items():
if predicate_string_dict["pre"]=="" or predicate_string_dict["eff"]=="":
empty_flag = True
break
return new_action_to_pre_eff_string, new_PA_to_ModeTuple_dict, sampled, empty_flag
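# Note: callers loop while empty_flag is True, i.e. they resample whenever some
# action ends up with an empty precondition or effect string.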
def get_random_increased_applicability_model(actions, predicates, PAL_list, PA_to_ModeTuple_dict, PAL_list_pre_present, count):
"""
i.e. when drifted model's functionality has reduced/init model has increased applicability.
changes mode of the sampled pal-tuples (l=pre) with +/- mode to ABS and
sampled pal-tuples (l=eff) with mode - to ABS.
"""
sampled = random.sample(PAL_list_pre_present, count)
empty_flag = False
list_ = list()
new_PA_to_ModeTuple_dict = copy.deepcopy(PA_to_ModeTuple_dict)
for PAL in PAL_list:
if PAL in sampled:
current_pre_mode = new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])][0]
current_eff_mode = new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])][1]
if PAL[2]==Location.PRECOND:
selected_mode = Literal.ABS
new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])] = (selected_mode, current_eff_mode)
else:
selected_mode = Literal.ABS
new_PA_to_ModeTuple_dict[(PAL[0],PAL[1])] = (current_pre_mode, selected_mode)
new_action_to_pre_eff_string = dict()
for PA, ModeTuple in new_PA_to_ModeTuple_dict.items():
pre_predicate_string = ""
eff_predicate_string = ""
if ModeTuple[0]==Literal.POS:
pre_predicate_string += PA[0]+"\n\t\t\t"
elif ModeTuple[0]==Literal.NEG:
pre_predicate_string += "(not "+PA[0]+")\n\t\t\t"
if ModeTuple[1]==Literal.POS:
eff_predicate_string += PA[0]+"\n\t\t\t"
elif ModeTuple[1]==Literal.NEG:
eff_predicate_string += "(not "+PA[0]+")\n\t\t\t"
if PA[1] not in new_action_to_pre_eff_string:
new_action_to_pre_eff_string[PA[1]] = dict()
new_action_to_pre_eff_string[PA[1]]["pre"] = ""
new_action_to_pre_eff_string[PA[1]]["eff"] = ""
new_action_to_pre_eff_string[PA[1]]["pre"] += pre_predicate_string
new_action_to_pre_eff_string[PA[1]]["eff"] += eff_predicate_string
for action_name, predicate_string_dict in new_action_to_pre_eff_string.items():
if predicate_string_dict["pre"]=="" or predicate_string_dict["eff"]=="":
empty_flag = True
break
return new_action_to_pre_eff_string, new_PA_to_ModeTuple_dict, sampled, empty_flag
def initialize_dirs(init_domains_random_dir, init_domains_increased_applicability_dir, init_domains_both_dir, domain_to_total_pals, domain_name, generate_init_domains_type, num_pals, num_files_each_pal, interval):
init_domain_files = list()
num_incorrect_pals_list = list()
flag_init_domains_type_list = list()
last_reduced_capability_num_dropped_pals = None
if generate_init_domains_type == 0:
if not os.path.exists(init_domains_random_dir):
os.mkdir(init_domains_random_dir)
interval = math.floor(domain_to_total_pals[domain_name]/num_pals)
for i in range(1,domain_to_total_pals[domain_name],interval):
for j in range(num_files_each_pal):
num_incorrect_pals_list.append(i)
init_domain_files.append(init_domains_random_dir+"domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(generate_init_domains_type)
elif generate_init_domains_type == 1:
if not os.path.exists(init_domains_increased_applicability_dir):
os.mkdir(init_domains_increased_applicability_dir)
interval = 1
for i in range(1,domain_to_total_pals_increased_applicability[domain_name], interval):
for j in range(num_files_each_pal):
num_incorrect_pals_list.append(i)
init_domain_files.append(init_domains_increased_applicability_dir+"domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(generate_init_domains_type)
else:
if not os.path.exists(init_domains_both_dir):
os.mkdir(init_domains_both_dir)
for i in range(1,domain_to_total_pals[domain_name],interval):
if i < domain_to_total_pals_increased_applicability[domain_name]:
for j in range(int(num_files_each_pal/2)):
num_incorrect_pals_list.append(i)
init_domain_files.append(init_domains_both_dir+"domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(0)
for j in range(int(num_files_each_pal/2),int(num_files_each_pal)):
num_incorrect_pals_list.append(i)
init_domain_files.append(init_domains_both_dir+"domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(1)
last_reduced_capability_num_dropped_pals = i
else:
for j in range(num_files_each_pal):
num_incorrect_pals_list.append(i)
init_domain_files.append(init_domains_both_dir+"domain_"+str(i)+"_"+str(j)+".pddl")
flag_init_domains_type_list.append(0)
return init_domain_files, num_incorrect_pals_list, flag_init_domains_type_list, last_reduced_capability_num_dropped_pals
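# The returned lists are index-aligned: init_domain_files[k] is generated with
# num_incorrect_pals_list[k] affected PALs using generation type
# flag_init_domains_type_list[k].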
if __name__ == "__main__":
'''
Generates init domains using the domainname_#PALsdropped_#file.pddl filename format
'''
domains = config.domains
flag_init_domains_type_dict = config.flag_init_domains_type_dict
flag_init_domains_type = config.flag_init_domains_type
num_affected_pals = config.num_affected_pals
domain_to_num_files_each_pal = config.domain_to_num_files_each_pal
domain_to_total_pals = config.domain_to_total_pals
domain_to_total_pals_increased_applicability = config.domain_to_total_pals_increased_applicability
domains_mix_intervals = config.domains_mix_intervals
for domain_name in domains:
num_files_each_pal = domain_to_num_files_each_pal[domain_name]
init_domains_random_dir = "domains/"+domain_name+"/domains/init_domains_random/"
init_domains_increased_applicability_dir = "domains/"+domain_name+"/domains/init_domains_increased_applicability/"
init_domains_both_dir = "domains/"+domain_name+"/domains/init_domains_mix/"
domain_path = "domains/"+domain_name+"/domains/domain.pddl"
domain_template_path = "domains/"+domain_name+"/domains/domain_template.pddl"
problem_path = "domains/"+domain_name+"/instances/instances/instance-1.pddl"
actions, predicates, PAL_list, PAL_list_increased_applicability, PA_to_ModeTuple_dict, \
num_total_pals, num_total_pals_increased_applicability, _ = parse_domain(domain_path) # parse IPC domain
domain_to_total_pals[domain_name] = num_total_pals
domain_to_total_pals_increased_applicability[domain_name] = num_total_pals_increased_applicability
init_domain_files, num_incorrect_pals_list, flag_init_domains_type_list, last_reduced_capability_num_dropped_pals = initialize_dirs(init_domains_random_dir, init_domains_increased_applicability_dir, init_domains_both_dir, domain_to_total_pals, domain_name, flag_init_domains_type, num_affected_pals, num_files_each_pal, domains_mix_intervals[domain_name])
print(init_domain_files)
for i in range(len(init_domain_files)):
init_domain_file = init_domain_files[i]
num_incorrect_pals = num_incorrect_pals_list[i]
flag_init_domains_type_ = flag_init_domains_type_list[i]
print("Generating ",flag_init_domains_type_dict[flag_init_domains_type_])
print("\nInit domain file: "+init_domain_file)
print("Num incorrect PALs:",num_incorrect_pals)
empty_flag = True
while empty_flag:
if flag_init_domains_type_ == 0:
new_action_to_pre_eff_string, new_PA_to_ModeTuple_dict, sampled_PALs, empty_flag = get_random_model(actions, predicates, PAL_list, PA_to_ModeTuple_dict, num_incorrect_pals)
elif flag_init_domains_type_ == 1:
new_action_to_pre_eff_string, new_PA_to_ModeTuple_dict, sampled_PALs, empty_flag = get_random_increased_applicability_model(actions, predicates, PAL_list, PA_to_ModeTuple_dict, PAL_list_increased_applicability, num_incorrect_pals)
INFO_STRING = get_INFO_STRING(sampled_PALs, num_incorrect_pals)
INFO_STRING += ";; Last_reduced_capability_num_dropped_pals: "+str(last_reduced_capability_num_dropped_pals)+"\n"
with open(domain_template_path, 'r') as f:
template_domain_string = f.read()
for action_name in new_action_to_pre_eff_string.keys():
pre = "%"+action_name.upper()+" PRECONDITION%"
eff = "%"+action_name.upper()+" EFFECTS%"
template_domain_string = template_domain_string.replace(pre, new_action_to_pre_eff_string[action_name]["pre"])
template_domain_string = template_domain_string.replace(eff, new_action_to_pre_eff_string[action_name]["eff"])
template_domain_string = template_domain_string.replace("%INFO_STRING%", INFO_STRING)
with open(init_domain_file, "w") as f:
f.write(template_domain_string)
for domain_name in domain_to_total_pals.keys():
print("\nDomain:",domain_name)
print("Total #PALs in IPC domain:",domain_to_total_pals[domain_name])
print("Total #PALs that can be dropped from preconditions to generate increased applicability init domains:",domain_to_total_pals_increased_applicability[domain_name])
| 19,814 | 48.168734 | 363 |
py
|
DAAISy
|
DAAISy-main/dependencies/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/dependencies/fama/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/dependencies/fama/src/compiler3.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
def get_all_types(task, itype):
output=[itype]
# for i in task.types:
# if itype in i.name:
# if i.basetype_name!="object":
# output = output + [str(i.basetype_name)]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
def get_max_steps_from_plans(ps):
iout = 0
for plan in ps:
iout = max(iout, len(plan))
return iout
def get_max_vars_from_plans(ps):
iout = 0
for plan in ps:
for a in plan:
iout = max(iout, len(a.split(" ")) - 1)
return iout
def get_action_schema_from_plans(ps, task):
known_actions = [a.name for a in task.actions]
schemas = []
for plan in ps:
for a in plan:
counter = 0
name = a.replace("(", "").replace(")", "").split(" ")[0]
item = [name]
for p in a.replace("(", "").replace(")", "").split(" ")[1:]:
for o in task.objects:
if p.upper() == o.name.upper():
item.append(str(o.type_name))
counter = counter + 1
break
if item not in schemas:
schemas.insert(0, item)
return [x for x in schemas if x[0] not in known_actions], [x for x in schemas if x[0] in known_actions]
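# Each schema has the form [action_name, type_1, ..., type_k]; schemas whose
# action already appears in the input domain are returned separately as known.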
def get_predicates_schema_from_plans(task):
preds = []
for p in task.predicates:
item = []
if p.name == "=":
continue
item.append(p.name)
for a in p.arguments:
item.append(a.type_name)
preds = preds + [item]
return preds
def get_static_predicates(tasks, predicates):
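# A predicate is a static candidate if, in every task, its init literals
# coincide with its positive goal literals; a candidate is "reflexive" when
# every init instance has arity >= 2 with all arguments identical.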
candidates = set([p[0] for p in predicates])
for task in tasks:
task_candidates = set()
for predicate in candidates:
init_predicates = set([p for p in task.init if p.predicate == predicate])
goal_predicates = set([p for p in task.goal.parts if p.predicate == predicate and not p.negated])
if init_predicates == goal_predicates:
task_candidates.add(predicate)
candidates = candidates.intersection(task_candidates)
reflexive_static_predicates = dict()
for candidate in candidates:
reflexive_static_predicates[candidate] = True
for task in tasks:
init_predicates = set([p for p in task.init if p.predicate == candidate])
for predicate in init_predicates:
if len(predicate.args) == 1 or len(set(predicate.args)) != 1:
reflexive_static_predicates[candidate] = False
break
return [p for p in predicates if p[0] in candidates], reflexive_static_predicates
def get_static_precondition(predicate, action, plans, tasks):
static_preconditions = set()
params = [pddl.pddl_types.TypedObject("?o" + str(i), action[i]) for i in range(1, len(action))]
params = [x for x in params if x.type_name in predicate[1:]]
num_predicate_params = len(predicate[1:])
possible_param_tuples = list(itertools.combinations(params, num_predicate_params))
for t in possible_param_tuples:
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in t]))
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(t)]))
if len([x for x in action[1:] if x in predicate[1:]]) >= num_predicate_params:
all_instances = set()
for task in tasks:
all_instances.update([p.args for p in task.init if p.predicate == predicate[0]])
all_variables = set(sum(all_instances, ()))
for a in [item for sublist in plans for item in sublist]:
a = a.replace('(','').replace(')','').split(" ")
if a[0] == action[0]:
variables = [x for x in a[1:] if x in all_variables]
possible_tuples = list(itertools.combinations(variables, num_predicate_params))
static_preconditions_candidates = set()
for i in range(len(possible_tuples)):
if possible_tuples[i] in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in possible_param_tuples[i]]))
elif tuple(reversed(possible_tuples[i])) in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(possible_param_tuples[i])]))
static_preconditions = static_preconditions.intersection(static_preconditions_candidates)
return list(static_preconditions)
def possible_pred_for_action(task, p, a, tup):
if (len(p) > len(a)):
return False
# action_types = [set(get_all_types(task, str(a[int(tup[i])]))) for i in range(len(tup))]
action_types = [set([a[int(tup[i])]]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x)) for x in p[1:]]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
# for i in range(0, len(tup)):
# bfound = False
# for t in get_all_types(task, str(a[int(tup[i])])):
# if t in get_all_types(task, str(p[i + 1])):
# bfound = True
# if bfound == False:
# return False
return all(fits)
def is_binary_mutex(axiom):
return (isinstance(axiom.condition, pddl.UniversalCondition)
and isinstance(axiom.condition.parts[0], pddl.Disjunction)
and len(axiom.condition.parts[0].parts) == 2
and isinstance(axiom.condition.parts[0].parts[0], pddl.NegatedAtom)
and isinstance(axiom.condition.parts[0].parts[1], pddl.NegatedAtom))
def get_binary_mutexes(fd_task):
binary_mutexes = dict()
for axiom in fd_task.axioms:
if is_binary_mutex(axiom):
part1 = axiom.condition.parts[0].parts[0]
part2 = axiom.condition.parts[0].parts[1]
args1 = part1.args
args2 = part2.args
arity1 = len(args1)
arity2 = len(args2)
matchings = list()
if arity1 == 0:
matchings.extend([(-1,i) for i in range(arity2)])
elif arity2 == 0:
matchings.extend([(i, -1) for i in range(arity1)])
else:
for i in range(arity1):
for j in range(arity2):
if args1[i] == args2[j]:
matchings.append((i,j))
# print(part1, part2)
# print(matchings)
for tup in itertools.product(range(1, MAX_VARS+1), repeat=max(arity1, arity2)):
vars = ["var" + str(t) for t in tup]
# print(vars)
m1 = [vars[i] for i in range(arity1)]
for tup2 in itertools.product(vars, repeat=arity2):
m2 = [t for t in tup2]
# print(m1, m2)
match_all = True
for matching in matchings:
if matching[0] == -1 or matching[1] == -1:
continue
else:
match_all = match_all & (m1[matching[0]] == m2[matching[1]])
if match_all:
key = tuple([part1.predicate] + m1)
mutex = tuple([part2.predicate] + m2)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
key = tuple([part2.predicate] + m2)
mutex = tuple([part1.predicate] + m1)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
# print(key, mutex)
return binary_mutexes
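# Sketch of the resulting map (hypothetical predicates): a binary mutex axiom
# over at(?x) and holding(?x) yields binary_mutexes[("at", "var1")] ==
# {("holding", "var1")} plus the symmetric entry, replicated for every variable
# renaming up to MAX_VARS.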
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
check_static_predicates = True
sys.argv.remove("-s")
else:
check_static_predicates = False
if "-i" in sys.argv:
program_with_invariants = True
sys.argv.remove("-i")
else:
program_with_invariants = False
domain_folder_name = sys.argv[1]
domain_file = sys.argv[2]
problems_prefix_filename = sys.argv[3]
plans_prefix_filename = sys.argv[4]
input_level = int(sys.argv[5])
except:
print "Usage:"
print sys.argv[0] + "[-s] [-i] <domain> <domain filename> <problems prefix> <plans prefix> <input level (0 plans, 1 steps, 2 len(plan), 3 minimum)>"
sys.exit(-1)
# Reading the example plans
plans = []
i = 0
for filename in sorted(glob.glob(domain_folder_name + "/" + plans_prefix_filename + "*")):
plans.append([])
lcounter = 0
file = open(filename, 'r')
for line in file:
if input_level != config.INPUT_STEPS or (input_level == config.INPUT_STEPS and lcounter % 3 != 0):
plans[i].append(line.replace("\n", "").split(": ")[1])
lcounter = lcounter + 1
file.close()
i = i + 1
# Creating a FD task with the domain and the first problem file
domain_filename = "{}{}.pddl".format(domain_folder_name, domain_file)
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problems = []
fd_tasks = []
counter = 0
for problem_filename in sorted(glob.glob(domain_folder_name + "/" + problems_prefix_filename + "*")):
fd_problems = fd_problems + [pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)]
fd_tasks = fd_tasks + [pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[counter])]
counter = counter + 1
fd_task = copy.deepcopy(fd_tasks[0])
known_action_models = [action for action in fd_task.actions]
MAX_STEPS = get_max_steps_from_plans(plans)
MAX_VARS = get_max_vars_from_plans(plans)
new_actions, known_actions = get_action_schema_from_plans(plans, fd_task)
actions = new_actions + known_actions
predicates = get_predicates_schema_from_plans(fd_task)
static_predicates, reflexive_static_predicates = get_static_predicates(fd_tasks, predicates)
binary_mutexes = get_binary_mutexes(fd_task)
# Compilation Problem
init_aux = copy.deepcopy(fd_task.init)
fd_task.init = []
fd_task.init.append(pddl.conditions.Atom("modeProg", []))
allpres = []
for a in new_actions: # All possible preconditions are initially programmed
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
# fd_task.init.append(
# pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
allpres = allpres + [str("pre_" + "_".join([p[0]] + [a[0]] + vars))]
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i), "i" + str(i + 1)]))
goals = []
for i in range(0, len(plans) + 1):
goals = goals + [pddl.conditions.Atom("test" + str(i), [])]
fd_task.goal = pddl.conditions.Conjunction(goals)
# Compilation Domain
if input_level <= config.INPUT_LENPLAN:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 2):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
fd_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
for i in range(0, len(plans) + 1):
fd_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if input_level <= config.INPUT_LENPLAN:
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
# for axiom in fd_task.axioms:
# fd_task.predicates.append(pddl.predicates.Predicate(axiom.name, []))
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
fd_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("del_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p[0]] + [a[0]] + vars), []))
if input_level <= config.INPUT_STEPS:
for a in actions:
fd_task.predicates.append(pddl.predicates.Predicate("plan-" + a[0],
[pddl.pddl_types.TypedObject("?i", "step")] + [
pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i
in range(1, len(a))]))
learned_static_preconditions = dict()
# Original domain actions
# old_actions = copy.deepcopy(actions)
for a in actions:
pre = list()
eff = list()
is_known_action = False
# Add derived predicates
pre.extend([invariant.condition for invariant in fd_task.axioms])
if a in known_actions:
is_known_action = True
for action in fd_task.actions:
if action.name == a[0]:
if isinstance(action.precondition, pddl.conditions.Atom):
pre.append(action.precondition)
else:
pre.extend([x for x in action.precondition.parts])
eff = action.effects
fd_task.actions.remove(action)
break
params = [pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i in range(1, len(a))]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
params = params + [pddl.pddl_types.TypedObject("?i1", "step")]
params = params + [pddl.pddl_types.TypedObject("?i2", "step")]
if check_static_predicates and input_level <= config.INPUT_STEPS:
for static_predicate in static_predicates:
static_preconditions = get_static_precondition(static_predicate, a, plans, fd_tasks)
learned_static_preconditions[a[0]] = list()
for static_precondition in static_preconditions:
pre.append(static_precondition)
learned_static_preconditions[a[0]].append(static_precondition)
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
if input_level <= config.INPUT_PLANS and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("plan-" + a[0], ["?i1"] + ["?o" + str(i) for i in range(1, len(a))])]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("current", ["?i1"])]
pre = pre + [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
disjunction = pddl.conditions.Disjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])] + [
pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup])])
pre = pre + [disjunction]
if input_level < config.INPUT_STEPS:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif input_level < config.INPUT_MINIMUM:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p[0], ["?o" + str(t) for t in tup]))]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup]))]
fd_task.actions.append(pddl.actions.Action(a[0], params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action schema
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
params = []
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_pre_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p in static_predicates and check_static_predicates:
continue
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
if program_with_invariants:
aux = [pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
key = tuple([p[0]] + vars)
for mutex in binary_mutexes.get(key, set()):
aux = aux + [
pddl.conditions.NegatedAtom("add_" + "_".join([mutex[0]] + [a[0]] + [e for e in mutex[1:]]),
[])]
pre = pre + [pddl.conditions.Conjunction(aux)]
else:
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Atom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []), pddl.conditions.Atom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.NegatedAtom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []), pddl.conditions.Atom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_eff_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for validating the tests
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
#pre.extend([invariant.condition for invariant in fd_task.axioms])
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test0", []))]
for f in init_aux:
if f.predicate != "=":
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), f)]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for i in range(0, len(plans[0])):
action = plans[0][i]
name = action[1:-1].split(" ")[0]
params = action[1:-1].split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + name, ["i" + str(i + 1)] + params))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("modeProg", []))]
fd_task.actions.append(pddl.actions.Action("validate_0", [], 0, pddl.conditions.Conjunction(pre), eff, 0))
for i in range(0, len(plans)):
pre = []
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
pre.extend([invariant.condition for invariant in fd_task.axioms])
for j in range(0, len(plans) + 1):
if j < i + 1:
pre = pre + [pddl.conditions.Atom("test" + str(j), [])]
else:
pre = pre + [pddl.conditions.NegatedAtom("test" + str(j), [])]
if input_level <= config.INPUT_LENPLAN:
pre = pre + [pddl.conditions.Atom("current", ["i" + str(len(plans[i]) + 1)])]
current_state = set()
for g in fd_tasks[i].goal.parts:
pre = pre + [g]
if isinstance(g, pddl.Atom):
current_state.add(g)
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test" + str(i + 1), []))]
if i < len(plans)-1:
next_state = set()
for atom in fd_tasks[i+1].init:
if atom.predicate != "=":
next_state.add(atom)
lost_atoms = current_state.difference(next_state)
new_atoms = next_state.difference(current_state)
for atom in lost_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(atom.predicate, atom.args))]
for atom in new_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(atom.predicate, atom.args))]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(len(plans[i]) + 1)]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for j in range(0, len(plans[i])):
name = "plan-" + plans[i][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(name, pars))]
if i < len(plans) - 1:
for j in range(0, len(plans[i + 1])):
name = "plan-" + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(name, pars))]
fd_task.actions.append(
pddl.actions.Action("validate_" + str(i + 1), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
# Writing the compilation output domain and problem
fdomain = open("aux_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
fdomain = open("aux_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fdomain.close()
# Solving the compilation
if input_level <= config.INPUT_LENPLAN:
starting_horizon = str(len(plans) + sum([len(p) for p in plans]))
else:
starting_horizon = str(len(plans))
cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " aux_domain.pddl aux_problem.pddl -F " + starting_horizon + " " + config.PLANNER_PARAMS + " > planner_out.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Reading the plan output by the compilation
pres = [[] for _ in range(len(new_actions))]
dels = [[] for _ in range(len(new_actions))]
adds = [[] for _ in range(len(new_actions))]
file = open(config.OUTPUT_FILENAME, 'r')
for line in file:
keys = "(program_pre_"
if keys in line:
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a[0] for a in new_actions].index(action[0])
pred = [aux[0].split("_")[0]]
if [aux[0].split("_")[2:]][0] != ['']:
pred = pred + [aux[0].split("_")[2:]][0]
# allpres.remove(str("pre_" + pred[0] + "_" + action[0] + "_" + "_".join(map(str, pred[1:]))))
pres[indexa].append(pred)
keys = "(program_eff_"
if keys in line:
# act = p.split("_")[2]
# pred = [p.split("_")[1]] + p.split("_")[3:]
# indexa = [a[0] for a in new_actions].index(act)
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a[0] for a in new_actions].index(action[0])
pred = [aux[0].split("_")[0]]
if [aux[0].split("_")[2:]][0] != ['']:
pred = pred + [aux[0].split("_")[2:]][0]
if not pred in pres[indexa]:
adds[indexa].append(pred)
else:
dels[indexa].append(pred)
file.close()
counter = 0
new_fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[0])
new_fd_task.actions = []
for action in new_actions:
params = ["?o" + str(i + 1) for i in range(0, len(action[1:]))]
ps = [pddl.pddl_types.TypedObject(params[i], action[i + 1]) for i in range(0, len(params))]
pre = []
if check_static_predicates:
pre += learned_static_preconditions.get(action[0], [])
for p in pres[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
pre = pre + [pddl.conditions.Atom(p[0], args)]
eff = []
for p in dels[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
for p in adds[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action[0], ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open("learned_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, fd_domain))
fdomain.close()
sys.exit(0)
| 30,587 | 42.021097 | 222 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/experimenter_FAMA.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools, math, time
import pddl, pddl_parser
import fdtask_to_pddl
import numpy as np
import model_evaluator
import config
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
index = sys.argv.index("-s")
check_static_predicates = "-s "
sys.argv.pop(index)
else:
check_static_predicates = ""
if "-i" in sys.argv:
index = sys.argv.index("-i")
use_invariants = "-i "
sys.argv.pop(index)
else:
use_invariants = ""
if "-g" in sys.argv:
index = sys.argv.index("-g")
positive_goal = "-g "
sys.argv.pop(index)
else:
positive_goal = ""
if "-f" in sys.argv:
index = sys.argv.index("-f")
finite_steps = "-f "
sys.argv.pop(index)
else:
finite_steps = ""
if "-t" in sys.argv:
index = sys.argv.index("-t")
trace_prefix = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_prefix = "trace"
if "-l" in sys.argv:
index = sys.argv.index("-l")
trace_min = int(sys.argv[index+1])
trace_max = int(sys.argv[index+2])
sys.argv.pop(index)
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_min = None
if "-avg" in sys.argv:
index = sys.argv.index("-avg")
avg = int(sys.argv[index+1])
sys.argv.pop(index)
sys.argv.pop(index)
else:
avg = None
if "-c" in sys.argv:
index = sys.argv.index("-c")
cnt = int(sys.argv[index+1])
sys.argv.pop(index)
sys.argv.pop(index)
else:
cnt = 0
domain_folder_name = sys.argv[1]
action_observability = sys.argv[2]
state_observability = sys.argv[3]
goal_observability = sys.argv[4]
except:
print "Usage:"
print sys.argv[0] + "[-s] <domain folder> <action observability (0-100)> <state observability (0-100)> <goal observability (0-100)>"
sys.exit(-1)
LEARNED_DOMAIN_PATH = config.OUTPUT_PATH+"/learned_domain.pddl"
# outdir = "results/" + s + "-"+ d + "/"
# cmd = "mkdir " + outdir
# print("\n\nExecuting... " + cmd)
# os.system(cmd)
base_cmd = config.PROJECT_PATH + "/src/FAMA.py " + use_invariants + positive_goal + finite_steps + check_static_predicates + domain_folder_name + " " + action_observability + " " + state_observability + " " + goal_observability + " -ct " + str(cnt) + " -t " + trace_prefix
if avg is None:
if trace_min is not None:
cmd = base_cmd + " -l {} {}".format(trace_min, trace_max)
else:
cmd = base_cmd
print "\n\nExecuting... " + cmd
tic = time.time()
os.system(cmd)
toc = time.time()
processing_time = toc - tic
try:
domain_name, best_evaluation, best_matches = model_evaluator.evaluate(LEARNED_DOMAIN_PATH, domain_folder_name+"/domain.pddl", True)
print(" & ".join(
[domain_name] + [str(round(e, 2)) for e in best_evaluation]) + " & {}".format(round(processing_time,2)) + " \\\\" + " % {}".format(best_matches))
except:
print("No solution found")
else:
global_results = np.array([0. for _ in range(9)])
timeouts = 0
for i in range(avg):
cmd = base_cmd + " -l {} {}".format(i, i+1)
# print("\n\nExecuting... " + cmd)
tic = time.time()
os.system(cmd)
toc = time.time()
processing_time = toc - tic
try:
domain_name, best_evaluation, best_matches = model_evaluator.evaluate(LEARNED_DOMAIN_PATH,
domain_folder_name+"/domain.pddl",
True)
results = np.array([e for e in best_evaluation] + [processing_time])
global_results += results
# print(" & ".join([domain_name] + [str(round(e, 2)) for e in best_evaluation] + [str(round(processing_time,2))]))
except:
timeouts += 1
results = np.array([0. for _ in range(9)] + [processing_time])
global_results /= avg
print(" & ".join([domain_name] + [str(round(e, 2)) for e in global_results]) + " // " + str(timeouts))
sys.exit(0)
| 4,360 | 28.268456 | 272 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/FAMA.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
import numpy as np
def get_max_vars(actions):
max_vars = 0
for a in actions:
max_vars = max(max_vars, a.num_external_parameters)
return max_vars
def get_max_steps(traces):
traces_steps = list()
for trace in traces:
not_empty_states = len([state for state in trace.states if state != []])
not_empty_actions = len([action for action in trace.actions if action != []])
traces_steps.append(max(not_empty_states, not_empty_actions))
return sum(traces_steps), max(traces_steps)
def get_all_types(task, itype):
output=[itype]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
def possible_pred_for_action(task, p, a, tup):
if (len(p.arguments) > len(a.parameters)):
return False
action_types = [set([a.parameters[int(tup[i])-1].type_name]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x.type_name)) for x in p.arguments]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
return all(fits)
def get_static_predicates(state_trajectories, predicates):
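# A predicate is static here if its literal set is identical across every
# state of every observed trajectory.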
candidates = set([p.name for p in predicates])
for trajectory in state_trajectories:
trace_candidates = set()
for predicate in candidates:
static = True
init_literals = set([l for l in trajectory[0] if l.predicate == predicate])
for state in trajectory[1:]:
state_literals = set([l for l in state if l.predicate == predicate])
if init_literals != state_literals:
static = False
break
if static:
trace_candidates.add(predicate)
candidates = candidates.intersection(trace_candidates)
# reflexive_static_predicates = dict()
# for candidate in candidates:
# reflexive_static_predicates[candidate] = True
# for trace in traces:
# init_literals = set([l for l in trace.init if l.predicate == candidate])
# for literal in init_literals:
# if len(literal.args) == 1 or len(set(literal.args)) != 1:
# reflexive_static_predicates[candidate] = False
# break
return candidates
# return [p for p in predicates if p.name in candidates]
def get_mutexes(predicate, vars, model_representation_predicates, invariants):
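# Collects, for `predicate` instantiated with `vars`, the model-representation
# predicates that some invariant declares mutex with it, matching on the shared
# variable when the invariant names an argument position.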
mutexes = []
for prop1 in [x for x in invariants.keys() if predicate.name == x[0]]:
var_index = prop1[1] - 1
if var_index != -1:
var = vars[prop1[1] - 1]
for prop2 in invariants[prop1]:
for mr_pred in model_representation_predicates:
if mr_pred[0] == prop2[0] and prop2[1] == 0:
mutexes += [mr_pred]
if mr_pred[0] == prop2[0] and mr_pred[prop2[1]] == var:
mutexes += [mr_pred]
else:
var = None
for prop2 in invariants[prop1]:
for mr_pred in model_representation_predicates:
if mr_pred[0] == prop2[0]:
mutexes += [mr_pred]
return mutexes
def complete_state_from_invariants(state, invariants, objects, predicate_dict, type_dict):
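# Expands a partially observed state with negative literals entailed by the
# invariants: for each positive literal, every predicate that is mutex with it
# over the shared argument position is grounded against all type-compatible
# objects and added as a NegatedAtom; uniqueness invariants (prop1 == prop2)
# likewise negate all other groundings of the same predicate.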
new_literals = list()
for literal in [l for l in state if not l.negated]:
for prop1 in [x for x in invariants.keys() if x[0] == literal.predicate]:
for prop2 in invariants.get(prop1, []):
if prop1 != prop2: # mutex
pred = prop2[0]
if prop1[1] == 0:
arg = predicate_dict[pred].arguments[0]
fitting_types = [type_dict[arg.type_name].name] + [t for t in type_dict.keys() if
arg.type_name in type_dict[
t].supertype_names]
fitting_objects = [o.name for o in objects if o.type_name in fitting_types]
for o in fitting_objects:
new_literals.append(pddl.conditions.NegatedAtom(pred, [o]))
elif prop2[1] == 0:
new_literals.append(pddl.conditions.NegatedAtom(pred, []))
else:
common_object = literal.args[prop1[1] - 1]
common_object_pos = prop2[1] - 1
arrays = []
for i in range(len(predicate_dict[pred].arguments)):
if i == common_object_pos:
arrays += [[common_object]]
else:
arg = predicate_dict[pred].arguments[i]
fitting_types = [type_dict[arg.type_name].name] + [t for t in type_dict.keys() if
arg.type_name in type_dict[
t].supertype_names]
fitting_objects = [o.name for o in objects if o.type_name in fitting_types]
arrays += [fitting_objects]
combinations = list(itertools.product(*arrays))
for comb in combinations:
new_literals.append(pddl.conditions.NegatedAtom(pred, comb))
elif prop1 == prop2: # uniqueness
pred = prop2[0]
common_object = literal.args[prop1[1] - 1]
common_object_pos = prop2[1] - 1
arrays = []
for i in range(len(predicate_dict[pred].arguments)):
if i == common_object_pos:
arrays += [[common_object]]
else:
arg = predicate_dict[pred].arguments[i]
fitting_types = [type_dict[arg.type_name].name] + [t for t in type_dict.keys() if
arg.type_name in type_dict[
t].supertype_names]
fitting_objects = [o.name for o in objects if o.type_name in fitting_types]
arrays += [fitting_objects]
combinations = list(itertools.product(*arrays))
for comb in combinations:
if literal.args != comb:
new_literals.append(pddl.conditions.NegatedAtom(pred, comb))
state.extend(new_literals)
return list(set(state))
def get_invariants(axioms):
invariants = dict()
for axiom in axioms:
pred1 = axiom.condition.parts[0].parts[0]
pred2 = axiom.condition.parts[0].parts[1]
if len(pred1.args) != 0 and len(pred2.args) != 0:
common_arg = set([x for x in pred1.args]).intersection(set([y for y in pred2.args])).pop()
index1 = pred1.args.index(common_arg) + 1
index2 = pred2.args.index(common_arg) + 1
prop1 = tuple([pred1.predicate, index1]) # "{}_{}".format(pred1.predicate, str(index1))
prop2 = tuple([pred2.predicate, index2]) # "{}_{}".format(pred2.predicate, str(index2))
elif len(pred1.args) == 0:
index1 = 0
index2 = 1
prop1 = tuple([pred1.predicate, index1]) # "{}_{}".format(pred1.predicate, str(index1))
prop2 = tuple([pred2.predicate, index2]) # "{}_{}".format(pred2.predicate, str(index2))
elif len(pred2.args) == 0:
index1 = 1
index2 = 0
prop1 = tuple([pred1.predicate, index1]) # "{}_{}".format(pred1.predicate, str(index1))
prop2 = tuple([pred2.predicate, index2]) # "{}_{}".format(pred2.predicate, str(index2))
inv = invariants.get(prop1, [])
if prop2 not in inv:
inv.append(prop2)
invariants[prop1] = inv
inv = invariants.get(prop2, [])
if prop1 not in inv:
inv.append(prop1)
invariants[prop2] = inv
return invariants
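# Sketch (hypothetical predicates): a mutex axiom relating at(?x, ?l) and
# holding(?x), with ?x shared, produces invariants[("at", 1)] == [("holding", 1)]
# and the symmetric entry; the integer is the 1-based position of the shared
# argument, with 0 reserved for 0-ary predicates.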
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
index = sys.argv.index("-s")
check_static_predicates = True
sys.argv.pop(index)
else:
check_static_predicates = False
if "-v" in sys.argv:
index = sys.argv.index("-v")
learned_domain = sys.argv[index+1]
validation_mode = True
sys.argv.pop(index)
sys.argv.pop(index)
else:
validation_mode = False
if "-f" in sys.argv:
index = sys.argv.index("-f")
finite_steps = True
sys.argv.pop(index)
else:
finite_steps = False
if "-t" in sys.argv:
index = sys.argv.index("-t")
trace_prefix = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_prefix = "trace"
if "-l" in sys.argv:
index = sys.argv.index("-l")
trace_min = int(sys.argv[index+1])
trace_max = int(sys.argv[index+2])
sys.argv.pop(index)
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_min = None
if "-c" in sys.argv:
index = sys.argv.index("-c")
config_filename = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
config_filename = None
if "-d" in sys.argv:
index = sys.argv.index("-d")
distance_metric = True
sys.argv.pop(index)
else:
distance_metric = False
if "-i" in sys.argv:
index = sys.argv.index("-i")
use_invariants = True
sys.argv.pop(index)
else:
use_invariants = False
if "-g" in sys.argv:
index = sys.argv.index("-g")
positive_goals = True
sys.argv.pop(index)
else:
positive_goals = False
if "-ct" in sys.argv:
index = sys.argv.index("-ct")
cnt = int(sys.argv[index+1])
sys.argv.pop(index)
sys.argv.pop(index)
else:
cnt = 0
domain_folder_name = sys.argv[1]
action_observability = float(sys.argv[2])/100
state_observability = float(sys.argv[3])/100
goal_observability = float(sys.argv[4])/100
# if action_observability == 1 or state_observability == 1:
# finite_steps = True
except:
print "Usage:"
print sys.argv[0] + "[-i] [-g] [-s] [-f] [-v learned_domain] <domain folder> <action observability (0-100)> <state observability (0-100)> <goal observability> -t trace_prefix -l input_limit"
sys.exit(-1)
LEARNED_DOMAIN_PATH = config.OUTPUT_PATH+"/learned_domain.pddl"
# Read the domain file
if not validation_mode:
domain_filename = "{}domain".format(domain_folder_name)
else:
domain_filename = learned_domain
domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(domain_pddl)
# INVARIANTS
if use_invariants:
invariants = get_invariants(axioms)
else:
invariants = dict()
# FILTERS
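# Optional -c config: three lines, each a comma-separated predicate list
# (observable trace predicates, learnable preconditions, learnable effects).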
if config_filename is not None:
    config_file = open(config_filename, "r")
    filters = config_file.readlines()
trace_filter = filters[0].strip().split(", ")
pres_filter = filters[1].strip().split(", ")
effects_filter = filters[2].strip().split(", ")
else:
trace_filter = pres_filter = effects_filter = [p.name for p in predicates]
# Read the input traces
traces = list()
file_list = sorted(glob.glob(domain_folder_name + trace_prefix + "*"))[:cnt]
print(file_list)
for filename in file_list:
trace_pddl = pddl_parser.pddl_file.parse_pddl_file("trace", filename)
traces.append(pddl_parser.parsing_functions.parse_trace_pddl(trace_pddl, predicates, action_observability, state_observability, goal_observability, positive_goals, finite_steps))
for trace in traces:
for i in range(len(trace.states)):
trace.states[i] = [atom for atom in trace.states[i] if atom.predicate in trace_filter]
if trace_min is not None:
traces = traces[trace_min:trace_max]
MAX_VARS = get_max_vars(actions)
TOTAL_STEPS, MAX_STEPS = get_max_steps(traces)
# static_predicates, reflexive_static_predicates = get_static_predicates(traces, predicates)
### LEARNING PROBLEM
# The objects of the original domain for the learning task
# is the union of all objects in the input traces
objects = list()
for trace in traces:
objects.extend(trace.objects)
objects = list(set(objects))
# Empty initial state for now
init = []
# Empty goal for now
goal = []
original_task = pddl.Task(domain_name, 'learning_problem', domain_requirements, types, objects,
predicates, functions, init, goal, actions, axioms, True)
learning_task = copy.deepcopy(original_task)
learning_task.actions = []
for trace in traces:
for state in trace.states:
state = complete_state_from_invariants(state, invariants, objects, predicate_dict, type_dict)
trace.goal = complete_state_from_invariants(trace.goal, invariants, objects, predicate_dict, type_dict)
### LEARNING DOMAIN
# Define "modeProg" predicate
learning_task.predicates.append(pddl.predicates.Predicate("modeProg1", []))
learning_task.predicates.append(pddl.predicates.Predicate("modeProg2", []))
# Define "disabled" predicate
learning_task.predicates.append(pddl.predicates.Predicate("disabled", []))
# Define "test" predicates
for i in range(1, TOTAL_STEPS+2):
learning_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if action_observability > 0:
# Define "step" domain type
learning_task.types.append(pddl.pddl_types.Type("step", "None"))
# Define "current" predicate. Example (current ?i - step)
learning_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
# Define "inext" predicate. Example (inext ?i1 - step ?i2 - step)
learning_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
learning_task.predicates.append(pddl.predicates.Predicate("action_applied", []))
all_pres = dict()
all_effs = dict()
# Define action model representation predicates
# Example (pre_clear_pickup_var1)
for a in actions:
var_ids = []
all_pres[a.name] = []
all_effs[a.name] = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p.arguments))):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
learning_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p.name] + [a.name] + vars), []))
learning_task.predicates.append(
pddl.predicates.Predicate("eff_" + "_".join([p.name] + [a.name] + vars), []))
all_pres[a.name] += [[p.name] + vars]
all_effs[a.name] += [[p.name] + vars]
# Define action validation predicates
# Example (plan-pickup ?i - step ?x - block)
if action_observability > 0:
for a in actions:
learning_task.predicates.append(pddl.predicates.Predicate("plan-" + a.name,
[pddl.pddl_types.TypedObject("?i", "step")] + a.parameters))
# Original domain actions
for a in actions:
original_params = [par.name for par in a.parameters]
params = [pddl.pddl_types.TypedObject("?o" + str(i+1), a.parameters[i].type_name ) for i in range(a.num_external_parameters)]
pre = list()
known_preconditions = list(a.precondition.parts)
for known_precondition in known_preconditions:
if known_precondition.predicate not in pres_filter:
pre += [pddl.conditions.Atom(known_precondition.predicate,
["?o" + str(original_params.index(arg) + 1) for arg in known_precondition.args])]
if finite_steps:
pre += [pddl.conditions.NegatedAtom("action_applied", [])]
eff = list()
known_effects = list(a.effects)
for known_effect in known_effects:
if not known_effect.literal.negated and known_effect.literal.predicate not in effects_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom(known_effect.literal.predicate,
["?o" + str(original_params.index(arg) + 1) for arg in
known_effect.literal.args]))]
elif known_effect.literal.negated and known_effect.literal.predicate not in effects_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(known_effect.literal.predicate,
["?o" + str(original_params.index(arg) + 1) for arg
in known_effect.literal.args]))]
# action_applied predicate
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("action_applied", []))]
# Add "step" parameters to the original actions
# This will allow to reproduce the input traces
if action_observability > 0:
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
# Add "modeProg" precondition
pre += [pddl.conditions.NegatedAtom("modeProg1", [])]
pre += [pddl.conditions.NegatedAtom("modeProg2", [])]
# Add all possible preconditions as implications
# Example (or (not (pre_on_stack_var1_var1 ))(on ?o1 ?o1))
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p.arguments))):
if p.name in pres_filter and possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.NegatedAtom(p.name, ["?o" + str(t) for t in tup])])
eff = eff + [
pddl.effects.Effect([], condition,
pddl.conditions.Atom("disabled", []))]
# INVARIANTS IN THE PRECONDITIONS
# precondition_mutexes = get_mutexes(p, vars, all_pres[a.name], invariants)
#
# for m in precondition_mutexes:
# m_tup = tuple([v.split("var")[1] for v in m[1:]])
# if p.name == m[0] and m_tup == tup:
# pass
# else:
# condition = pddl.conditions.Conjunction(
# [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), []),
# pddl.conditions.Atom(m[0], ["?o" + str(t) for t in m_tup])])
# eff = eff + [
# pddl.effects.Effect([], condition,
# pddl.conditions.Atom("disabled", []))]
# Define action validation condition
# Example (and (plan-pickup ?i1 ?o1) (current ?i1) (inext ?i1 ?i2))
if action_observability > 0:
# validation_condition = [pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]
validation_condition = [pddl.conditions.Atom("current", ["?i1"])]
validation_condition += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if action_observability == 1:
validation_condition += [pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]
pre += validation_condition
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif action_observability > 0:
# Define conditional effect to validate an action in the input traces
# This effect advances the program counter when an observed action is executed
pre += validation_condition
eff += [pddl.effects.Effect([], pddl.conditions.Conjunction([pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Conjunction([pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]), pddl.conditions.Atom("current", ["?i2"]))]
# Add all possible effects as conditional effects
# Example (when (and (del_ontable_put-down_var1 ))(not (ontable ?o1)))
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if p.name in effects_filter and possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
# del effects
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.Atom("eff_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition,
pddl.conditions.NegatedAtom(p.name, ["?o" + str(t) for t in tup]))]
# add effects
condition = pddl.conditions.Conjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.Atom("eff_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p.name, ["?o" + str(t) for t in tup]))]
# INVARIANTS IN THE EFFECTS
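                # If p is programmed as an add effect while a mutex literal m
                # holds in the state and is not deleted by this action, the
                # candidate model is flagged as disabled.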
effects_mutexes = get_mutexes(p, vars, all_pres[a.name], invariants)
for m in effects_mutexes:
m_tup = tuple([v.split("var")[1] for v in m[1:]])
if p.name == m[0] and m_tup == tup:
pass
else:
m_vars = ["var" + str(t) for t in m_tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.Atom("eff_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.NegatedAtom("pre_" + "_".join([m[0]] + [a.name] + m_vars), []),
pddl.conditions.Atom("eff_" + "_".join([m[0]] + [a.name] + m_vars), []),
pddl.conditions.Atom(m[0], ["?o" + str(t) for t in m_tup])])
eff = eff + [
pddl.effects.Effect([], condition,
pddl.conditions.Atom("disabled", []))]
condition = pddl.conditions.Conjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.Atom("eff_" + "_".join([p.name] + [a.name] + vars), []),
pddl.conditions.NegatedAtom("eff_" + "_".join([m[0]] + [a.name] + m_vars), []),
pddl.conditions.Atom(m[0], ["?o" + str(t) for t in m_tup])])
eff = eff + [
pddl.effects.Effect([], condition,
pddl.conditions.Atom("disabled", []))]
learning_task.actions.append(pddl.actions.Action(a.name, params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action model
for a in actions:
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
params = []
if p.name in pres_filter:
                        # Action for programming the preconditions
precondition_mutexes = get_mutexes(p, vars, all_pres[a.name], invariants)
pre = []
pre = pre + [pddl.conditions.Atom("modeProg1", [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"pre_" + "_".join([p.name] + [a.name] + vars), []))]
for mutex in precondition_mutexes:
eff += [pddl.effects.Effect([], pddl.conditions.Atom(
"pre_" + "_".join([mutex[0]] + [a.name] + mutex[1:]), []), pddl.conditions.Atom(
"disabled", []))]
learning_task.actions.append(
pddl.actions.Action("insert_pre_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p.name in effects_filter:
if not validation_mode:
# Action for programming the effects
effects_mutexes = get_mutexes(p, vars, all_effs[a.name], invariants)
pre = []
pre += [pddl.conditions.Atom("modeProg2", [])]
pre += [pddl.conditions.NegatedAtom("eff_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"eff_" + "_".join([p.name] + [a.name] + vars), []))]
for mutex in effects_mutexes:
condition = []
condition += [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
condition += [
pddl.conditions.Atom("pre_" + "_".join([mutex[0]] + [a.name] + mutex[1:]), [])]
condition += [
pddl.conditions.Atom("eff_" + "_".join([mutex[0]] + [a.name] + mutex[1:]), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Conjunction(condition),
pddl.conditions.Atom("disabled", []))]
condition = []
condition += [
pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
condition += [
pddl.conditions.NegatedAtom("pre_" + "_".join([mutex[0]] + [a.name] + mutex[1:]), [])]
condition += [
pddl.conditions.Atom("eff_" + "_".join([mutex[0]] + [a.name] + mutex[1:]), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Conjunction(condition),
pddl.conditions.Atom("disabled", []))]
learning_task.actions.append(
pddl.actions.Action("insert_eff_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
else:
# TODO
# Action for inserting negative effects
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"del_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("insert_del_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Action for inserting positive effects
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("insert_add_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
#TODO
if validation_mode:
if p.name in pres_filter:
# Delete precondition
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"pre_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_pre_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p.name in effects_filter:
# Delete add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_add_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Delete del effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("del_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"del_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_del_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# effects_programming action
# Switches from precondition programming (modeProg1) to effect programming (modeProg2)
pre = [pddl.conditions.Atom("modeProg1", [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("modeProg1", []))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("modeProg2", []))]
effects_programming_action = pddl.actions.Action("effects_programming", [], 0, pddl.conditions.Conjunction(pre), eff, 0)
learning_task.actions.append(effects_programming_action)
last_state_validations = list()
MAX_ISTEPS = 1
# ACTIONS FOR THE VALIDATION OF THE INPUT TRACES
del_plan_effects = [] # store plan predicates here to delete in the next validate action
# First validate action
# Disables modeProg2 to end the programming phase
pre = [pddl.conditions.Atom("modeProg2", [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("modeProg2", []))]
if action_observability > 0:
# Setups program counter to 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("current", ["i1"]))]
# Setups the initial state of the first trace
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), atom) for atom in traces[0].init]
num_traces = len(traces)
states_seen = 0 # Used for "test" predicates
total_actions_seen = 0
for j in range(len(traces)):
trace = traces[j]
trace_length = len(trace.states)
actions_seen = 0
for step in range(trace_length):
if trace.actions[step] != []:
actions_seen += 1
total_actions_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
del_plan_effects += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
if trace.states[step] != []:
states_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
if states_seen != 1:
pre += [pddl.conditions.Atom("test" + str(states_seen - 1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
learning_task.actions.append(
pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
pre = [pddl.conditions.NegatedAtom("modeProg1", [])]
pre = [pddl.conditions.NegatedAtom("modeProg2", [])]
# action_applied
pre += [pddl.conditions.Atom("action_applied", [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("action_applied", []))]
if action_observability > 0:
pre += [pddl.conditions.Atom("current", ["i" + str(actions_seen + 1)])]
pre += trace.states[step]
eff = del_plan_effects
if actions_seen != 0:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(actions_seen + 1)]))]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("test" + str(states_seen), []))]
# If it is the last/goal state of the trace but not the last trace
if step == trace_length -1 and j < len(traces)-1:
last_state_validations.append(states_seen+1)
next_state = set()
current_state = set()
for atom in traces[j + 1].init:
if not atom.negated:
next_state.add(atom)
for atom in traces[j].goal:
if not atom.negated:
current_state.add(atom)
lost_atoms = current_state.difference(next_state)
new_atoms = next_state.difference(current_state)
for atom in lost_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(atom.predicate, atom.args))]
for atom in new_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom(atom.predicate, atom.args))]
del_plan_effects = []
MAX_ISTEPS = max(MAX_ISTEPS, actions_seen + 1)
actions_seen = 0
# Goal validation
# pre = [pddl.conditions.NegatedAtom("modeProg", [])]
# pre += [pddl.conditions.Atom("current", ["i" + str(action_cnt)])]
# pre += trace.goal
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
# if action_cnt != 1:
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("current", ["i" + str(action_cnt)]))]
# Setup state for the next trace
# pre += [pddl.conditions.Atom("current", ["i" + str(actions_seen+1)])]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.Atom("test" + str(states_seen), []))]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
states_seen += 1
pre += [pddl.conditions.Atom("test" + str(states_seen-1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
learning_task.actions.append(pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
last_state_validations.append(states_seen)
# print("final states validated at: {}".format(", ".join([str(i) for i in last_state_validations])))
### LEARNING PROBLEM
learning_task.goal = pddl.conditions.Conjunction([pddl.conditions.Atom("test"+str(states_seen), []), pddl.conditions.NegatedAtom("disabled", [])])
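# The goal: the final observed state has been validated (testN) and no
# programmed precondition/mutex was ever violated (disabled).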
if action_observability > 0:
# Add inext fluents
for i in range(2, MAX_ISTEPS+1):
learning_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
    # Add step objects
for i in range(1, MAX_ISTEPS + 1):
learning_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
# Add modeProg fluent
learning_task.init.append(pddl.conditions.Atom("modeProg1", []))
# TODO
# size(M)
model_size = 0
# Add known preconditions and effects
for action in actions:
action_params = [p.name for p in action.parameters]
known_pres = list()
if type(action.precondition) is pddl.Conjunction and len(action.precondition.parts) > 0:
known_pres = action.precondition.parts
elif type(action.precondition) is pddl.Atom:
known_pres = [action.precondition]
filtered_known_pres = [pre for pre in known_pres if pre.predicate in pres_filter]
for pre in filtered_known_pres:
if type(pre) is pddl.conditions.Truth:
continue
model_representation_fluent = "pre_" + "_".join([pre.predicate] + [action.name] + ["var"+str(action_params.index(pre.args[i])+1) for i in range(len(pre.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_adds = [eff for eff in action.effects if
not eff.literal.negated and eff.literal.predicate in effects_filter]
for eff in filtered_known_adds:
model_representation_fluent = "add_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i
in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_dels = [eff for eff in action.effects if
eff.literal.negated and eff.literal.predicate in effects_filter]
for eff in filtered_known_dels:
model_representation_fluent = "del_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i
in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
### Write the learning task domain and problem to pddl
fdomain = open(LEARNED_DOMAIN_PATH, "w")
fdomain.write(fdtask_to_pddl.format_domain(learning_task, domain_pddl))
fdomain.close()
fdomain = open(config.OUTPUT_PATH+"/learning_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(learning_task, domain_pddl))
fdomain.close()
### Solve the learning task
# starting_horizon = str(2*TOTAL_STEPS + 3)
validation_steps = max(states_seen-1, total_actions_seen)*2 + 1
if action_observability == 1 and state_observability == 0:
validation_steps = states_seen + total_actions_seen
if finite_steps:
validation_steps = (states_seen-1) * 2 + 1
starting_horizon = str(validation_steps + 3)
if action_observability == 1 or state_observability == 1 or finite_steps:
ending_horizon = " -T " + starting_horizon
else:
ending_horizon = ""
plan_type = ""
if validation_mode:
plan_type = "-P 0"
ending_horizon = ""
cmd = "rm " + config.OUTPUT_FILENAME + " "+config.OUTPUT_PATH+"/planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " "+LEARNED_DOMAIN_PATH+" "+config.OUTPUT_PATH +"/learning_problem.pddl -F " + starting_horizon + " " +ending_horizon + " " + plan_type + " " + config.PLANNER_PARAMS + " > "+config.OUTPUT_PATH+"/planner_out.log"
# cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PROJECT_PATH + "util/FF-v2.3/ff" + " -o learning_domain.pddl -f learning_problem.pddl > planner_out.log"
# print("\n\nExecuting... " + cmd)
os.system(cmd)
# print("Executed ", cmd)
# file = open("planner_out.log", "r")
# lines = file.readlines()
# for i in range(len(lines)):
# if "step" in lines[i]:
# break
# lines = lines[i:]
# for i in range(len(lines)):
# if lines[i] == "\n":
# break
# lines = lines[:i-1]
# file.close()
#
# lines = [l.lower() for l in lines]
# file = open(config.OUTPUT_FILENAME, "w")
# file.writelines(lines)
# file.close()
### Read the solution plan to the learning task
if not validation_mode:
    pres = [[] for _ in range(len(actions))]
    dels = [[] for _ in range(len(actions))]
    adds = [[] for _ in range(len(actions))]
file = open(config.OUTPUT_FILENAME, 'r')
# Parse programming actions
for line in file:
keys = "(insert_pre_"
if keys in line:
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a.name for a in actions].index(action[0])
pred = [aux[0].split("_")[0]]
if [aux[0].split("_")[2:]][0] != ['']:
pred = pred + [aux[0].split("_")[2:]][0]
pres[indexa].append(pred)
keys = "(insert_eff_"
if keys in line:
# act = p.split("_")[2]
# pred = [p.split("_")[1]] + p.split("_")[3:]
# indexa = [a[0] for a in new_actions].index(act)
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a.name for a in actions].index(action[0])
pred = [aux[0].split("_")[0]]
if [aux[0].split("_")[2:]][0] != ['']:
pred = pred + [aux[0].split("_")[2:]][0]
if not pred in pres[indexa]:
adds[indexa].append(pred)
else:
dels[indexa].append(pred)
keys = "(validate_1)"
if keys in line:
break
if check_static_predicates:
subplans = [[] for _ in last_state_validations]
arities = dict()
for action in actions:
arities[action.name] = action.num_external_parameters
# Parse validation actions
validating_trace = 0
for line in file:
if "validate_" in line:
validate_num = int(line.split(":")[1].strip("()\n").split("_")[1])
if validate_num in last_state_validations:
validating_trace += 1
if validate_num == last_state_validations[-1]:
break
else:
aux = line.split(":")[1].strip().strip("()\n").split(" ")
action = aux[:arities[aux[0]]+1]
subplans[validating_trace].append(action)
file.close()
adds_dict = dict()
dels_dict = dict()
for i in range(len(actions)):
adds_dict[actions[i].name] = adds[i]
dels_dict[actions[i].name] = dels[i]
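        # Replay each trace's subplan with the learned add/delete lists to
        # reconstruct its intermediate states.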
        inferred_state_trajectories = list()
for i in range(len(traces)):
inferred_state_trajectory = list()
init = [atom for atom in traces[i].init if not atom.negated]
inferred_state_trajectory.append(set(init))
for a in subplans[i]:
negative_effects = copy.deepcopy(dels_dict[a[0]])
positive_effects = copy.deepcopy(adds_dict[a[0]])
for j in range(1,len(a)):
negative_effects = [[a[j] if x == "var"+str(j) else x for x in effect] for effect in negative_effects]
positive_effects = [[a[j] if x == "var" + str(j) else x for x in effect] for effect in positive_effects]
new_state = inferred_state_trajectory[-1]
new_state = new_state.difference(set([pddl.conditions.Atom(effect[0], effect[1:]) for effect in negative_effects]))
new_state = new_state.union(set([pddl.conditions.Atom(effect[0], effect[1:]) for effect in positive_effects]))
inferred_state_trajectory.append(new_state)
            inferred_state_trajectories.append(inferred_state_trajectory)
        static_predicates = get_static_predicates(inferred_state_trajectories, predicates)
pre_states = dict()
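    # For each action occurrence, lift the literals holding in its pre-state
    # (mapping object arguments back to varN parameters) as precondition
    # candidates.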
for i in range(len(subplans)):
subplan = subplans[i]
        trajectory = inferred_state_trajectories[i]
for j in range(len(subplan)):
action = subplan[j]
state = trajectory[j]
pre_state = list()
for literal in state:
if set(literal.args).issubset(set(action[1:])):
args_indices = list()
for arg in literal.args:
indices = ["var"+str(i) for i in range(1,len(action)) if action[i] == arg ]
args_indices.append(indices)
for tup in itertools.product(*args_indices):
pre_state.append(tuple([literal.predicate] + list(tup)))
# parameterized_args = ["var"+str(action.index(arg)) for arg in literal.args]
pre_states_list = pre_states.get(action[0], [])
pre_states_list.append(pre_state)
pre_states[action[0]] = pre_states_list
only_static = False
for k,v in pre_states.items():
new_preconditions = set(v[0])
for pre_state in v[1:]:
new_preconditions = new_preconditions.intersection(set(pre_state))
if only_static:
new_preconditions = [list(pre) for pre in new_preconditions if pre[0] in static_predicates]
else:
new_preconditions = [list(pre) for pre in new_preconditions]
# Remove symmetric static preconditions, keeping the one with sorted arguments (var1, var2,...)
new_preconditions = sorted(new_preconditions)
        for precondition in list(new_preconditions):
if precondition[0] in static_predicates and \
[precondition[0]]+list(reversed(precondition[1:])) in new_preconditions and \
precondition[1:] != list(sorted(precondition[1:])):
new_preconditions.remove(precondition)
indexa = [a.name for a in actions].index(k)
learned_pres = pres[indexa]
new_preconditions = [pre for pre in new_preconditions if pre not in learned_pres]
pres[indexa] += new_preconditions
counter = 0
new_fd_task = copy.deepcopy(original_task)
new_fd_task.actions = []
for action in actions:
ps = [pddl.pddl_types.TypedObject("?o"+str(i+1), action.parameters[i].type_name) for i in range(action.num_external_parameters)]
pre = []
for p in pres[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
pre = pre + [pddl.conditions.Atom(p[0], args)]
eff = []
for p in dels[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
for p in adds[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action.name, ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
# new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open(config.OUTPUT_PATH+"/learned_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, domain_pddl))
fdomain.close()
sys.exit(0)
### Read the solution plan to the evaluation task
inserts = 0
deletes = 0
if validation_mode:
file = open(config.OUTPUT_FILENAME, 'r')
# Parse edition actions
for line in file:
if "insert_" in line:
inserts += 1
# aux = line.replace("\n", "").replace(")", "").split("insert_")[1].split("_")
# action = aux[2]
# predicate = aux[1] + aux[3:]
#
# pred = [aux[0].split("_")[0]]
# if [aux[0].split("_")[2:]][0] != ['']:
# pred = pred + [aux[0].split("_")[2:]][0]
# # pres[indexa].append(pred)
elif "delete_" in line:
deletes += 1
else:
break
file.close()
if distance_metric:
print("Distance: {}".format(inserts + deletes))
else:
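        # semPrecision = (|M| - deletes) / |M|
        # semRecall    = (|M| - deletes) / (|M| - deletes + inserts)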
semPrecision = np.float64(model_size - deletes) / model_size
semRecall = np.float64(model_size - deletes) / (model_size - deletes + inserts)
print("{} & {} & {} \\\\".format(domain_name, semPrecision, semRecall))
| 54,118 | 44.25 | 345 | py |
DAAISy | DAAISy-main/dependencies/fama/src/aij18generator.py |
#! /usr/bin/env python
import sys,glob,os
import pddl, pddl_parser
import config, fdtask_to_pddl
# **************************************#
# MAIN
# **************************************#
try:
source_folder_name = sys.argv[1]
destination_folder_name = sys.argv[2]
except:
print "Usage:"
print sys.argv[0] + " <source folder name> <destination folder name>"
sys.exit(-1)
# Reading the source examples
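# Each domain folder is turned into a single "(solution ...)" trace file:
# objects and init come from the first test problem, one (:observations ...)
# block and plan step per test problem, and the goal from the last one.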
for item in sorted(glob.glob(source_folder_name+"/*")):
domain_name = item[len(source_folder_name)+1:]
domain_filename = source_folder_name + "/" + domain_name + "/domain"
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
plan=[]
for plan_file_name in sorted(glob.glob(source_folder_name + "/" + domain_name + "/plan-*.txt")):
plan_file = open(plan_file_name, 'r')
step = plan_file.readline().replace('\n', '')
plan.append(step[3:])
plan_file.close()
# HEAD
problem_filenames = sorted(glob.glob(source_folder_name + "/" + domain_name + "/test-*.pddl"))
problem_filename = problem_filenames[0]
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
str_out = ""
str_out = str_out + "(solution \n"
str_out = str_out + "(:objects "
for i in sorted(set(fd_task.objects)):
str_out = str_out + str(i).replace(":"," - ") + " "
str_out = str_out + ")\n"
str_out = str_out + "(:init " + fdtask_to_pddl.format_condition([i for i in fd_task.init if i.predicate!="="])+")\n"
# body
counter = 0
for test_file_name in problem_filenames:
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", test_file_name)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
str_out = str_out + "(:observations " + fdtask_to_pddl.format_condition([i for i in fd_task.init if i.predicate!="="])+")\n"
str_out = str_out + "\n" + plan[counter] + "\n\n"
counter = counter + 1
# tail
problem_filename = problem_filenames[-1]
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
str_out = str_out + "(:goal "
for item in fd_task.goal.parts:
str_out=str_out+fdtask_to_pddl.format_condition(item)
str_out = str_out + "))"
# Generation
output_folder = destination_folder_name + "/" + domain_name
cmd = "mkdir " + output_folder
print("\n\nExecuting... " + cmd)
os.system(cmd)
plan_file = open(output_folder + "/trace01", 'w')
plan_file.write(str_out)
plan_file.close()
sys.exit(0)
| 2,763 | 35.853333 | 138 | py |
DAAISy | DAAISy-main/dependencies/fama/src/compiler_new.py |
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
import numpy as np
def get_max_vars(actions):
max_vars = 0
for a in actions:
max_vars = max(max_vars, a.num_external_parameters)
return max_vars
def get_max_steps(traces):
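    """Return (total, maximum) number of observed steps across traces, where
    a trace contributes the larger of its non-empty state and action counts."""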
traces_steps = list()
for trace in traces:
not_empty_states = len([state for state in trace.states if state != []])
not_empty_actions = len([action for action in trace.actions if action != []])
traces_steps.append(max(not_empty_states, not_empty_actions))
return sum(traces_steps), max(traces_steps)
def get_all_types(task, itype):
output=[itype]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
def possible_pred_for_action(task, p, a, tup):
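    """Return True if predicate p can be instantiated over action a's
    parameters at the 1-based positions given by tup, respecting the type
    hierarchy."""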
if (len(p.arguments) > len(a.parameters)):
return False
action_types = [set([a.parameters[int(tup[i])-1].type_name]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x.type_name)) for x in p.arguments]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
return all(fits)
def get_static_predicates(state_trajectories, predicates):
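    """Return the names of predicates whose set of true literals never
    changes along any of the given state trajectories."""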
candidates = set([p.name for p in predicates])
for trajectory in state_trajectories:
trace_candidates = set()
for predicate in candidates:
static = True
init_literals = set([l for l in trajectory[0] if l.predicate == predicate])
# print("init_lit: ", init_literals)
for state in trajectory[1:]:
state_literals = set([l for l in state if l.predicate == predicate])
# print("state_lit: ", state_literals)
if init_literals != state_literals:
static = False
break
if static:
# print("NP: ", predicate)
trace_candidates.add(predicate)
candidates = candidates.intersection(trace_candidates)
# reflexive_static_predicates = dict()
# for candidate in candidates:
# reflexive_static_predicates[candidate] = True
# for trace in traces:
# init_literals = set([l for l in trace.init if l.predicate == candidate])
# for literal in init_literals:
# if len(literal.args) == 1 or len(set(literal.args)) != 1:
# reflexive_static_predicates[candidate] = False
# break
return candidates
# return [p for p in predicates if p.name in candidates]
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
index = sys.argv.index("-s")
check_static_predicates = True
sys.argv.pop(index)
else:
check_static_predicates = False
if "-v" in sys.argv:
index = sys.argv.index("-v")
learned_domain = sys.argv[index+1]
validation_mode = True
sys.argv.pop(index)
sys.argv.pop(index)
else:
validation_mode = False
if "-f" in sys.argv:
index = sys.argv.index("-f")
finite_steps = True
sys.argv.pop(index)
else:
finite_steps = False
if "-t" in sys.argv:
index = sys.argv.index("-t")
trace_prefix = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_prefix = "trace"
if "-l" in sys.argv:
index = sys.argv.index("-l")
trace_min = int(sys.argv[index+1])
trace_max = int(sys.argv[index+2])
sys.argv.pop(index)
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_min = None
if "-c" in sys.argv:
index = sys.argv.index("-c")
config_filename = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
config_filename = None
if "-d" in sys.argv:
index = sys.argv.index("-d")
distance_metric = True
sys.argv.pop(index)
else:
distance_metric = False
if "-ct" in sys.argv:
index = sys.argv.index("-ct")
cnt = int(sys.argv[index+1])
sys.argv.pop(index)
sys.argv.pop(index)
else:
cnt = 0
if "-m" in sys.argv:
index = sys.argv.index("-m")
model_file = sys.argv[index+1]
sys.argv.pop(index)
sys.argv.pop(index)
else:
model_file = "learned_domain.pddl"
domain_folder_name = sys.argv[1]
action_observability = float(sys.argv[2])/100
state_observability = float(sys.argv[3])/100
# if action_observability == 1 or state_observability == 1:
# finite_steps = True
except:
print "Usage:"
print sys.argv[0] + "[-s] [-f] [-v learned_domain] <domain folder> <action observability (0-100)> <state observability (0-100)> -t " \
"_prefix -l input_limit"
sys.exit(-1)
LEARNED_DOMAIN_PATH = config.OUTPUT_PATH+"/learned_domain.pddl"
# Read the domain file
if not validation_mode:
domain_filename = "{}domain".format(domain_folder_name)
else:
domain_filename = learned_domain
domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(domain_pddl)
# FILTERS
if config_filename is not None:
    config_file = open(config_filename, "r")
    filters = config_file.readlines()
trace_filter = filters[0].strip().split(", ")
pres_filter = filters[1].strip().split(", ")
effects_filter = filters[2].strip().split(", ")
else:
trace_filter = pres_filter = effects_filter = [p.name for p in predicates]
# Read the input traces
traces = list()
file_list = sorted(glob.glob(domain_folder_name + trace_prefix + "*"))[:cnt]
# print(domain_folder_name + trace_prefix )
# print(file_list)
for filename in file_list:
# print("Filename: ",filename)
trace_pddl = pddl_parser.pddl_file.parse_pddl_file("trace", filename)
traces.append(pddl_parser.parsing_functions.parse_trace_pddl(trace_pddl, predicates, action_observability, state_observability))
for trace in traces:
for i in range(len(trace.states)):
trace.states[i] = [atom for atom in trace.states[i] if atom.predicate in trace_filter]
if trace_min is not None:
traces = traces[trace_min:trace_max]
MAX_VARS = get_max_vars(actions)
TOTAL_STEPS, MAX_STEPS = get_max_steps(traces)
# static_predicates, reflexive_static_predicates = get_static_predicates(traces, predicates)
### LEARNING PROBLEM
# The objects of the original domain for the learning task
# is the union of all objects in the input traces
objects = list()
for trace in traces:
objects.extend(trace.objects)
objects = list(set(objects))
# Empty initial state for now
init = []
# Empty goal for now
goal = []
original_task = pddl.Task(domain_name, 'learning_problem', domain_requirements, types, objects,
predicates, functions, init, goal, actions, axioms, True)
learning_task = copy.deepcopy(original_task)
learning_task.actions = []
### LEARNING DOMAIN
# Define "modeProg" predicate
learning_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
# Define "test" predicates
for i in range(1, TOTAL_STEPS+2):
learning_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if action_observability > 0:
# Define "step" domain type
learning_task.types.append(pddl.pddl_types.Type("step", "None"))
# Define "current" predicate. Example (current ?i - step)
learning_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
# Define "inext" predicate. Example (inext ?i1 - step ?i2 - step)
learning_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
learning_task.predicates.append(pddl.predicates.Predicate("action_applied", []))
# Define action model representation predicates
# Example (pre_clear_pickup_var1)
for a in actions:
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p.arguments))):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
if p.name in pres_filter:
learning_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p.name] + [a.name] + vars), []))
if p.name in effects_filter:
learning_task.predicates.append(
pddl.predicates.Predicate("del_" + "_".join([p.name] + [a.name] + vars), []))
learning_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p.name] + [a.name] + vars), []))
# Define action validation predicates
# Example (plan-pickup ?i - step ?x - block)
if action_observability > 0:
for a in actions:
learning_task.predicates.append(pddl.predicates.Predicate("plan-" + a.name,
[pddl.pddl_types.TypedObject("?i", "step")] + a.parameters))
# Original domain actions
for a in actions:
original_params = [par.name for par in a.parameters]
params = [pddl.pddl_types.TypedObject("?o" + str(i+1), a.parameters[i].type_name ) for i in range(a.num_external_parameters)]
pre = list()
known_preconditions = list(a.precondition.parts)
for known_precondition in known_preconditions:
if known_precondition.predicate not in pres_filter:
pre += [pddl.conditions.Atom(known_precondition.predicate,
["?o" + str(original_params.index(arg) + 1) for arg in known_precondition.args])]
if finite_steps:
pre += [pddl.conditions.NegatedAtom("action_applied", [])]
eff = list()
known_effects = list(a.effects)
for known_effect in known_effects:
if not known_effect.literal.negated and known_effect.literal.predicate not in effects_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom(known_effect.literal.predicate,
["?o" + str(original_params.index(arg) + 1) for arg in
known_effect.literal.args]))]
elif known_effect.literal.negated and known_effect.literal.predicate not in effects_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(known_effect.literal.predicate,
["?o" + str(original_params.index(arg) + 1) for arg
in known_effect.literal.args]))]
# action_applied predicate
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("action_applied", []))]
# Add "step" parameters to the original actions
# This will allow to reproduce the input traces
if action_observability > 0:
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
# Add "modeProg" precondition
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
# Add all possible preconditions as implications
# Example (or (not (pre_on_stack_var1_var1 ))(on ?o1 ?o1))
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p.arguments))):
if p.name in pres_filter and possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
disjunction = pddl.conditions.Disjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])] + [
pddl.conditions.Atom(p.name, ["?o" + str(t) for t in tup])])
pre = pre + [disjunction]
# Define action validation condition
# Example (and (plan-pickup ?i1 ?o1) (current ?i1) (inext ?i1 ?i2))
if action_observability > 0:
# validation_condition = [pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]
validation_condition = [pddl.conditions.Atom("current", ["?i1"])]
validation_condition += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if action_observability == 1:
validation_condition += [pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]
pre += validation_condition
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif action_observability > 0:
# Define conditional effect to validate an action in the input traces
# This effect advances the program counter when an observed action is executed
pre += validation_condition
eff += [pddl.effects.Effect([], pddl.conditions.Conjunction([pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Conjunction([pddl.conditions.Atom("plan-" + a.name, ["?i1"] + ["?o" + str(i+1) for i in range(a.num_external_parameters) ])]), pddl.conditions.Atom("current", ["?i2"]))]
# Add all possible effects as conditional effects
# Example (when (and (del_ontable_put-down_var1 ))(not (ontable ?o1)))
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if p.name in effects_filter and possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
# del effects
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p.name, ["?o" + str(t) for t in tup]))]
# add effects
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p.name, ["?o" + str(t) for t in tup]))]
learning_task.actions.append(pddl.actions.Action(a.name, params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action model
for a in actions:
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
params = []
if p.name in pres_filter:
                        # Action for programming the preconditions
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"pre_" + "_".join([p.name] + [a.name] + vars), []))]
if not validation_mode:
learning_task.actions.append(
pddl.actions.Action("program_pre_" + "_".join([p.name]+[a.name]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
else:
learning_task.actions.append(
pddl.actions.Action("insert_pre_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p.name in effects_filter:
if not validation_mode:
# Action for programming the effects
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Atom(
"pre_" + "_".join([p.name] + [a.name] + vars), []), pddl.conditions.Atom(
"del_" + "_".join([p.name] + [a.name] + vars), []))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.NegatedAtom(
"pre_" + "_".join([p.name] + [a.name] + vars), []), pddl.conditions.Atom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("program_eff_" + "_".join([p.name]+[a.name]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
else:
# Action for inserting negative effects
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"del_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("insert_del_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Action for inserting positive effects
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
pre += [pddl.conditions.NegatedAtom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("insert_add_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if validation_mode:
if p.name in pres_filter:
# Delete precondition
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p.name] + [a.name] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"pre_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_pre_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p.name in effects_filter:
# Delete add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_add_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Delete del effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("del_" + "_".join([p.name] + [a.name] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"del_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_del_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
last_state_validations = list()
MAX_ISTEPS = 1
# ACTIONS FOR THE VALIDATION OF THE INPUT TRACES
del_plan_effects = [] # store plan predicates here to delete in the next validate action
# First validate action
# Disables modeProg
pre = [pddl.conditions.Atom("modeProg", [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("modeProg", []))]
if action_observability > 0:
# Setups program counter to 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("current", ["i1"]))]
# Setups the initial state of the first trace
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), atom) for atom in traces[0].init]
num_traces = len(traces)
states_seen = 0 # Used for "test" predicates
total_actions_seen = 0
for j in range(len(traces)):
trace = traces[j]
trace_length = len(trace.states)
actions_seen = 0
for step in range(trace_length):
if trace.actions[step] != []:
actions_seen += 1
total_actions_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
del_plan_effects += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
if trace.states[step] != []:
states_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
if states_seen != 1:
pre += [pddl.conditions.Atom("test" + str(states_seen - 1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
learning_task.actions.append(
pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
pre = [pddl.conditions.NegatedAtom("modeProg", [])]
# action_applied
pre += [pddl.conditions.Atom("action_applied", [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("action_applied", []))]
if action_observability > 0:
pre += [pddl.conditions.Atom("current", ["i" + str(actions_seen + 1)])]
pre += trace.states[step]
eff = del_plan_effects
if actions_seen != 0:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(actions_seen + 1)]))]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("test" + str(states_seen), []))]
# If it is the last/goal state of the trace but not the last trace
if step == trace_length -1 and j < len(traces)-1:
last_state_validations.append(states_seen+1)
next_state = set()
current_state = set()
for atom in traces[j + 1].init:
if not atom.negated:
next_state.add(atom)
for atom in traces[j].goal:
if not atom.negated:
current_state.add(atom)
lost_atoms = current_state.difference(next_state)
new_atoms = next_state.difference(current_state)
for atom in lost_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(atom.predicate, atom.args))]
for atom in new_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom(atom.predicate, atom.args))]
del_plan_effects = []
MAX_ISTEPS = max(MAX_ISTEPS, actions_seen + 1)
actions_seen = 0
# Goal validation
# pre = [pddl.conditions.NegatedAtom("modeProg", [])]
# pre += [pddl.conditions.Atom("current", ["i" + str(action_cnt)])]
# pre += trace.goal
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
# if action_cnt != 1:
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("current", ["i" + str(action_cnt)]))]
# Setup state for the next trace
# pre += [pddl.conditions.Atom("current", ["i" + str(actions_seen+1)])]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.Atom("test" + str(states_seen), []))]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
states_seen += 1
pre += [pddl.conditions.Atom("test" + str(states_seen-1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
learning_task.actions.append(pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
last_state_validations.append(states_seen)
# # print("final states validated at: {}".format(", ".join([str(i) for i in last_state_validations])))
### LEARNING PROBLEM
learning_task.goal = pddl.conditions.Conjunction([pddl.conditions.Atom("test"+str(states_seen), [])])
if action_observability > 0:
# Add inext fluents
for i in range(2, MAX_ISTEPS+1):
learning_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
        # Add step objects
for i in range(1, MAX_ISTEPS + 1):
learning_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
# Add modeProg fluent
learning_task.init.append(pddl.conditions.Atom("modeProg", []))
# size(M)
model_size = 0
# Add known preconditions and effects
for action in actions:
action_params = [p.name for p in action.parameters]
known_pres = list()
if type(action.precondition) is pddl.Conjunction and len(action.precondition.parts) > 0:
known_pres = action.precondition.parts
elif type(action.precondition) is pddl.Atom:
known_pres = [action.precondition]
filtered_known_pres = [pre for pre in known_pres if pre.predicate in pres_filter]
for pre in filtered_known_pres:
if type(pre) is pddl.conditions.Truth:
continue
model_representation_fluent = "pre_" + "_".join([pre.predicate] + [action.name] + ["var"+str(action_params.index(pre.args[i])+1) for i in range(len(pre.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_adds = [eff for eff in action.effects if
not eff.literal.negated and eff.literal.predicate in effects_filter]
for eff in filtered_known_adds:
model_representation_fluent = "add_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i
in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_dels = [eff for eff in action.effects if
eff.literal.negated and eff.literal.predicate in effects_filter]
for eff in filtered_known_dels:
model_representation_fluent = "del_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i
in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
### Write the learning task domain and problem to pddl
fdomain = open(LEARNED_DOMAIN_PATH, "w")
fdomain.write(fdtask_to_pddl.format_domain(learning_task, domain_pddl))
fdomain.close()
fdomain = open(config.OUTPUT_PATH+"/learning_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(learning_task, domain_pddl))
fdomain.close()
    ### Solve the learning task
# starting_horizon = str(2*TOTAL_STEPS + 3)
validation_steps = max(states_seen-1, total_actions_seen)*2 + 1
if action_observability == 1 and state_observability == 0:
validation_steps = states_seen + total_actions_seen
starting_horizon = str(validation_steps + 2)
if action_observability == 1 or state_observability == 1:
ending_horizon = " -T " + starting_horizon
else:
ending_horizon = ""
plan_type = ""
if validation_mode:
plan_type = "-P 0"
ending_horizon = ""
cmd = "rm " + config.OUTPUT_FILENAME + " "+config.OUTPUT_PATH+"/planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " "+LEARNED_DOMAIN_PATH+" "+config.OUTPUT_PATH +"/learning_problem.pddl -F " + starting_horizon + " " +ending_horizon + " " + plan_type + " " + config.PLANNER_PARAMS + " > "+config.OUTPUT_PATH+"/planner_out.log"
# cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " learning_domain.pddl learning_problem.pddl -F " + starting_horizon + " " +ending_horizon + " " + plan_type + " " + config.PLANNER_PARAMS + " > planner_out.log"
# cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PROJECT_PATH + "util/FF-v2.3/ff" + " -o learning_domain.pddl -f learning_problem.pddl > planner_out.log"
os.system(cmd)
# print("\n\nExecuted " + cmd)
# file = open("planner_out.log", "r")
# lines = file.readlines()
# for i in range(len(lines)):
# if "step" in lines[i]:
# break
# lines = lines[i:]
# for i in range(len(lines)):
# if lines[i] == "\n":
# break
# lines = lines[:i-1]
# file.close()
#
# lines = [l.lower() for l in lines]
# file = open(config.OUTPUT_FILENAME, "w")
# file.writelines(lines)
# file.close()
### Read the solution plan to the learning task
if not validation_mode:
pres = [[] for _ in xrange(len(actions))]
dels = [[] for _ in xrange(len(actions))]
adds = [[] for _ in xrange(len(actions))]
file = open(config.OUTPUT_FILENAME, 'r')
# Parse programming actions
for line in file:
keys = "(program_pre_"
if keys in line:
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a.name for a in actions].index(action[0])
pred = [aux[0].split("_")[0]]
                if aux[0].split("_")[2:] != ['']:
                    pred = pred + aux[0].split("_")[2:]
# allpres.remove(str("pre_" + pred[0] + "_" + action[0] + "_" + "_".join(map(str, pred[1:]))))
pres[indexa].append(pred)
keys = "(program_eff_"
if keys in line:
# act = p.split("_")[2]
# pred = [p.split("_")[1]] + p.split("_")[3:]
# indexa = [a[0] for a in new_actions].index(act)
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a.name for a in actions].index(action[0])
pred = [aux[0].split("_")[0]]
                if aux[0].split("_")[2:] != ['']:
                    pred = pred + aux[0].split("_")[2:]
if not pred in pres[indexa]:
adds[indexa].append(pred)
else:
dels[indexa].append(pred)
keys = "(validate_1)"
if keys in line:
break
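            # Illustrative example of the parsing above (hypothetical line, not
            # from the original output): a plan step such as
            #   "1: (program_pre_clear_stack_var1)"
            # splits on "(program_pre_" into aux = ["clear_stack_var1"], giving
            # action name "stack" and precondition pred = ["clear", "var1"],
            # which is appended to pres at the index of action "stack".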
# print("pres: ", pres)
# print("adds: ", adds)
# print("dels: ", dels)
if check_static_predicates:
# print("Here")
subplans = [[] for _ in last_state_validations]
arities = dict()
for action in actions:
arities[action.name] = action.num_external_parameters
# Parse validation actions
validating_trace = 0
for line in file:
if "validate_" in line:
validate_num = int(line.split(":")[1].strip("()\n").split("_")[1])
if validate_num in last_state_validations:
validating_trace += 1
if validate_num == last_state_validations[-1]:
break
else:
aux = line.split(":")[1].strip().strip("()\n").split(" ")
action = aux[:arities[aux[0]]+1]
subplans[validating_trace].append(action)
file.close()
adds_dict = dict()
dels_dict = dict()
for i in range(len(actions)):
adds_dict[actions[i].name] = adds[i]
dels_dict[actions[i].name] = dels[i]
# print(adds_dict)
# print(dels_dict)
        inferred_state_trajectories = list()
for i in range(len(traces)):
inferred_state_trajectory = list()
init = [atom for atom in traces[i].init if not atom.negated]
inferred_state_trajectory.append(set(init))
for a in subplans[i]:
negative_effects = copy.deepcopy(dels_dict[a[0]])
positive_effects = copy.deepcopy(adds_dict[a[0]])
for j in range(1,len(a)):
negative_effects = [[a[j] if x == "var"+str(j) else x for x in effect] for effect in negative_effects]
positive_effects = [[a[j] if x == "var" + str(j) else x for x in effect] for effect in positive_effects]
new_state = inferred_state_trajectory[-1]
new_state = new_state.difference(set([pddl.conditions.Atom(effect[0], effect[1:]) for effect in negative_effects]))
new_state = new_state.union(set([pddl.conditions.Atom(effect[0], effect[1:]) for effect in positive_effects]))
inferred_state_trajectory.append(new_state)
# print("IST: ", new_state)
            inferred_state_trajectories.append(inferred_state_trajectory)
        static_predicates = get_static_predicates(inferred_state_trajectories, predicates)
pre_states = dict()
# print(subplans)
for i in range(len(subplans)):
subplan = subplans[i]
            trajectory = inferred_state_trajectories[i]
for j in range(len(subplan)):
action = subplan[j]
state = trajectory[j]
pre_state = list()
for literal in state:
if set(literal.args).issubset(set(action[1:])):
args_indices = list()
for arg in literal.args:
indices = ["var"+str(i) for i in range(1,len(action)) if action[i] == arg ]
args_indices.append(indices)
for tup in itertools.product(*args_indices):
pre_state.append(tuple([literal.predicate] + list(tup)))
# parameterized_args = ["var"+str(action.index(arg)) for arg in literal.args]
pre_states_list = pre_states.get(action[0], [])
pre_states_list.append(pre_state)
pre_states[action[0]] = pre_states_list
only_static = False
for k,v in pre_states.items():
new_preconditions = set(v[0])
for pre_state in v[1:]:
new_preconditions = new_preconditions.intersection(set(pre_state))
if only_static:
new_preconditions = [list(pre) for pre in new_preconditions if pre[0] in static_predicates]
else:
new_preconditions = [list(pre) for pre in new_preconditions]
# Remove symmetric static preconditions, keeping the one with sorted arguments (var1, var2,...)
new_preconditions = sorted(new_preconditions)
for precondition in new_preconditions:
if precondition[0] in static_predicates and \
[precondition[0]]+list(reversed(precondition[1:])) in new_preconditions and \
precondition[1:] != list(sorted(precondition[1:])):
new_preconditions.remove(precondition)
indexa = [a.name for a in actions].index(k)
learned_pres = pres[indexa]
new_preconditions = [pre for pre in new_preconditions if pre not in learned_pres]
pres[indexa] += new_preconditions
# print("pres 2: ", pres)
# print("adds 2: ", adds)
# print("dels 2: ", dels)
counter = 0
new_fd_task = copy.deepcopy(original_task)
new_fd_task.actions = []
for action in actions:
# print("Action: ", action)
ps = [pddl.pddl_types.TypedObject("?o"+str(i+1), action.parameters[i].type_name) for i in range(action.num_external_parameters)]
pre = []
for p in pres[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
pre = pre + [pddl.conditions.Atom(p[0], args)]
# print("PRE: ", pre)
eff = []
for p in dels[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
for p in adds[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action.name, ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
# new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open(config.OUTPUT_PATH+"/models/"+model_file, "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, domain_pddl))
fdomain.close()
sys.exit(0)
### Read the solution plan to the evaluation task
inserts = 0
deletes = 0
if validation_mode:
file = open(config.OUTPUT_FILENAME, 'r')
        # Parse model editing actions
for line in file:
if "insert_" in line:
inserts += 1
# aux = line.replace("\n", "").replace(")", "").split("insert_")[1].split("_")
# action = aux[2]
# predicate = aux[1] + aux[3:]
#
# pred = [aux[0].split("_")[0]]
# if [aux[0].split("_")[2:]][0] != ['']:
# pred = pred + [aux[0].split("_")[2:]][0]
# # pres[indexa].append(pred)
elif "delete_" in line:
deletes += 1
else:
break
file.close()
if distance_metric:
print("Distance: {}".format(inserts + deletes))
else:
semPrecision = np.float64(model_size - deletes) / model_size
semRecall = np.float64(model_size - deletes) / (model_size - deletes + inserts)
print("{} & {} & {} \\\\".format(domain_name, semPrecision, semRecall))
| 43,617 | 43.782341 | 345 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/fdtask_to_pddl.py
|
import sys
import utils,pddl
def format_problem(task,domain):
str_out = "(define (problem " + task.task_name + ")\n"
str_out = str_out + " (:domain "+ task.domain_name + ")\n"
str_out = str_out + " (:objects "
for i in set(task.objects):
str_out = str_out + str(i).replace(":"," - ") + " "
str_out = str_out + ")\n"
str_out = str_out + " (:init " + format_condition([i for i in task.init if i.predicate!="="])+")\n"
str_out = str_out + " (:goal " + format_condition(task.goal)+")"
str_out = str_out + ")"
return str_out
def format_domain(task,domain):
str_out = "(define (domain " + task.domain_name + ")\n"
str_out = str_out + " (:requirements " + str(task.requirements).replace(",","").replace(":non-deterministic","") + ")\n"
str_out = str_out + " (:types "
for i in task.types:
str_out = str_out + i.name + " - " + str(i.basetype_name) + " "
str_out = str_out + ")\n"
constants=utils.compute_constants(task,domain)
constants_str = list()
if len(constants)>0:
str_out = str_out + " (:constants "
for i in sorted(set(task.objects)):
aux = str(i).replace(":"," - ")
constants_str.append(aux)
constants_str = sorted(constants_str)
str_out += " ".join(constants_str) + ")\n"
str_out = str_out + " (:predicates "
predicates_str = list()
for i in task.predicates:
if i.name != "=":
aux = "(" + i.name
for j in i.arguments:
aux += " " + j.name + " - " + j.type_name
aux += ")"
predicates_str.append(aux)
predicates_str = sorted(predicates_str)
str_out += " ".join(predicates_str) + ")\n"
# for axiom in task.axioms:
# str_out = str_out + " (:derived (" + axiom.name + ")\n"
# str_out = str_out + format_condition(axiom.condition)
# str_out = str_out + ")\n"
str_out=str_out+"\n"
for a in task.actions:
str_out=str_out + format_action(a,task)
str_out=str_out + ")"
return str_out
def format_action(a,task):
str_out=""
str_out=str_out + " (:action " + a.name +"\n"
str_out=str_out + " :parameters (" + " ".join(map(str, a.parameters)).replace(":"," -") + ")\n"
str_out=str_out + " :precondition " + format_condition(a.precondition) +"\n"
str_out=str_out + " :effect (and "
for item in a.effects:
str_out = str_out + format_effect(item)
str_out = str_out + "))\n\n"
return str_out
def format_condition(c):
str_out=""
if isinstance(c,list):
for item in c:
str_out = str_out + format_condition(item) +" "
if isinstance(c,pddl.conditions.Conjunction):
str_out=str_out+"(and "
for item in c.parts:
str_out=str_out+format_condition(item)
str_out=str_out+")"
if isinstance(c,pddl.conditions.Disjunction):
str_out=str_out+"(or "
for item in c.parts:
str_out=str_out+format_condition(item)
str_out=str_out+")"
if isinstance(c,pddl.conditions.UniversalCondition):
str_out = str_out + "(forall ("
if isinstance(c,pddl.conditions.ExistentialCondition):
str_out = str_out + "(exists ("
if isinstance(c,pddl.conditions.UniversalCondition) or isinstance(c,pddl.conditions.ExistentialCondition):
for p in c.parameters:
str_out = str_out + p.name + " - " + p.type_name +" "
str_out = str_out + ")" + format_condition(list(c.parts)) + ")"
if isinstance(c,pddl.conditions.NegatedAtom):
str_out=str_out+"(not ("+str(c.predicate)+" "+" ".join(map(str, c.args))+"))"
if isinstance(c,pddl.conditions.Atom):
str_out=str_out+"("+str(c.predicate)+" "+" ".join(map(str, c.args))+")"
return str_out
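# Illustrative usage of format_condition (hypothetical atoms, assuming the
# pddl package imported above):
#   format_condition(pddl.conditions.Conjunction(
#       [pddl.conditions.Atom("on", ["a", "b"]),
#        pddl.conditions.NegatedAtom("clear", ["b"])]))
# returns "(and (on a b)(not (clear b)))".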
def format_effect(e):
str_out=""
if isinstance(e,pddl.effects.Effect):
if e.parameters:
str_out=str_out+"(forall (" + " ".join(map(str, e.parameters)).replace(":"," - ")+")"
if e.condition != pddl.conditions.Truth():
str_out=str_out+"(when "+format_condition(e.condition)
if e.literal.negated:
str_out=str_out+"(not ("+str(e.literal.predicate)+" "+" ".join(map(str, e.literal.args))+"))"
else:
str_out=str_out+"("+str(e.literal.predicate)+" "+" ".join(map(str, e.literal.args))+")"
if e.condition != pddl.conditions.Truth():
str_out=str_out+")"
if e.parameters:
str_out=str_out+")"
return str_out
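# Illustrative usage of format_effect (hypothetical effect, assuming
# TypedObject prints as "?x: block" as in the Fast Downward pddl package):
#   pddl.effects.Effect([pddl.pddl_types.TypedObject("?x", "block")],
#                       pddl.conditions.Atom("clear", ["?x"]),
#                       pddl.conditions.Atom("marked", ["?x"]))
# is rendered as "(forall (?x - block)(when (clear ?x)(marked ?x)))".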
| 4,449 | 30.560284 | 123 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/tim.py
|
#! /usr/bin/env python
import pddl_parser
import sys
import itertools
def build_property_map(task):
properties = dict()
counter = 0
for predicate in task.predicates:
#if predicate.name != "=":
for i in range(1, len(predicate.arguments)+1):
key = "{}_{}".format(predicate.name, i)
properties[key] = counter
counter += 1
return properties
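# Illustrative example (hypothetical task): for predicates (on ?x ?y) and
# (clear ?x), build_property_map returns
#   {"on_1": 0, "on_2": 1, "clear_1": 2}
# i.e. one property per predicate argument position, numbered from 1.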
def PRS_to_string(PRS, inv_properties):
precs = list()
del_precs = list()
adds = list()
for i in range(len(PRS[0])):
if PRS[0][i] > 0:
precs.extend([inv_properties[i]*PRS[0][i]])
for i in range(len(PRS[1])):
if PRS[1][i] > 0:
del_precs.extend([inv_properties[i] * PRS[1][i]])
for i in range(len(PRS[2])):
if PRS[2][i] > 0:
adds.extend([inv_properties[i] * PRS[2][i]])
str = "\tprecs: \t{}\n".format(", ".join(precs))
str += "\tdel_precs: \t{}\n".format(", ".join(del_precs))
str += "\tadds: \t{}".format(", ".join(adds))
return str
def transition_rule_to_string(T, inv_properties):
E = list()
S = list()
F = list()
for i in range(len(T[0])):
if T[0][i] > 0:
E.extend([inv_properties[i] * T[0][i]])
if len(E) == 0:
E.append("null")
for i in range(len(T[1])):
if T[1][i] > 0:
S.extend([inv_properties[i] * T[1][i]])
if len(S) == 0:
S.append("null")
for i in range(len(T[2])):
if T[2][i] > 0:
F.extend([inv_properties[i] * T[2][i]])
if len(F) == 0:
F.append("null")
str = "{} => {} -> {}".format(", ".join(E), ", ".join(S), ", ".join(F))
return str
def property_set_to_string(property_set, inv_properties):
return "{{{}}}".format(", ".join([inv_properties[i] for i in range(len(property_set)) if property_set[i] == 1]))
def property_space_to_string(space, inv_properties):
str = "{}\t%%%\t{}\t%%%\t{}\t%%%\t{}".format(property_set_to_string(space['property_set'], inv_properties),
" | ".join([transition_rule_to_string(t, inv_properties) for t in space['transition_rules']]),
", ".join(space['objects']),
" | ".join([property_set_to_string(state, inv_properties) for state in space['states']] ))
return str
def extend_attribute_space(space, property_spaces):
new_objects = set()
space['marked'] = True
for t in space['transition_rules']:
if not any(t[1]):
enablers = [i for i in range(len(t[0])) if t[0][i] > 0]
for p in enablers:
for s in property_spaces:
if s['property_set'][p] > 0:
if s['attribute_space'] and not s['marked']:
new_objects.update(extend_attribute_space(s, property_spaces))
else:
new_objects.update(s['objects'])
break
space['objects'].update(new_objects)
return new_objects
def find_types_for_property(property, patterns):
types = list()
for j in range(len(patterns.keys())):
if property in patterns[patterns.keys()[j]]['properties']:
types.append("T{}".format(j))
return types
def is_superset_state(s, other):
return ([k & l for k,l in zip(s, other)] == other) and any([k - l for k,l in zip(s, other)])
def build_predicate_arity_map(fd_task):
predicate_arity_map = dict()
for pred in fd_task.predicates:
pred_arity = len(pred.arguments)
predicate_arity_map[pred.name] = pred_arity
return predicate_arity_map
def construct_identity_invariant(p, predicate_arity_map, property_type_map, patterns, num_invariants):
predicate_name = p[:-2]
position = int(p[-1])
predicate_arity = predicate_arity_map[predicate_name]
positions = [i for i in range(1, predicate_arity+1)]
positions.remove(position)
if predicate_arity > 1:
num_invariants += 1
str = "FORALL ?x:{}. ".format(" U ".join(find_types_for_property(p, patterns)))
str2 = "(:derived (invariant-{})\n\t(forall".format(num_invariants)
extra_variables = 2 * (predicate_arity - 1)
for i in range(extra_variables):
str += "".join(["FORALL ?y{}:{}.".format(i + 1, " U ".join(
find_types_for_property("{}_{}".format(predicate_name, positions[j]), patterns))) for j in
range(len(positions))])
# str2 += " ".join("?y{}".format(i+1) for i in range(extra_variables)) + " - object)\n\t\t(not (and "
main_type = property_type_map[p]
params = ["" for x in range(predicate_arity)]
params[position - 1] = "?x"
arguments = list()
arguments.append("?x - {}".format(main_type))
parts = list()
for i in range(2):
for j in range(len(positions)):
params[positions[j]-1] = "?y{}".format((i+1)*(j+1))
arguments.append("?y{} - {}".format((i + 1) * (j + 1), property_type_map["{}_{}".format(predicate_name, positions[j])]))
str += "".join([" ({} {})".format(predicate_name, " ".join(params))])
# str2 += "".join([" ({} {})".format(predicate_name, " ".join(params))])
parts.append("".join([" ({} {})".format(predicate_name, " ".join(params))]))
if i < 1:
str += " AND "
str += " -> y1 = y2"
str2 += "({})\n".format(" ".join(arguments))
str2 += "\t\t(not (and {}".format(" ".join(parts))
str2 += " (not (= ?y1 ?y2)) ))))"
# print(str)
print(str2)
return num_invariants
def construct_state_membership_invariant(space, fd_task, patterns, inv_properties):
associated_types = set()
for o in space['objects']:
for i in range(len(patterns.keys())):
if o in patterns[patterns.keys()[i]]['objects']:
associated_types.add("T{}".format(i))
if len(space['states']) < 2:
return
parts = list()
for state in space['states']:
properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
parts.append("("+" AND ".join(properties)+")")
print("FORALL x:{}. {}".format(" U ".join(associated_types), " OR ".join(parts)))
def construct_uniqueness_invariant(space, fd_task, patterns, inv_properties):
associated_types = set()
for o in space['objects']:
for i in range(len(patterns.keys())):
if o in patterns[patterns.keys()[i]]['objects']:
associated_types.add("T{}".format(i))
if len(space['states']) == 1:
state = space['states'][0]
properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
print("FORALL x:{}. {}".format(" U ".join(associated_types), "NOT ({})".format(" AND ".join(properties))))
else:
parts = list()
for state in space['states']:
properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
parts.append("("+" AND ".join(properties)+")")
print("FORALL x:{}. {}".format(" U ".join(associated_types), "NOT ({})".format(" AND ".join(parts))))
def construct_binary_mutexes(space, predicate_arity_map, patterns, inv_properties):
associated_types = set()
for o in space['objects']:
for i in range(len(patterns.keys())):
if o in patterns[patterns.keys()[i]]['objects']:
associated_types.add("T{}".format(i))
exclusive_states = list()
for s1 in space['states']:
is_subset = False
for s2 in space['states']:
is_subset = is_subset | is_superset_state(s2, s1)
if not is_subset:
exclusive_states.append(s1)
if len(exclusive_states) == 1:
state = exclusive_states[0]
properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
# print("FORALL x:{}. {}".format(" U ".join(associated_types), "NOT ({})".format(" AND ".join(properties))))
else:
binary_mutexes = itertools.combinations(exclusive_states, 2)
for mutex in binary_mutexes:
parts = list()
mutex_parts = list()
str = ""
extra_variables = 0
for state in mutex:
properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
parts.append("(" + " AND ".join(properties) + ")")
state_parts = list()
for property in properties:
predicate = property[:-2]
pos = int(property[-1])
predicate_arity = predicate_arity_map[predicate]
params = [None for _ in range(predicate_arity)]
for i in range(1, predicate_arity + 1):
if i == pos:
params[i-1] = "?x"
else:
extra_variables += 1
params[i-1] = "?y{}".format(extra_variables)
state_parts.append("({} {})".format(predicate, " ".join(params)))
mutex_parts.append("(and {})".format(" ".join(state_parts)))
str += "(:derived (invariant-#)\n\t(forall (?x {})\n".format(" ".join(["?y{}".format(j+1) for j in range(extra_variables)]))
str += "\t\t(not (and {}))))".format(" ".join(mutex_parts))
print("FORALL x:{}. {}".format(" U ".join(associated_types), "NOT ({})".format(" AND ".join(parts))))
print(str)
def build_property_type_map(fd_task):
property_type_map = dict()
for pred in fd_task.predicates:
count = 0
for arg in pred.arguments:
count += 1
property = "{}_{}".format(pred.name, count)
property_type_map[property] = arg.type_name
return property_type_map
def get_common_subtype(type1, type2, fd_task):
if type1 == type2:
return type1
else:
for d_type in fd_task.types:
if d_type.name == type1:
d_type1 = d_type
elif d_type.name == type2:
d_type2 = d_type
if d_type1.name in d_type2.supertype_names:
return d_type2.name
else:
return d_type1.name
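# Example (hypothetical domain): with a type "block" whose supertype is
# "object", get_common_subtype("object", "block", fd_task) returns "block",
# i.e. the more specific of the two types.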
def construct_binary_predicate_mutexes(space, predicate_arity_map, property_type_map, inv_properties, num_invariants, fd_task):
valid_states = list()
space_properties = set()
for s in space['states']:
state_properties = [inv_properties[i] for i in range(len(s)) if s[i] == 1]
valid_state = set(state_properties)
space_properties.update(state_properties)
if valid_state not in valid_states:
valid_states.append(valid_state)
mutexes = set()
for comb in itertools.combinations(space_properties, 2):
if not any([set(comb).issubset(s) for s in valid_states]):
mutexes.add(comb)
for mutex in mutexes:
prop1 = mutex[0]
prop2 = mutex[1]
main_type_1 = property_type_map[prop1]
main_type_2 = property_type_map[prop2]
main_type = get_common_subtype(main_type_1, main_type_2, fd_task)
extra_variables = 0
arguments = list()
parts1 = [None for _ in range(predicate_arity_map[prop1[:-2]]+1)]
parts1[0] = prop1[:-2]
for i in range(1, predicate_arity_map[prop1[:-2]] + 1):
if i == int(prop1[-1]):
arguments.append("{} - {}".format("?x", main_type))
parts1[i] = "?x"
else:
extra_variables += 1
parts1[i] = "?y{}".format(extra_variables)
prop = prop1[:-1] + str(i)
arguments.append("{} - {}".format("?y{}".format(extra_variables), property_type_map[prop]))
parts2 = [None for _ in range(predicate_arity_map[prop2[:-2]]+1)]
parts2[0] = prop2[:-2]
for i in range(1, predicate_arity_map[prop2[:-2]] + 1):
if i == int(prop2[-1]):
parts2[i] = "?x"
else:
extra_variables += 1
parts2[i] = "?y{}".format(extra_variables)
prop = prop2[:-1] + str(i)
arguments.append("{} - {}".format("?y{}".format(extra_variables), property_type_map[prop]))
num_invariants += 1
invariant = "(:derived (invariant-{})\n\t(forall ({})\n".format(num_invariants, " ".join(arguments))
invariant += "\t\t(not (and {}))))".format(" ".join(["({})".format(" ".join(parts1))] + ["({})".format(" ".join(parts2))]))
print(invariant)
return num_invariants
# for mutex in binary_mutexes:
# parts = list()
# mutex_parts = list()
# str = ""
# extra_variables = 0
# for state in mutex:
# properties = [inv_properties[i] for i in range(len(state)) if state[i] == 1]
# parts.append("(" + " AND ".join(properties) + ")")
# state_parts = list()
#
# for property in properties:
# predicate = property[:-2]
# pos = int(property[-1])
# predicate_arity = predicate_arity_map[predicate]
# params = [None for _ in range(predicate_arity)]
# for i in range(1, predicate_arity + 1):
# if i == pos:
# params[i-1] = "?x"
# else:
# extra_variables += 1
# params[i-1] = "?y{}".format(extra_variables)
# state_parts.append("({} {})".format(predicate, " ".join(params)))
# mutex_parts.append("(and {})".format(" ".join(state_parts)))
# str += "(:derived (invariant-#)\n\t(forall (?x {})\n".format(" ".join(["?y{}".format(j+1) for j in range(extra_variables)]))
# str += "\t\t(not (and {}))))".format(" ".join(mutex_parts))
#
# print("FORALL x:{}. {}".format(" U ".join(associated_types), "NOT ({})".format(" AND ".join(parts))))
# print(str)
def run_limited_instantiation(domain, problem, N=2):
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
print("=== Limited Instantiation with N={} (Rintanen 2017)".format(N))
for d_type in fd_task.types:
type_name = d_type.name
prms_a_list = list()
for action in fd_task.actions:
prms_a = 0
for parameter in action.parameters:
if parameter.type_name == type_name:
prms_a += 1
prms_a_list.append(prms_a)
prms_p_list = list()
for predicate in fd_task.predicates:
if predicate.name == '=':
continue
prms_p = 0
for argument in predicate.arguments:
if argument.type_name == type_name:
prms_p += 1
prms_p_list.append(prms_p)
max_prms_a = max(prms_a_list)
max_prms_p = max(prms_p_list)
L = max(max_prms_a, max_prms_p) + (N-1)*max_prms_p
print("For type <{}>: {} objects".format(type_name, L))
pass
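# Worked example of the bound above (hypothetical domain): if actions use at
# most 2 parameters of some type (max_prms_a = 2) and predicates use at most
# 2 arguments of that type (max_prms_p = 2), then for N = 2 the limit is
#   L = max(2, 2) + (2 - 1) * 2 = 4 objects of that type.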
def run_TIM(domain, problem):
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
properties = build_property_map(fd_task)
inv_properties = {v: k for k, v in properties.iteritems()}
num_properties = len(properties)
predicate_arity_map = build_predicate_arity_map(fd_task)
### Construct base PRSs (Section 2.3)
provisional_Ps = list()
for action in fd_task.actions:
type_list = [p.name for p in action.parameters]
prs_count = 0
for type in type_list:
prs_count = prs_count +1
precs = [0 for i in range(num_properties)]
del_precs = [0 for i in range(num_properties)]
adds = [0 for i in range(num_properties)]
if action.precondition.parts:
for pre in action.precondition.parts:
if type in pre.args:
index = pre.args.index(type) + 1
property = "{}_{}".format(pre.predicate, index)
precs[properties[property]] += 1
else:
if type in action.precondition.args:
index = action.precondition.args.index(type) + 1
property = "{}_{}".format(action.precondition.predicate, index)
precs[properties[property]] += 1
for eff in action.effects:
literal = eff.literal
if type in literal.args:
index = literal.args.index(type) + 1
property = "{}_{}".format(literal.predicate, index)
if not literal.negated:
adds[properties[property]] += 1
else:
del_precs[properties[property]] += 1
P = (precs, del_precs, adds)
provisional_Ps.append(P)
### Second PRS phase
Ps = list()
for P in provisional_Ps:
exchanged_properties = [k & l for k,l in zip(P[1], P[2])]
if any(exchanged_properties):
exchanged_properties_indexes = [i for i in range(len(exchanged_properties)) if exchanged_properties[i] == 1]
# PRS for the exchanged properties
for i in exchanged_properties_indexes:
mask = [0 for x in range(num_properties)]
mask[i] = 1
new_P = (P[0], mask, mask)
Ps.append(new_P)
P[1][i] -= 1
P[2][i] -= 1
Ps.append(P)
else:
Ps.append(P)
print("=== PRS List ===")
for i in range(1, len(Ps)+1):
print("PRS {}:\n{}".format(i,PRS_to_string(Ps[i-1], inv_properties)))
### Construct transition rules (Section 2.3)
Ts = list()
for P in Ps:
if not any(P[1]):
# Increasing attribute transition rule
for i in range(len(P[2])):
if P[2][i] > 0:
E = [k-l for k,l in zip(P[0],P[1])]
S = [0 for _ in P[0]]
F = [0 for _ in P[0]]
F[i] = 1
T = (E, S, F)
Ts.append(T)
elif not any(P[2]):
# Decreasing attribute transition rule
for i in range(len(P[1])):
if P[1][i] > 0:
E = [k - l for k, l in zip(P[0], P[1])]
S = [0 for _ in P[0]]
F = [0 for _ in P[0]]
S[i] = 1
T = (E, S, F)
Ts.append(T)
elif not any(P[0]):
# Last special case
for i in range(len(P[2])):
if P[2][i] > 0:
E = [0 for _ in P[0]]
S = [0 for _ in P[0]]
F = [0 for _ in P[0]]
F[i] = 1
T = (E, S, F)
Ts.append(T)
else:
E = [k-l for k,l in zip(P[0],P[1])]
T = (E, P[1], P[2])
Ts.append(T)
# Ts = list()
# for P in Ps:
# E = [k-l for k,l in zip(P[0],P[1])]
# T = (E, P[1], P[2])
# Ts.append(T)
print("=== Transition rules ===")
for i in range(1, len(Ts)+1):
print("TR {}:\n\t{}".format(i, transition_rule_to_string(Ts[i-1], inv_properties)))
### Seed property and attribute spaces (Section 2.3)
# Build united sets of properties
property_sets = set()
for T in Ts:
aux = [k | l for k, l in zip(T[1], T[2])]
for T2 in Ts:
aux2 = [k | l for k, l in zip(T2[1], T2[2])]
if any([k & l for k, l in zip(aux, aux2)]):
aux = [k | l for k, l in zip(aux, aux2)]
if any(aux):
property_sets.add(tuple(aux))
property_sets = list(property_sets)
# Initialize property and attribute spaces
property_spaces = list()
for i in range(len(property_sets)):
property_spaces.append({"property_set":property_sets[i], "transition_rules": [], "states": [], "objects": set(), "attribute_space": False})
### Assign transition rules (Section 2.4)
for property_space in property_spaces:
associated_rules = list()
for T in Ts:
if any([k & l for k,l in zip(property_space["property_set"], T[1])]) or any([k & l for k,l in zip(property_space["property_set"], T[2])]):
associated_rules.append(T)
if not any(T[1]) or not any(T[2]):
property_space['attribute_space'] = True
property_space['marked'] = False
property_space["transition_rules"] = associated_rules
# Analyse initial state (Section 2.4)
for obj in fd_task.objects:
obj_properties = [0 for x in range(num_properties)]
for atom in fd_task.init:
# if atom.predicate != "=" and obj.name in atom.args:
if obj.name in atom.args:
index = atom.args.index(obj.name) + 1
property = "{}_{}".format(atom.predicate, index)
obj_properties[properties[property]] = 1
for property_space in property_spaces:
b = [k & l for k,l in zip(property_space['property_set'], obj_properties)]
if any(b):
property_space['objects'].add(obj.name)
if not property_space['attribute_space'] and b not in property_space['states']:
property_space['states'].append(b)
### Extend property spaces (Section 2.4)
for p in property_spaces:
if not p['attribute_space']:
newgen = list()
for s in p['states']:
for t in p['transition_rules']:
aux = [k & l for k,l in zip(s, t[1])]
if aux == t[1]:
new_s = [k^l for k,l in zip(s,aux)]
new_s = [k | l for k,l in zip(new_s, t[2])]
                    # TODO: review supersets
for s2 in newgen:
if is_superset_state(new_s, s2):
p['attribute_space'] = True
if new_s not in p['states'] and new_s not in newgen:
newgen.append(new_s)
if not p['attribute_space']:
p['states'].extend(newgen)
### Extend attribute spaces (Section 2.4)
changes = True
while changes:
changes = False
for space in property_spaces:
if space['attribute_space'] and not space['marked']:
added_objects = extend_attribute_space(space, property_spaces)
if len(added_objects) > 0:
changes = True
print("=== Property spaces ===")
for i in range(1, len(property_spaces)+1) :
print("PS {}:\n\t{}".format(i, property_space_to_string(property_spaces[i-1], inv_properties)))
### Identify types (Section 2.6)
patterns = dict()
for o in fd_task.objects:
object_name = o.name
pattern = [0 for x in range(len(property_spaces))]
object_properties = set()
for i in range(len(pattern)):
if object_name in property_spaces[i]['objects']:
pattern[i] = 1
object_properties.update([inv_properties[j] for j in range(len(property_spaces[i]['property_set'])) if property_spaces[i]['property_set'][j] == 1 ])
pattern = tuple(pattern)
aux = patterns.get(pattern, {'objects': list(), 'properties': set()})
aux['objects'].append(object_name)
aux['properties'].update(object_properties)
patterns[pattern] = aux
print("=== Types ===")
for i in range(len(patterns)):
print("Type {}: {}".format(i, ", ".join(patterns[patterns.keys()[i]]['objects'])))
property_type_map = build_property_type_map(fd_task)
num_invariants = 0
print("=== Invariants ===")
### Construct invariants (Section 2.7)
for space in property_spaces:
if not space['attribute_space']:
property_set = [inv_properties[i] for i in range(len(space['property_set'])) if space['property_set'][i] == 1]
for p in property_set:
num_invariants = construct_identity_invariant(p, predicate_arity_map, property_type_map, patterns, num_invariants)
# construct_state_membership_invariant(space, fd_task, patterns, inv_properties)
# construct_uniqueness_invariant(space, fd_task, patterns, inv_properties)
# construct_binary_mutexes(space, predicate_arity_map, patterns, inv_properties)
num_invariants = construct_binary_predicate_mutexes(space, predicate_arity_map, property_type_map, inv_properties, num_invariants, fd_task)
# ### Sub-space analysis (Section 2.7.1)
# property_subspaces = list()
# for space in property_spaces:
# space_types = dict()
# for space_object in space['objects']:
# for k in patterns.keys():
# if space_object in patterns[k]['objects']:
# aux = space_types.get(k,set())
# aux.add(space_object)
# space_types[k] = aux
# if len(space_types.keys()) > 1:
# for k in space_types.keys():
# new_subspace = {"property_set":space['property_set'], "transition_rules": [], "states": [], "objects": space_types[k], "attribute_space": False}
# for t in space['transition_rules']:
# rule_enablers = set([inv_properties[i] for i in range(len(t[0])) if t[0][i] > 0])
# if rule_enablers.intersection(patterns[k]['properties']) == rule_enablers or len(rule_enablers) == 0:
# new_subspace["transition_rules"].append(t)
# if not any(t[1]) or not any(t[2]):
# property_space['attribute_space'] = True
# property_space['marked'] = False
# property_subspaces.append(new_subspace)
#
#
# # Analyse initial state for subspaces
# for obj in fd_task.objects:
# obj_properties = [0 for x in range(num_properties)]
# for atom in fd_task.init:
# # if atom.predicate != "=" and obj.name in atom.args:
# if obj.name in atom.args:
# index = atom.args.index(obj.name) + 1
# property = "{}_{}".format(atom.predicate, index)
# obj_properties[properties[property]] = 1
#
# for property_space in property_subspaces:
# b = [k & l for k, l in zip(property_space['property_set'], obj_properties)]
# if any(b):
# property_space['objects'].add(obj.name)
# if not property_space['attribute_space'] and b not in property_space['states']:
# property_space['states'].append(b)
#
# ### Extend property subspaces (Section 2.4)
# for p in property_subspaces:
# if not p['attribute_space']:
# newgen = list()
# for s in p['states']:
# for t in p['transition_rules']:
# aux = [k & l for k, l in zip(s, t[1])]
# if aux == t[1]:
# new_s = [k ^ l for k, l in zip(s, aux)]
# new_s = [k | l for k, l in zip(new_s, t[2])]
    #                     # TODO: review supersets
# for s2 in newgen:
# if is_superset_state(new_s, s2):
# p['attribute_space'] = True
# if new_s not in p['states'] and new_s not in newgen:
# newgen.append(new_s)
# if not p['attribute_space']:
# p['states'].extend(newgen)
#
# print("=== Property sub-spaces ===")
# for i in range(1, len(property_subspaces) + 1):
# print("PS {}:\n\t{}".format(i, property_space_to_string(property_subspaces[i - 1], inv_properties)))
#
#
# print("=== Sub-space Invariants ===")
# ### Construct invariants (Section 2.7)
# for space in property_subspaces:
# if not space['attribute_space']:
# property_set = [inv_properties[i] for i in range(len(space['property_set'])) if
# space['property_set'][i] == 1]
# for p in property_set:
# num_invariants = construct_identity_invariant(p, predicate_arity_map, property_type_map, patterns,
# num_invariants)
# construct_state_membership_invariant(space, fd_task, patterns, inv_properties)
# construct_uniqueness_invariant(space, fd_task, patterns, inv_properties)
# # construct_binary_mutexes(space, predicate_arity_map, patterns, inv_properties)
if __name__ == "__main__":
try:
domain = sys.argv[1]
problem = sys.argv[2]
except:
print "Usage:"
print sys.argv[0] + " <domain> <problem>"
sys.exit(-1)
run_limited_instantiation(domain, problem)
run_TIM(domain, problem)
| 29,768 | 38.221344 | 164 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/compiler2.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
def get_all_types(task, itype):
output=[itype]
# for i in task.types:
# if itype in i.name:
# if i.basetype_name!="object":
# output = output + [str(i.basetype_name)]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
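# Example (hypothetical hierarchy): with a type "block" whose basetype is
# "object", get_all_types(task, "object") returns ["object", "block"], i.e.
# the type itself plus its direct (not transitive) subtypes.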
def get_max_steps_from_plans(ps):
iout = 0
for plan in ps:
iout = max(iout, len(plan))
return iout
def get_max_vars_from_plans(ps):
iout = 0
for plan in ps:
for a in plan:
iout = max(iout, len(a.split(" ")) - 1)
return iout
def get_action_schema_from_plans(ps, task):
known_actions = [a.name for a in task.actions]
schemas = []
for plan in ps:
for a in plan:
counter = 0
name = a.replace("(", "").replace(")", "").split(" ")[0]
item = [name]
for p in a.replace("(", "").replace(")", "").split(" ")[1:]:
for o in task.objects:
if p.upper() == o.name.upper():
item.append(str(o.type_name))
counter = counter + 1
break
if item not in schemas:
schemas.insert(0, item)
return [x for x in schemas if x[0] not in known_actions], [x for x in schemas if x[0] in known_actions]
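# Illustrative example (hypothetical plan step): "(stack a b)" where objects
# a and b have type "block" yields the schema ["stack", "block", "block"];
# schemas whose name is already in the input domain are returned separately
# as known actions.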
def get_predicates_schema_from_plans(task):
preds = []
for p in task.predicates:
item = []
if p.name == "=":
continue
item.append(p.name)
for a in p.arguments:
item.append(a.type_name)
preds = preds + [item]
return preds
def get_static_predicates(tasks, predicates):
candidates = set([p[0] for p in predicates])
for task in tasks:
task_candidates = set()
for predicate in candidates:
init_predicates = set([p for p in task.init if p.predicate == predicate])
goal_predicates = set([p for p in task.goal.parts if p.predicate == predicate and p.negated == False])
if init_predicates == goal_predicates:
task_candidates.add(predicate)
candidates = candidates.intersection(task_candidates)
reflexive_static_predicates = dict()
for candidate in candidates:
reflexive_static_predicates[candidate] = True
for task in tasks:
init_predicates = set([p for p in task.init if p.predicate == candidate])
for predicate in init_predicates:
if len(predicate.args) == 1 or len(set(predicate.args)) != 1:
reflexive_static_predicates[candidate] = False
break
return [p for p in predicates if p[0] in candidates], reflexive_static_predicates
def get_static_precondition(predicate, action, plans, tasks):
static_preconditions = set()
params = [pddl.pddl_types.TypedObject("?o" + str(i), action[i]) for i in range(1, len(action))]
params = [x for x in params if x.type_name in predicate[1:]]
num_predicate_params = len(predicate[1:])
possible_param_tuples = list(itertools.combinations(params, num_predicate_params))
for t in possible_param_tuples:
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in t]))
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(t)]))
if len([x for x in action[1:] if x in predicate[1:]]) >= num_predicate_params:
all_instances = set()
for task in tasks:
all_instances.update([p.args for p in task.init if p.predicate == predicate[0]])
all_variables = set(sum(all_instances, ()))
for a in [item for sublist in plans for item in sublist]:
a = a.replace('(','').replace(')','').split(" ")
if a[0] == action[0]:
variables = [x for x in a[1:] if x in all_variables]
possible_tuples = list(itertools.combinations(variables, num_predicate_params))
static_preconditions_candidates = set()
for i in range(len(possible_tuples)):
if possible_tuples[i] in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in possible_param_tuples[i]]))
elif tuple(reversed(possible_tuples[i])) in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(possible_param_tuples[i])]))
static_preconditions = static_preconditions.intersection(static_preconditions_candidates)
return list(static_preconditions)
def possible_pred_for_action(task, p, a, tup):
if (len(p) > len(a)):
return False
action_types = [set([a[int(tup[i])]]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x)) for x in p[1:]]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
return all(fits)
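# Illustrative example (hypothetical schema): for p = ["on", "block", "block"],
# a = ["stack", "block", "block"] and tup = ("1", "2"), both argument positions
# type-match, so on(var1, var2) is a candidate precondition/effect for stack.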
def is_binary_mutex(axiom):
return isinstance(axiom.condition, pddl.UniversalCondition) and isinstance(axiom.condition.parts[0],
pddl.Disjunction) and len(axiom.condition.parts[0].parts) == 2 and isinstance(
axiom.condition.parts[0].parts[0], pddl.NegatedAtom) and isinstance(axiom.condition.parts[0].parts[1],
pddl.NegatedAtom)
def get_binary_mutexes(fd_task):
binary_mutexes = dict()
for axiom in fd_task.axioms:
if is_binary_mutex(axiom):
part1 = axiom.condition.parts[0].parts[0]
part2 = axiom.condition.parts[0].parts[1]
args1 = part1.args
args2 = part2.args
arity1 = len(args1)
arity2 = len(args2)
matchings = list()
            if arity1 == 0:
                matchings.extend([(-1,i) for i in range(arity2)])
            elif arity2 == 0:
                matchings.extend([(i, -1) for i in range(arity1)])
else:
for i in range(arity1):
for j in range(arity2):
if args1[i] == args2[j]:
matchings.append((i,j))
# print(part1, part2)
# print(matchings)
for tup in itertools.product(range(1, MAX_VARS+1), repeat=max(arity1, arity2)):
vars = ["var" + str(t) for t in tup]
# print(vars)
m1 = [vars[i] for i in range(arity1)]
for tup2 in itertools.product(vars, repeat=arity2):
m2 = [t for t in tup2]
# print(m1, m2)
match_all = True
for matching in matchings:
if matching[0] == -1 or matching[1] == -1:
continue
else:
match_all = match_all & (m1[matching[0]] == m2[matching[1]])
if match_all:
key = tuple([part1.predicate] + m1)
mutex = tuple([part2.predicate] + m2)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
key = tuple([part2.predicate] + m2)
mutex = tuple([part1.predicate] + m1)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
# print(key, mutex)
return binary_mutexes
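# Illustrative result (hypothetical mutex axiom): if (at ?x ?y) and (in ?x ?z)
# are declared mutex, then for MAX_VARS >= 2 the map contains entries such as
#   ("at", "var1", "var2") -> {("in", "var1", "var1"), ("in", "var1", "var2")}
# plus the symmetric entries keyed by "in".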
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
check_static_predicates = True
sys.argv.remove("-s")
else:
check_static_predicates = False
if "-i" in sys.argv:
program_with_invariants = True
sys.argv.remove("-i")
else:
program_with_invariants = False
if "-r" in sys.argv:
reversible_domain = True
sys.argv.remove("-r")
else:
reversible_domain = False
domain_folder_name = sys.argv[1]
domain_file = sys.argv[2]
problems_prefix_filename = sys.argv[3]
plans_prefix_filename = sys.argv[4]
input_level = int(sys.argv[5])
except:
print "Usage:"
print sys.argv[0] + "[-s] [-i] [-r] <domain> <domain filename> <problems prefix> <plans prefix> <input level (0 plans, 1 steps, 2 len(plan), 3 minimum)>"
sys.exit(-1)
# Reading the example plans
plans = []
i = 0
for filename in sorted(glob.glob(domain_folder_name + "/" + plans_prefix_filename + "*")):
plans.append([])
lcounter = 0
file = open(filename, 'r')
for line in file:
if input_level != config.INPUT_STEPS or (input_level == config.INPUT_STEPS and lcounter % 3 != 0):
plans[i].append(line.replace("\n", "").split(": ")[1])
lcounter = lcounter + 1
file.close()
i = i + 1
# Creating a FD task with the domain and the first problem file
domain_filename = "{}{}.pddl".format(domain_folder_name, domain_file)
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problems = []
fd_tasks = []
counter = 0
for problem_filename in sorted(glob.glob(domain_folder_name + "/" + problems_prefix_filename + "*")):
fd_problems = fd_problems + [pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)]
fd_tasks = fd_tasks + [pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[counter])]
counter = counter + 1
fd_task = copy.deepcopy(fd_tasks[0])
known_action_models = [action for action in fd_task.actions]
MAX_STEPS = get_max_steps_from_plans(plans)
MAX_VARS = get_max_vars_from_plans(plans)
new_actions, known_actions = get_action_schema_from_plans(plans, fd_task)
actions = new_actions + known_actions
predicates = get_predicates_schema_from_plans(fd_task)
static_predicates, reflexive_static_predicates = get_static_predicates(fd_tasks, predicates)
# binary_mutexes = get_binary_mutexes(fd_task)
# Compilation Problem
init_aux = copy.deepcopy(fd_task.init)
fd_task.init = []
fd_task.init.append(pddl.conditions.Atom("modeProg", []))
allpres = []
for a in new_actions: # All possible preconditions are initially programmed
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
# Add all preconditions to the initial model
# fd_task.init.append(
# pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
allpres = allpres + [str("pre_" + "_".join([p[0]] + [a[0]] + vars))]
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i), "i" + str(i + 1)]))
goals = []
for i in range(0, len(plans) + 1):
goals = goals + [pddl.conditions.Atom("test" + str(i), [""])]
fd_task.goal = pddl.conditions.Conjunction(goals)
# Compilation Domain
if input_level <= config.INPUT_LENPLAN:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 2):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
fd_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
for i in range(0, len(plans) + 1):
fd_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if input_level <= config.INPUT_LENPLAN:
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
# for axiom in fd_task.axioms:
# fd_task.predicates.append(pddl.predicates.Predicate(axiom.name, []))
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
fd_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("del_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p[0]] + [a[0]] + vars), []))
if input_level <= config.INPUT_STEPS:
for a in actions:
fd_task.predicates.append(pddl.predicates.Predicate("plan-" + a[0],
[pddl.pddl_types.TypedObject("?i", "step")] + [
pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i
in range(1, len(a))]))
learned_static_preconditions = dict()
# Original domain actions
# old_actions = copy.deepcopy(actions)
for a in actions:
pre = list()
eff = list()
is_known_action = False
# Add derived predicates
# pre.extend([invariant.condition for invariant in fd_task.axioms])
if a in known_actions:
is_known_action = True
for action in fd_task.actions:
if action.name == a[0]:
if isinstance(action.precondition, pddl.conditions.Atom):
pre.append(action.precondition)
else:
pre.extend([x for x in action.precondition.parts])
eff = action.effects
fd_task.actions.remove(action)
break
params = [pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i in range(1, len(a))]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
params = params + [pddl.pddl_types.TypedObject("?i1", "step")]
params = params + [pddl.pddl_types.TypedObject("?i2", "step")]
if check_static_predicates and input_level <= config.INPUT_STEPS:
for static_predicate in static_predicates:
static_preconditions = get_static_precondition(static_predicate, a, plans, fd_tasks)
learned_static_preconditions[a[0]] = list()
for static_precondition in static_preconditions:
pre.append(static_precondition)
learned_static_preconditions[a[0]].append(static_precondition)
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
if input_level <= config.INPUT_PLANS and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("plan-" + a[0], ["?i1"] + ["?o" + str(i) for i in range(1, len(a))])]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("current", ["?i1"])]
pre = pre + [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
disjunction = pddl.conditions.Disjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])] + [
pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup])])
pre = pre + [disjunction]
if input_level < config.INPUT_STEPS:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif input_level < config.INPUT_MINIMUM:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p[0], ["?o" + str(t) for t in tup]))]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup]))]
fd_task.actions.append(pddl.actions.Action(a[0], params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action schema
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
a_vars = ["var" + str(v) for v in var_ids]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
# Program precondition
vars = ["var" + str(t) for t in tup]
params = []
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
if program_with_invariants:
key = tuple([p[0]] + vars)
for mutex in binary_mutexes.get(key, set()):
if set(mutex[1:]).issubset(set(a_vars)):
pre = pre + [
pddl.conditions.NegatedAtom("pre_" + "_".join([mutex[0]] + [a[0]] + [e for e in mutex[1:]]),
[])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_pre_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if reversible_domain:
# Unprogram precondition
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_pre_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p in static_predicates and check_static_predicates:
continue
# Program add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
if program_with_invariants:
key = tuple([p[0]] + vars)
for mutex in binary_mutexes.get(key, set()):
if set(mutex[1:]).issubset(set(a_vars)):
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([mutex[0]] + [a[0]] + [e for e in mutex[1:]]),
[])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_add_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if reversible_domain:
# Unprogram add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_add_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Program del effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
if program_with_invariants:
key = tuple([p[0]] + vars)
for mutex in binary_mutexes.get(key, set()):
if set(mutex[1:]).issubset(set(a_vars)):
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([mutex[0]] + [a[0]] + [e for e in mutex[1:]]),
[])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_del_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Unprogram del effect
if reversible_domain:
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_del_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for validating the tests
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
#pre.extend([invariant.condition for invariant in fd_task.axioms])
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test0", []))]
for f in init_aux:
if f.predicate != "=":
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), f)]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for i in range(0, len(plans[0])):
action = plans[0][i]
name = action[1:-1].split(" ")[0]
params = action[1:-1].split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + name, ["i" + str(i + 1)] + params))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("modeProg", []))]
fd_task.actions.append(pddl.actions.Action("validate_0", [], 0, pddl.conditions.Conjunction(pre), eff, 0))
for i in range(0, len(plans)):
pre = []
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
# pre.extend([invariant.condition for invariant in fd_task.axioms])
for j in range(0, len(plans) + 1):
if j < i + 1:
pre = pre + [pddl.conditions.Atom("test" + str(j), [])]
else:
pre = pre + [pddl.conditions.NegatedAtom("test" + str(j), [])]
if input_level <= config.INPUT_LENPLAN:
pre = pre + [pddl.conditions.Atom("current", ["i" + str(len(plans[i]) + 1)])]
for g in fd_tasks[i].goal.parts:
pre = pre + [g]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test" + str(i + 1), []))]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(len(plans[i]) + 1)]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for j in range(0, len(plans[i])):
name = "plan-" + plans[i][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(name, pars))]
if i < len(plans) - 1:
for j in range(0, len(plans[i + 1])):
name = "plan-" + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(name, pars))]
fd_task.actions.append(
pddl.actions.Action("validate_" + str(i + 1), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
# Writing the compilation output domain and problem
fdomain = open(config.OUTPUT_PATH+"/aux_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
fdomain = open(config.OUTPUT_PATH+"/aux_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fdomain.close()
# Solving the compilation
cmd = "rm " + config.OUTPUT_FILENAME +" "+ config.OUTPUT_PATH+"/planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " "+config.OUTPUT_PATH+"/aux_domain.pddl " + config.OUTPUT_PATH+"/aux_problem.pddl -F " + str(len(plans) + sum([len(p) for p in plans])) + " " + config.PLANNER_PARAMS + " > "+ config.OUTPUT_PATH+"/planner_out.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Reading the plan output by the compilation
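# Each programming step in the plan encodes one model element in its name,
# e.g. a line containing "(program_pre_on_pickup_var1)" (hypothetical
# blocksworld fluent) would mean: predicate "on" over parameter var1 is a
# precondition of action "pickup". Splitting on "_" recovers these parts.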
pres = [[] for _ in xrange(len(new_actions))]
dels = [[] for _ in xrange(len(new_actions))]
adds = [[] for _ in xrange(len(new_actions))]
file = open(config.OUTPUT_FILENAME, 'r')
# Table-driven parsing of the plan: each action-name prefix maps to the model
# list it edits and the list operation to apply (append for program_*,
# remove for unprogram_*)
parse_table = [("(program_pre_", pres, list.append),
               ("(program_add_", adds, list.append),
               ("(program_del_", dels, list.append),
               ("(unprogram_pre_", pres, list.remove),
               ("(unprogram_add_", adds, list.remove),
               ("(unprogram_del_", dels, list.remove)]
for line in file:
    for keys, target, op in parse_table:
        if keys in line:
            aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
            action = aux[0].split("_")[1:] + aux[1:]
            indexa = [a[0] for a in new_actions].index(action[0])
            pred = [aux[0].split("_")[0]]
            if aux[0].split("_")[2:] != ['']:
                pred = pred + aux[0].split("_")[2:]
            op(target[indexa], pred)
file.close()
counter = 0
new_fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[0])
new_fd_task.actions = []
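# Rebuild each learned action schema from the decoded pres/adds/dels lists;
# a literal is kept only if all of its arguments map to parameters of the
# schema under construction.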
for action in new_actions:
params = ["?o" + str(i + 1) for i in range(0, len(action[1:]))]
ps = [pddl.pddl_types.TypedObject(params[i], action[i + 1]) for i in range(0, len(params))]
pre = []
if check_static_predicates:
pre += learned_static_preconditions.get(action[0], [])
for p in pres[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
pre = pre + [pddl.conditions.Atom(p[0], args)]
eff = []
for p in dels[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
for p in adds[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action[0], ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open(config.OUTPUT_PATH+"/learned_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, fd_domain))
fdomain.close()
sys.exit(0)
| 35360 | 43.20125 | 347 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/FAMA_FSM.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
import numpy as np
def get_max_vars(actions):
max_vars = 0
for a in actions:
max_vars = max(max_vars, a.num_external_parameters)
return max_vars
def get_max_steps(traces):
traces_steps = list()
for trace in traces:
not_empty_states = len([state for state in trace.states if state != []])
not_empty_actions = len([action for action in trace.actions if action != []])
traces_steps.append(max(not_empty_states, not_empty_actions))
return sum(traces_steps), max(traces_steps)
def get_all_types(task, itype):
output=[itype]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
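# A predicate can be bound to an action only if, for every argument, the type
# of the action parameter it maps to is compatible with (equal to or a
# subtype of) the declared predicate argument type.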
def possible_pred_for_action(task, p, a, tup):
if (len(p.arguments) > len(a.parameters)):
return False
action_types = [set([a.parameters[int(tup[i])-1].type_name]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x.type_name)) for x in p.arguments]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
return all(fits)
def get_static_predicates(state_trajectories, predicates):
candidates = set([p.name for p in predicates])
for trajectory in state_trajectories:
trace_candidates = set()
for predicate in candidates:
static = True
init_literals = set([l for l in trajectory[0] if l.predicate == predicate])
for state in trajectory[1:]:
state_literals = set([l for l in state if l.predicate == predicate])
if init_literals != state_literals:
static = False
break
if static:
trace_candidates.add(predicate)
candidates = candidates.intersection(trace_candidates)
# reflexive_static_predicates = dict()
# for candidate in candidates:
# reflexive_static_predicates[candidate] = True
# for trace in traces:
# init_literals = set([l for l in trace.init if l.predicate == candidate])
# for literal in init_literals:
# if len(literal.args) == 1 or len(set(literal.args)) != 1:
# reflexive_static_predicates[candidate] = False
# break
return candidates
# return [p for p in predicates if p.name in candidates]
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
check_static_predicates = True
sys.argv.remove("-s")
else:
check_static_predicates = False
if "-v" in sys.argv:
index = sys.argv.index("-v")
learned_domain = sys.argv[index+1]
validation_mode = True
sys.argv.remove("-v")
sys.argv.remove(learned_domain)
else:
validation_mode = False
if "-f" in sys.argv:
finite_steps = True
sys.argv.remove("-f")
else:
finite_steps = False
if "-t" in sys.argv:
index = sys.argv.index("-t")
trace_prefix = sys.argv[index+1]
sys.argv.remove("-t")
sys.argv.remove(trace_prefix)
else:
trace_prefix = "trace"
if "-nt" in sys.argv:
index = sys.argv.index("-nt")
nottrace_prefix = sys.argv[index+1]
sys.argv.remove("-nt")
sys.argv.remove(nottrace_prefix)
else:
nottrace_prefix = ""
if "-l" in sys.argv:
index = sys.argv.index("-l")
trace_min = int(sys.argv[index+1])
trace_max = int(sys.argv[index+2])
sys.argv.remove("-l")
sys.argv.remove(sys.argv[index])
sys.argv.remove(sys.argv[index])
else:
trace_min = None
domain_folder_name = sys.argv[1]
action_observability = float(0)
state_observability = float(1)
if action_observability == 1 or state_observability == 1:
finite_steps = True
except:
print "Usage:"
    print sys.argv[0] + " [-s] [-f] [-v learned_domain] <domain folder> <action observability (0-100)> <state observability (0-100)> -t trace_prefix -l input_limit"
sys.exit(-1)
# trace_filter = ["a", "b", "c", "d", "next", "head"]
# acceptor_state = "states4"
# pres_filter = []
# adds_filter = ['states0', 'states1', 'states2', 'states3', 'states4']
# dels_filter = ['states0', 'states1', 'states2', 'states3', 'states4']
profile = open(domain_folder_name+'/profile', 'r')
lines = profile.readlines()
trace_filter = [s.strip().lower() for s in lines[0].strip().split(',')]
acceptor_state = lines[1].strip()
pres_filter = [s.strip().lower() for s in lines[2].strip().split(',')]
adds_filter = [s.strip().lower() for s in lines[3].strip().split(',')]
dels_filter = [s.strip().lower() for s in lines[4].strip().split(',')]
profile.close()
not_acceptor_states = ['states0', 'states1', 'states2', 'states3',]
# Read the domain file
if not validation_mode:
domain_filename = "{}domain".format(domain_folder_name)
else:
domain_filename = learned_domain
domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(domain_pddl)
# Read the input traces
traces = list()
for filename in sorted(glob.glob(domain_folder_name + "/" + trace_prefix + "*")):
trace_pddl = pddl_parser.pddl_file.parse_pddl_file("trace", filename)
traces.append(pddl_parser.parsing_functions.parse_trace_pddl(trace_pddl, predicates, action_observability, state_observability))
for trace in traces:
for i in range(len(trace.states)):
trace.states[i] = [atom for atom in trace.states[i] if atom.predicate in trace_filter]
if trace_min != None:
traces = traces[trace_min:trace_max]
# Negative examples
if nottrace_prefix != "":
nottraces = list()
for filename in sorted(glob.glob(domain_folder_name + "/" + nottrace_prefix + "*")):
trace_pddl = pddl_parser.pddl_file.parse_pddl_file("trace", filename)
nottraces.append(pddl_parser.parsing_functions.parse_trace_pddl(trace_pddl, predicates, action_observability, state_observability))
for trace in nottraces:
for i in range(len(trace.states)):
trace.states[i] = [atom for atom in trace.states[i] if atom.predicate in trace_filter]
if trace_min != None:
nottraces = nottraces[trace_min:trace_max]
# Fix goal state to not include the acceptor state
for trace in nottraces:
trace.goal = [atom for atom in trace.goal if atom.predicate not in not_acceptor_states]
MAX_VARS = get_max_vars(actions)
TOTAL_STEPS, MAX_STEPS = get_max_steps(traces)
# static_predicates, reflexive_static_predicates = get_static_predicates(traces, predicates)
### LEARNING PROBLEM
# The objects of the original domain for the learning task
# is the union of all objects in the input traces
objects = list()
for trace in traces:
objects.extend(trace.objects)
objects = list(set(objects))
# Empty initial state for now
init = []
# Empty goal for now
goal = []
original_task = pddl.Task(domain_name, 'learning_problem', domain_requirements, types, objects,
predicates, functions, init, goal, actions, axioms, True)
learning_task = copy.deepcopy(original_task)
learning_task.actions = []
### LEARNING DOMAIN
# Define "modeProg" predicate
learning_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
# Define "test" predicates
for i in range(1, TOTAL_STEPS+2):
learning_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
# Define "step" domain type
learning_task.types.append(pddl.pddl_types.Type("step", "None"))
# Define "current" predicate. Example (current ?i - step)
learning_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
# Define "inext" predicate. Example (inext ?i1 - step ?i2 - step)
learning_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
# Define action model representation predicates
# Example (pre_clear_pickup_var1)
for a in actions:
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p.arguments))):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
# if p.name in pres_filter:
# learning_task.predicates.append(
# pddl.predicates.Predicate("pre_" + "_".join([p.name] + [a.name] + vars), []))
# if p.name in dels_filter:
# learning_task.predicates.append(
# pddl.predicates.Predicate("del_" + "_".join([p.name] + [a.name] + vars), []))
if p.name in adds_filter:
learning_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p.name] + [a.name] + vars), []))
# Define action validation predicates
# Example (plan-pickup ?i - step ?x - block)
if action_observability > 0:
for a in actions:
learning_task.predicates.append(pddl.predicates.Predicate("plan-" + a.name,
[pddl.pddl_types.TypedObject("?i", "step")] + a.parameters))
learning_task.predicates.append(pddl.predicates.Predicate("action_applied", []))
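    # "action_applied" is asserted by every original-domain action and is
    # required (then cleared) by each validate action, forcing at least one
    # action execution between consecutive observed states.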
# Original domain actions
for a in actions:
original_params = [par.name for par in a.parameters]
params = [pddl.pddl_types.TypedObject("?o" + str(i+1), a.parameters[i].type_name ) for i in range(a.num_external_parameters)]
pre = list()
known_preconditions = list(a.precondition.parts)
for known_precondition in known_preconditions:
pre += [pddl.conditions.Atom(known_precondition.predicate, ["?o" + str(original_params.index(arg) + 1) for arg in known_precondition.args])]
eff = list()
known_effects = list(a.effects)
del_state_effects = [p for p in known_preconditions if p.predicate in adds_filter]
known_effects = [e for e in known_effects if e.literal.predicate not in adds_filter or not e.literal.negated]
for known_effect in known_effects:
if not known_effect.literal.negated and known_effect.literal.predicate not in adds_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(known_effect.literal.predicate, ["?o" + str(original_params.index(arg) + 1) for arg in known_effect.literal.args]))]
elif known_effect.literal.negated and known_effect.literal.predicate not in dels_filter:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(known_effect.literal.predicate, ["?o" + str(original_params.index(arg) + 1) for arg in known_effect.literal.args]))]
# action_applied predicate
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("action_applied", []))]
# Add "step" parameters to the original actions
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
# Add "modeProg" precondition
pre += [pddl.conditions.NegatedAtom("modeProg", [])]
# Define action validation condition
# Example (and (current ?i1) (inext ?i1 ?i2))
validation_condition = [pddl.conditions.Atom("current", ["?i1"])]
validation_condition += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
pre += validation_condition
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
# Add all possible effects as conditional effects
# Example (when (and (del_ontable_put-down_var1 ))(not (ontable ?o1)))
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
# del effects
if p.name in dels_filter:
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p.name, ["?o" + str(t) for t in tup]))]
# add effects
# if p.name in adds_filter and p.name not in [kp.predicate for kp in known_preconditions]:
if p.name in adds_filter:
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p.name] + [a.name] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p.name, ["?o" + str(t) for t in tup]))]
for del_state_effect in del_state_effects:
condition = pddl.conditions.Disjunction(
[pddl.conditions.Atom("add_" + "_".join([s] + [a.name]), []) for s in adds_filter if
s != del_state_effect.predicate])
# condition = pddl.conditions.Disjunction([pddl.conditions.Atom("add_" + "_".join([s] + [a.name] ), []) for s in adds_filter if s != del_state_effect.predicate] + [pddl.conditions.NegatedAtom("add_" + "_".join([del_state_effect.predicate] + [a.name] ), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(del_state_effect.predicate, []))]
learning_task.actions.append(pddl.actions.Action(a.name, params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
filtered_predicates = [p for p in predicates if p.name in adds_filter]
# Actions for programming the action model
for a in actions:
var_ids = []
for i in range(a.num_external_parameters):
var_ids = var_ids + ["" + str(i+1)]
for p in filtered_predicates:
for tup in itertools.product(var_ids, repeat=len(p.arguments)):
if possible_pred_for_action(learning_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
params = []
# Action for inserting a positive effect
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.NegatedAtom("add_" + "_".join([fp.name] + [a.name] + vars), []) for fp in filtered_predicates]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("insert_add_" + "_".join([p.name]+[a.name]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if validation_mode:
                        # Action for deleting a programmed positive effect (model edition)
pre = []
pre += [pddl.conditions.Atom("modeProg", [])]
pre += [pddl.conditions.Atom("add_" + "_".join([p.name] + [a.name] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"add_" + "_".join([p.name] + [a.name] + vars), []))]
learning_task.actions.append(
pddl.actions.Action("delete_add_" + "_".join([p.name] + [a.name] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
last_state_validations = list()
MAX_ISTEPS = 2
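# With fully observable states (the setting hardcoded above) a single action
# is validated between consecutive states, so two step objects and one
# inext(i1, i2) link are enough for the step counter.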
# ACTIONS FOR THE VALIDATION OF THE INPUT TRACES
del_plan_effects = [] # store plan predicates here to delete in the next validate action
# First validate action
# Disables modeProg
pre = [pddl.conditions.Atom("modeProg", [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("modeProg", []))]
# Sets up the program counter to 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("current", ["i1"]))]
# Sets up the initial state of the first trace
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), atom) for atom in traces[0].init]
num_traces = len(traces)
states_seen = 0 # Used for "test" predicates
total_actions_seen = 0
for j in range(len(traces) + len(nottraces)):
if j < num_traces:
trace = traces[j]
else:
trace = nottraces[j-num_traces]
trace_length = len(trace.states)
actions_seen = 0
for step in range(trace_length):
if trace.actions[step] != []:
actions_seen += 1
total_actions_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
del_plan_effects += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("plan-" + trace.actions[step][0],
["i" + str(actions_seen)] + trace.actions[step][1:]))]
if trace.states[step] != []:
states_seen += 1
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
if states_seen != 1:
pre += [pddl.conditions.Atom("test" + str(states_seen - 1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
learning_task.actions.append(
pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
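            # Start accumulating pre/eff for the next validate action, which
            # fires once the programmed model reproduces the next observed state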
pre = [pddl.conditions.NegatedAtom("modeProg", [])]
# action_applied
pre += [pddl.conditions.Atom("action_applied", [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("action_applied", []))]
pre += [pddl.conditions.Atom("current", ["i2"])]
pre += trace.states[step]
eff = del_plan_effects
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i2"]))]
# eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
# pddl.conditions.NegatedAtom("test" + str(states_seen), []))]
# If it is the last/goal state of the trace but not the last trace
if step == trace_length -1 and j < len(traces) + len(nottraces) - 1:
if j >= num_traces:
# not acceptor_state
pre += [pddl.conditions.NegatedAtom(acceptor_state, [])]
else:
# acceptor state
pre += [pddl.conditions.Atom(acceptor_state, [])]
if j >= num_traces - 1:
next_trace = nottraces[j-num_traces+1]
else:
next_trace = traces[j + 1]
last_state_validations.append(states_seen+1)
next_state = set()
current_state = set()
for atom in next_trace.init:
if not atom.negated:
next_state.add(atom)
for atom in trace.goal:
if not atom.negated:
current_state.add(atom)
lost_atoms = current_state.difference(next_state)
new_atoms = next_state.difference(current_state)
for atom in lost_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(atom.predicate, atom.args))]
for atom in new_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom(atom.predicate, atom.args))]
reset_state = [p for p in filtered_predicates if p.name not in [atom.predicate for atom in new_atoms]]
for p in reset_state:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom(p.name, []))]
del_plan_effects = []
actions_seen = 0
states_seen += 1
# acceptor state
if len(nottraces) == 0:
pre += [pddl.conditions.Atom(acceptor_state, [])]
else:
pre += [pddl.conditions.NegatedAtom(acceptor_state, [])]
pre += [pddl.conditions.Atom("test" + str(states_seen-1), [])]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("test" + str(states_seen-1), []))]
eff += [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("test"+str(states_seen), []))]
learning_task.actions.append(pddl.actions.Action("validate_" + str(states_seen), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
last_state_validations.append(states_seen)
### LEARNING PROBLEM
learning_task.goal = pddl.conditions.Conjunction([pddl.conditions.Atom("test"+str(states_seen), [])])
# Add inext fluents
for i in range(2, MAX_ISTEPS+1):
learning_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
# Add step objects
for i in range(1, MAX_ISTEPS + 1):
learning_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
# Add modeProg fluent
learning_task.init.append(pddl.conditions.Atom("modeProg", []))
# size(M)
model_size = 0
# Add known preconditions and effects
for action in actions:
action_params = [p.name for p in action.parameters]
known_pres = list()
if type(action.precondition) is pddl.Conjunction and len(action.precondition.parts) > 0:
known_pres = action.precondition.parts
elif type(action.precondition) is pddl.Atom:
known_pres = [action.precondition]
filtered_known_pres = [pre for pre in known_pres if pre.predicate in pres_filter]
for pre in filtered_known_pres:
if type(pre) is pddl.conditions.Truth:
continue
model_representation_fluent = "pre_" + "_".join([pre.predicate] + [action.name] + ["var"+str(action_params.index(pre.args[i])+1) for i in range(len(pre.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_adds = [eff for eff in action.effects if not eff.literal.negated and eff.literal.predicate in adds_filter]
for eff in filtered_known_adds:
model_representation_fluent = "add_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
filtered_known_dels = [eff for eff in action.effects if eff.literal.negated and eff.literal.predicate in dels_filter]
for eff in filtered_known_dels:
model_representation_fluent = "del_" + "_".join(
[eff.literal.predicate] + [action.name] + ["var" + str(action_params.index(eff.literal.args[i]) + 1) for i in
range(len(eff.literal.args))])
learning_task.init.append(pddl.conditions.Atom(model_representation_fluent, []))
model_size += 1
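# model_size counts the literals of the known (reference) model programmed
# into the initial state; validation mode uses it below to compute semantic
# precision and recall from the number of edits.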
### Write the learning task domain and problem to pddl
fdomain = open("learning_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(learning_task, domain_pddl))
fdomain.close()
fdomain = open("learning_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(learning_task, domain_pddl))
fdomain.close()
### Solve the learning task
# starting_horizon = str(2*TOTAL_STEPS + 3)
validation_steps = max(states_seen-1, total_actions_seen)*2 + 1
if action_observability == 1 and state_observability == 0:
validation_steps = states_seen + total_actions_seen
starting_horizon = str(validation_steps + 2)
if state_observability==1 or action_observability==1:
ending_horizon = " -T " + starting_horizon
else:
ending_horizon = ""
plan_type = ""
if validation_mode:
plan_type = "-P 0"
ending_horizon = ""
cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " learning_domain.pddl learning_problem.pddl -F " + starting_horizon + " " +ending_horizon + " " + plan_type + " " + config.PLANNER_PARAMS + " > planner_out.log"
# print("\n\nExecuting... " + cmd)
os.system(cmd)
### Read the solution plan to the learning task
if not validation_mode:
pres = [[] for _ in xrange(len(actions))]
dels = [[] for _ in xrange(len(actions))]
adds = [[] for _ in xrange(len(actions))]
file = open(config.OUTPUT_FILENAME, 'r')
# Parse programming actions
for line in file:
keys = "(insert_add_"
if keys in line:
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a.name for a in actions].index(action[0])
pred = [aux[0].split("_")[0]]
if [aux[0].split("_")[2:]][0] != ['']:
pred = pred + [aux[0].split("_")[2:]][0]
adds[indexa].append(pred)
keys = "(validate_1)"
if keys in line:
break
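        # All insert_add_* steps precede validate_1 in the plan (they are only
        # applicable while modeProg holds), so parsing can stop here.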
# Add known preconditions and effects
for indexa in range(len(actions)):
action = actions[indexa]
action_params = [p.name for p in action.parameters]
known_pres = list()
if type(action.precondition) is pddl.Conjunction and len(action.precondition.parts) > 0:
known_pres = action.precondition.parts
elif type(action.precondition) is pddl.Atom:
known_pres = [action.precondition]
for pre in known_pres:
if type(pre) is pddl.conditions.Truth:
continue
precondition = [pre.predicate] + ["var" + str(action_params.index(pre.args[i]) + 1) for i in
range(len(pre.args))]
pres[indexa].append(precondition)
for eff in action.effects:
if not eff.literal.negated:
effect = [eff.literal.predicate] + ["var" + str(action_params.index(eff.literal.args[i]) + 1)
for i in
range(len(eff.literal.args))]
adds[indexa].append(effect)
else:
effect = [eff.literal.predicate] + ["var" + str(action_params.index(eff.literal.args[i]) + 1)
for i in
range(len(eff.literal.args))]
dels[indexa].append(effect)
counter = 0
new_fd_task = copy.deepcopy(original_task)
new_fd_task.actions = []
for action in actions:
ps = [pddl.pddl_types.TypedObject("?o"+str(i+1), action.parameters[i].type_name) for i in range(action.num_external_parameters)]
pre = []
for p in pres[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
pre = pre + [pddl.conditions.Atom(p[0], args)]
eff = []
for p in dels[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
for p in adds[counter]:
args = ["?o" + i.replace("var", "") for i in p[1:]]
ball = True
for arg in args:
if not arg in [x.name for x in ps]:
ball = False
if ball:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action.name, ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
# new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open("learned_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, domain_pddl))
fdomain.close()
sys.exit(0)
### Read the solution plan to the evaluation task
inserts = 0
deletes = 0
if validation_mode:
file = open(config.OUTPUT_FILENAME, 'r')
# Parse edition actions
for line in file:
if "insert_" in line:
inserts += 1
elif "delete_" in line:
deletes += 1
else:
break
file.close()
semPrecision = np.float64(model_size - deletes) / model_size
semRecall = np.float64(model_size - deletes) / (model_size - deletes + inserts)
# print("{} & {} & {} \\\\".format(domain_name, semPrecision, semRecall))
# print("{}. Distance: {}".format(domain_name, str(inserts + deletes)))
print(str(inserts + deletes))
| 30816 | 40.985014 | 272 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/cleanup.py
|
#! /usr/bin/env python
import sys,os
cmd= "rm -rf *.pyc pddl/*.pyc pddl_parser/*.pyc *.*~ test-* plan-* aux_* output* *.log sas* learned_domain.pddl results/*"
print cmd
os.system(cmd)
sys.exit(0)
| 202 | 19.3 | 124 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/model_evaluator.py
|
#! /usr/bin/env python
# from __future__ import division
import sys
import pddl, pddl_parser
import numpy as np
import itertools
def reform_literal(literal, action_args, reformulation):
literal_name = literal.predicate
params = [action_args[reformulation[action_args.index(arg)]-1] for arg in literal.args]
return tuple([literal_name] + [tuple(params)])
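# reform_literal renames a literal's arguments through a parameter
# permutation, so that two action models can be compared up to parameter
# reordering.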
def valid_parameter_combination(param_comb, action, action_params_dict):
action_params = action_params_dict[action]
for i in range(len(param_comb)):
if action_params[i] != action_params[param_comb[i]-1]:
return False
return True
def valid_action_combination(comb, action_params_dict):
matched_actions = [x[1][0] for x in comb]
if any(matched_actions.count(x) > 1 for x in matched_actions):
return False
actions_fit = [(set(action_params_dict[p[0][0]]) == set(action_params_dict[p[1][0]])) and (len(action_params_dict[p[0][0]]) == len(action_params_dict[p[1][0]])) for p in comb]
return all(actions_fit)
def evaluate_matching(matchings, eva_actions, ref_actions):
ref_pres = set()
eva_pres = set()
ref_adds = set()
eva_adds = set()
ref_dels = set()
eva_dels = set()
for match in matchings:
action_evaluated = match[0]
matched_action = match[1]
param_reform = match[2:]
# Build the pre/add/del sets
# Each element of the set is a tuple (action name, literal)
for action in ref_actions:
if action.name == action_evaluated:
# Preconditions
if isinstance(action.precondition, pddl.conditions.Atom):
ref_pres.add((action_evaluated, action.precondition.key))
else:
ref_pres.update([(action_evaluated, x.key) for x in action.precondition.parts])
# Effects
for effect in action.effects:
if effect.literal.negated:
ref_dels.add((action_evaluated, effect.literal.key))
else:
ref_adds.add((action_evaluated, effect.literal.key))
break
for action in eva_actions:
if action.name == matched_action:
action_args = [arg.name for arg in action.parameters]
# Preconditions
if isinstance(action.precondition, pddl.conditions.Atom):
eva_pres.add((action_evaluated, reform_literal(action.precondition, action_args, param_reform)))
else:
eva_pres.update([(action_evaluated, reform_literal(x, action_args, param_reform)) for x in action.precondition.parts])
# Effects
for effect in action.effects:
if effect.literal.negated:
eva_dels.add((action_evaluated, reform_literal(effect.literal, action_args, param_reform)))
else:
eva_adds.add((action_evaluated, reform_literal(effect.literal, action_args, param_reform)))
break
# pres_insertions = len(ref_pres) - len(ref_pres.intersection(eva_pres))
# adds_insertions = len(ref_adds) - len(ref_adds.intersection(eva_adds))
# dels_insertions = len(ref_dels) - len(ref_dels.intersection(eva_dels))
pres_deletions = len(eva_pres) - len(ref_pres.intersection(eva_pres))
adds_deletions = len(eva_adds) - len(ref_adds.intersection(eva_adds))
dels_deletions = len(eva_dels) - len(ref_dels.intersection(eva_dels))
pres_missing = len(ref_pres) - len(eva_pres.intersection(ref_pres))
adds_missing = len(ref_adds) - len(eva_adds.intersection(ref_adds))
dels_missing = len(ref_dels) - len(eva_dels.intersection(ref_dels))
model_mistake = pres_deletions + adds_deletions + dels_deletions + pres_missing + adds_missing+ dels_missing
model_size = len(eva_pres.union(ref_pres)) + len(eva_adds.union(ref_adds)) + len(eva_dels.union(ref_dels))
avg_accuracy = 1 - np.nan_to_num(np.float64(model_mistake) / model_size)
# Compute precision and recall
precision_pres = np.nan_to_num(np.float64(len(eva_pres) - pres_deletions) / len(eva_pres))
recall_pres = np.nan_to_num(np.float64(len(eva_pres) - pres_deletions) / len(ref_pres))
precision_adds = np.nan_to_num(np.float64(len(eva_adds) - adds_deletions) / len(eva_adds))
recall_adds = np.nan_to_num(np.float64(len(eva_adds) - adds_deletions) / len(ref_adds))
precision_dels = np.nan_to_num(np.float64(len(eva_dels) - dels_deletions) / len(eva_dels))
recall_dels = np.nan_to_num(np.float64(len(eva_dels) - dels_deletions) / len(ref_dels))
# Micro average
avg_precision = np.nan_to_num(np.float64(len(eva_pres) + len(eva_adds) + len(eva_dels) - pres_deletions - adds_deletions - dels_deletions) / (len(eva_pres) + len(eva_adds) + len(eva_dels)))
avg_recall = np.nan_to_num(
np.float64(len(eva_pres) + len(eva_adds) + len(eva_dels) - pres_deletions - adds_deletions - dels_deletions) / (
len(ref_pres) + len(ref_adds) + len(ref_dels)))
# Macro average
# avg_precision = (precision_pres + precision_adds + precision_dels) / 3
# avg_recall = (recall_pres + recall_adds + recall_dels) / 3
return (avg_accuracy, precision_pres, recall_pres, precision_adds, recall_adds, precision_dels, recall_dels, avg_precision, avg_recall)
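# Tuple layout: 0 accuracy, 1-2 precondition P/R, 3-4 add P/R, 5-6 del P/R,
# 7-8 overall micro-averaged P/R.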
def evaluate(evaluation_domain_filename, reference_domain_filename, reformulation, partial_domain_filename = None):
    # Parsing the reference domain
ref_domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", reference_domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(ref_domain_pddl)
ref_actions = actions
name = domain_name
    # Parsing the domain to evaluate
eva_domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", evaluation_domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(eva_domain_pddl)
eva_actions = actions
known_actions = list()
if partial_domain_filename:
        # Parsing the partial domain to identify the known actions
partial_domain_pddl = pddl_parser.pddl_file.parse_pddl_file("domain", partial_domain_filename)
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= pddl_parser.parsing_functions.parse_domain_pddl(partial_domain_pddl)
known_actions = [a.name for a in actions]
arities = set()
actions_arity_list = list()
action_params_dict = dict()
for action in ref_actions:
arity = len(action.parameters)
action_name = action.name
action_params = [p.type_name for p in action.parameters]
action_params_dict[action_name] = action_params
if action_name not in known_actions:
actions_arity_list.append((action_name, arity))
arities.add(arity)
if not reformulation:
matches = list()
for action_name, arity in actions_arity_list:
matches.append(tuple([action_name, action_name]+[i+1 for i in range(arity)]))
matching_list = [matches]
else:
actions_by_arity = list()
actions_name_by_arity = list()
for ar in arities:
actions = list()
actions_names = list()
for action,arity in actions_arity_list:
if arity == ar:
params = [i for i in range(1, arity + 1)]
for param_comb in itertools.permutations(params, ar):
if valid_parameter_combination(param_comb, action, action_params_dict):
actions.append(tuple([action] + [x for x in param_comb]))
actions_names.append(action)
actions_by_arity.append((ar, actions))
actions_name_by_arity.append((ar, actions_names))
combinations_by_arity = list()
for i in range(len(actions_by_arity)):
arity = actions_by_arity[i][0]
actions = actions_by_arity[i][1]
proper_actions = [tuple([x]+[i for i in range(1,arity+1)]) for x in actions_name_by_arity[i][1]]
combinations = [zip(proper_actions, x) for x in itertools.permutations(actions, len(proper_actions))]
            if len(proper_actions) > 1:
                # filter with a comprehension; calling remove() while
                # iterating over the same list skips elements
                combinations = [comb for comb in combinations
                                if valid_action_combination(comb, action_params_dict)]
combinations_by_arity.append((arity, combinations))
combinations_by_arity = [[[tuple([p[0][0]] + [e for e in p[1]]) for p in comb] for comb in combinations_by_arity[i][1]] for i in range(len(combinations_by_arity))]
action_combinations = combinations_by_arity[0]
for i in range(1, len(combinations_by_arity)):
aux = list()
for c in itertools.product(action_combinations, combinations_by_arity[i]):
aux2 = [x for x in c[0]]
aux2.extend(c[1])
aux.append(aux2)
action_combinations = aux
matching_list = action_combinations
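    # Evaluate every candidate matching and keep the one maximizing the F1
    # score of the overall precision and recall.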
best_score = -1
best_evaluation = None
best_matches = None
for matches in matching_list:
evaluation = evaluate_matching(matches, eva_actions, ref_actions)
        # indices 7 and 8 are the overall (micro-averaged) precision and recall
        if evaluation[7] + evaluation[8] > 0:
            f1_score = 2 * (evaluation[7] * evaluation[8]) / (evaluation[7] + evaluation[8])
        else:
            f1_score = 0.0
if f1_score > best_score:
best_score = f1_score
best_evaluation = evaluation
best_matches = matches
return name, best_evaluation, best_matches
# **************************************#
# MAIN
# **************************************#
if __name__ == "__main__":
try:
cmdargs = sys.argv[1:]
if cmdargs[0] == "-r":
reformulation = True
cmdargs = cmdargs[1:]
else:
reformulation = False
if cmdargs[0] == "-p":
partial_domain_filename = cmdargs[1]
cmdargs = cmdargs[2:]
else:
partial_domain_filename = None
reference_domain_filename = cmdargs[0]
evaluation_domain_filename = cmdargs[1]
except:
print "Usage:"
print sys.argv[0] + " [-r] [-p <partial domain>] <reference domain> <evaluation domain>"
sys.exit(-1)
domain_name, best_evaluation, best_matches = evaluate(evaluation_domain_filename, reference_domain_filename, reformulation)
# print(best_matches)
# print("Pres: precision={}, recall={}".format(best_evaluation[0], best_evaluation[1]))
# print("Adds: precision={}, recall={}".format(best_evaluation[2], best_evaluation[3]))
# print("Dels: precision={}, recall={}".format(best_evaluation[4], best_evaluation[5]))
# print("Total: precision={}, recall={}".format(best_evaluation[6], best_evaluation[7]))
print(" & ".join([domain_name] + [str(round(e, 2)) for e in best_evaluation]) + " \\\\" + " % {}".format(best_matches))
sys.exit(0)
| 11499 | 44.098039 | 193 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/meta-example-generator.py
|
#! /usr/bin/env python
import sys, os, glob
import pddl, pddl_parser
import fdtask_to_pddl, planning
#**************************************#
# MAIN
#**************************************#
try:
source_folder_name = sys.argv[1]
destination_folder_name = sys.argv[2]
except:
print "Usage:"
print sys.argv[0] + " <source folder name> <destination folder name>"
sys.exit(-1)
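# Per-domain argument strings passed verbatim to example-generator.py,
# presumably <domain> <problem> <planner (M/LPG)> <#observations> <max length>;
# entries set to None are skipped below.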
experiments = {'blocks': "../benchmarks/generator/blocks/domain.pddl ../benchmarks/generator/blocks/problem3.pddl M 10 100",
'childsnack': None,
'driverlog': "../benchmarks/generator/driverlog/domain.pddl ../benchmarks/generator/driverlog/problem1.pddl M 10 100",
'ferry': "../benchmarks/generator/ferry/domain.pddl ../benchmarks/generator/ferry/problem1.pddl M 10 100",
'floortile': "../benchmarks/generator/floortile/domain.pddl ../benchmarks/generator/floortile/problem1.pddl M 10 100",
'grid': "../benchmarks/generator/grid/domain.pddl ../benchmarks/generator/grid/problem1.pddl M 10 100",
'gripper': "../benchmarks/generator/gripper/domain.pddl ../benchmarks/generator/gripper/problem1.pddl M 10 100",
'hanoi': "../benchmarks/generator/hanoi/domain.pddl ../benchmarks/generator/hanoi/problem1.pddl LPG 10 100",
'hiking': None,
'miconic': "../benchmarks/generator/miconic/domain.pddl ../benchmarks/generator/miconic/problem1.pddl M 10 100",
'npuzzle': "../benchmarks/generator/npuzzle/domain.pddl ../benchmarks/generator/npuzzle/problem1.pddl LPG 10 100",
'parking': None,
'pegsol': None,
'satellite': "../benchmarks/generator/satellite/domain.pddl ../benchmarks/generator/satellite/problem1.pddl M 10 100",
'sokoban': None,
'transport': "../benchmarks/generator/transport/domain.pddl ../benchmarks/generator/transport/problem1.pddl M 10 100",
'visitall': "../benchmarks/generator/transport/domain.pddl ../benchmarks/generator/transport/problem1.pddl M 10 100",
'zenotravel': "../benchmarks/generator/transport/domain.pddl ../benchmarks/generator/transport/problem1.pddl M 10 100"
}
for item in sorted(glob.glob(source_folder_name+"/*")):
domain_name = item[len(source_folder_name)+1:]
domain_filename = source_folder_name + "/" + domain_name + "/domain.pddl"
problem_filename = source_folder_name + "/" + domain_name + "/test-1.pddl"
cmd = "mkdir " + destination_folder_name + "/" + domain_name+ "/"
print("\n\nExecuting... " + cmd)
os.system(cmd)
if experiments[domain_name]==None:
continue
cmd = "./example-generator.py " + experiments[domain_name]
print("\n\nExecuting... " + cmd)
os.system(cmd)
    # Logging the output files
cmd = "mkdir " + destination_folder_name + "/" + domain_name+ "/"
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "mv test-* " + destination_folder_name + "/" + domain_name + "/"
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "mv plan-* " + destination_folder_name + "/" + domain_name + "/"
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "mv ten-observation-* " + destination_folder_name + "/" + domain_name + "/"
print("\n\nExecuting... " + cmd)
os.system(cmd)
sys.exit(0)
| 3395 | 46.166667 | 133 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/utils.py
|
#! /usr/bin/env python
import pddl
def compute_constants(task,domain):
constants=set()
for i in range(0,len(domain)):
        if domain[i][0] == ":constants":
btype=False
for c in domain[i][1:]:
if c=="-":
btype=True
elif btype==False:
io=[o.name for o in task.objects].index(c)
constants.add(task.objects[io])
else:
btype=False
for action in task.actions:
for atom in action.precondition.parts:
if isinstance(atom,pddl.conditions.Literal):
for arg in atom.args:
if not "?" in str(arg):
io=[o.name for o in task.objects].index(arg)
constants.add(task.objects[io])
return constants
def get_predicate(name,task):
for p in task.predicates:
if name==p.name:
return p
def compute_dynamic_predicates(task):
dynamic_predicates=set()
for a in task.actions:
for e in a.effects:
dynamic_predicates.add(get_predicate(e.literal.predicate,task))
return dynamic_predicates
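# inliterals normalizes the different literal representations (string,
# Predicate, Literal, Effect) to a (name, args, polarity) triple and then
# tests whether l occurs in ls under that normalization.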
def inliterals(l,ls):
if type(l)==str:
n1 = l.replace("(","").replace(")","").split(" ")[0]
        a1 = " ".join(map(str, l.replace("(","").replace(")","").split(" ")[1:]))
b1 = True
if isinstance(l,pddl.predicates.Predicate):
n1 = l.name
a1 = " ".join(map(str, [aux.name for aux in l.arguments]))
b1 = True
if isinstance(l,pddl.conditions.Literal):
n1 = l.predicate
a1 = " ".join(map(str, l.args))
b1 = not(l.negated)
for l2 in ls:
if isinstance(l2,pddl.predicates.Predicate):
n2=l2.name
a2=a1
b2=b1
if isinstance(l2,pddl.conditions.Literal):
n2=l2.predicate
a2=" ".join(map(str, l2.args))
b2=not(l2.negated)
if isinstance(l2,pddl.effects.Effect):
n2=l2.literal.predicate
a2=" ".join(map(str, l2.literal.args))
b2=not(l2.literal.negated)
if n1==n2 and a1==a2 and b1==b2:
return True
return False
def get_outcomes(aname,task):
outcomes=[]
for a in task.actions:
if (aname == a.name.split("_DETDUP_")[0]):
outcomes.append(a.effects)
return outcomes
def is_special_predicate(name):
if "xstacktopx" in name.lower():
return True
if "xDEL".lower() in name.lower():
return True
if "xADD".lower() in name.lower():
return True
if "=" in name:
return True
return False
| 2611 | 24.607843 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/testset-evaluator.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools, math
import pddl, pddl_parser
import config, fdtask_to_pddl
def get_all_types(task, itype):
output=[itype]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
def get_max_steps_from_plans(ps):
iout = 0
for plan in ps:
iout = max(iout, len(plan))
return iout
def get_max_vars_from_plans(ps):
iout = 0
for plan in ps:
for a in plan:
iout = max(iout, len(a.split(" ")) - 1)
return iout
def get_action_schema_from_plans(ps, task):
schemas = []
for plan in ps:
for a in plan:
counter = 0
name = a.replace("(", "").replace(")", "").split(" ")[0]
item = [name]
for p in a.replace("(", "").replace(")", "").split(" ")[1:]:
for o in task.objects:
if p.upper() == o.name.upper():
item.append(str(o.type_name))
counter = counter + 1
break
if item not in schemas:
schemas.insert(0, item)
return schemas
def get_predicates_schema_from_plans(task):
preds = []
for p in task.predicates:
item = []
if p.name == "=":
continue
item.append(p.name)
for a in p.arguments:
item.append(a.type_name)
preds = preds + [item]
return preds
def possible_pred_for_action(task, p, a, tup):
if (len(p) > len(a)):
return False
action_types = [set([a[int(tup[i])]]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x)) for x in p[1:]]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
return all(fits)
def get_fluents_from_model(task):
fluents = list()
for a in task.actions:
params = [x.name for x in a.parameters]
for pre in a.precondition.parts:
vars = list()
for arg in pre.args:
pos = params.index(arg)
vars.append("var"+str(pos+1))
fluents += [str("pre_" + "_".join([pre.predicate] + [a.name] + vars))]
for eff in a.effects:
vars = list()
for arg in eff.literal.args:
pos = params.index(arg)
vars.append("var" + str(pos + 1))
if eff.literal.negated:
fluents += [str("del_" + "_".join([eff.literal.predicate] + [a.name] + vars))]
else:
fluents += [str("add_" + "_".join([eff.literal.predicate] + [a.name] + vars))]
return fluents
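# Re-encode the learned model as pre_/add_/del_ fluents; they are added to
# the initial state below so the compilation starts from the learned model.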
# **************************************#
# MAIN
# **************************************#
try:
test_folder_name = sys.argv[2]
domain_file = sys.argv[1]
problems_prefix_filename = sys.argv[3]
plans_prefix_filename = sys.argv[4]
input_level = int(sys.argv[5])
except:
print "Usage:"
    print sys.argv[0] + " <domain> <test folder> <problems prefix> <plans prefix> <input level (0 plans, 1 steps, 2 len(plan), 3 minimum)>"
sys.exit(-1)
# Reading the example plans
plans = []
i = 0
for filename in sorted(glob.glob(test_folder_name + "/" + plans_prefix_filename + "*")):
plans.append([])
lcounter = 0
file = open(filename, 'r')
for line in file:
if input_level != config.INPUT_STEPS or (input_level == config.INPUT_STEPS and lcounter % 3 != 0):
plans[i].append(line.replace("\n", "").split(": ")[1])
lcounter = lcounter + 1
file.close()
i = i + 1
# Creating a FD task with the domain and the first problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_file)
fd_problems = []
fd_tasks = []
counter = 0
for problem_filename in sorted(glob.glob(test_folder_name + "/" + problems_prefix_filename + "*")):
fd_problems = fd_problems + [pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)]
fd_tasks = fd_tasks + [pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[counter])]
counter = counter + 1
fd_task = copy.deepcopy(fd_tasks[0])
MAX_STEPS = get_max_steps_from_plans(plans)
MAX_VARS = get_max_vars_from_plans(plans)
actions = get_action_schema_from_plans(plans, fd_task)
predicates = get_predicates_schema_from_plans(fd_task)
learned_model_fluents = get_fluents_from_model(fd_task)
# Compilation Problem
init_aux = copy.deepcopy(fd_task.init)
fd_task.init = []
fd_task.init.append(pddl.conditions.Atom("modeProg", []))
for fluent in learned_model_fluents:
fd_task.init.append(pddl.conditions.Atom(fluent, []))
allpres = []
for a in actions: # All possible preconditions are initially programmed
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
# fd_task.init.append(
# pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
allpres = allpres + [str("pre_" + "_".join([p[0]] + [a[0]] + vars))]
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i), "i" + str(i + 1)]))
goals = []
for i in range(0, len(plans) + 1):
    goals = goals + [pddl.conditions.Atom("test" + str(i), [])]
fd_task.goal = pddl.conditions.Conjunction(goals)
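# The goal requires every test<i> flag: test0 is set when leaving programming
# mode and each remaining test<i> is set by validating the i-th input plan.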
# Compilation Domain
if input_level <= config.INPUT_LENPLAN:
    fd_task.types.append(pddl.pddl_types.Type("step", "None"))
    for i in range(1, MAX_STEPS + 2):
        fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
fd_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
for i in range(0, len(plans) + 1):
fd_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if input_level <= config.INPUT_LENPLAN:
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
for a in actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
fd_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("del_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p[0]] + [a[0]] + vars), []))
if input_level <= config.INPUT_STEPS:
for a in actions:
fd_task.predicates.append(pddl.predicates.Predicate("plan-" + a[0],
[pddl.pddl_types.TypedObject("?i", "step")] + [
pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i
in range(1, len(a))]))
fd_task.actions = []
# Original domain actions
# old_actions = copy.deepcopy(actions)
for a in actions:
pre = list()
eff = list()
params = [pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i in range(1, len(a))]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
params = params + [pddl.pddl_types.TypedObject("?i1", "step")]
params = params + [pddl.pddl_types.TypedObject("?i2", "step")]
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
if input_level <= config.INPUT_PLANS and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("plan-" + a[0], ["?i1"] + ["?o" + str(i) for i in range(1, len(a))])]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("current", ["?i1"])]
pre = pre + [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
disjunction = pddl.conditions.Disjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])] + [
pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup])])
pre = pre + [disjunction]
if input_level < config.INPUT_STEPS:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif input_level < config.INPUT_MINIMUM:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p[0], ["?o" + str(t) for t in tup]))]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup]))]
fd_task.actions.append(pddl.actions.Action(a[0], params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action schema
for a in actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
a_vars = ["var" + str(v) for v in var_ids]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
# Program precondition
vars = ["var" + str(t) for t in tup]
params = []
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_pre_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Unprogram precondition
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_pre_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Program add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_add_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Unprogram add effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_add_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Program del effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_del_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Unprogram del effect
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("unprogram_del_" + "_".join([p[0]] + [a[0]] + vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for validating the tests
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
#pre.extend([invariant.condition for invariant in fd_task.axioms])
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test0", []))]
for f in init_aux:
if f.predicate != "=":
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), f)]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for i in range(0, len(plans[0])):
action = plans[0][i]
name = action[1:-1].split(" ")[0]
params = action[1:-1].split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + name, ["i" + str(i + 1)] + params))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("modeProg", []))]
fd_task.actions.append(pddl.actions.Action("validate_0", [], 0, pddl.conditions.Conjunction(pre), eff, 0))
for i in range(0, len(plans)):
pre = []
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
# pre.extend([invariant.condition for invariant in fd_task.axioms])
for j in range(0, len(plans) + 1):
if j < i + 1:
pre = pre + [pddl.conditions.Atom("test" + str(j), [])]
else:
pre = pre + [pddl.conditions.NegatedAtom("test" + str(j), [])]
if input_level <= config.INPUT_LENPLAN:
pre = pre + [pddl.conditions.Atom("current", ["i" + str(len(plans[i]) + 1)])]
for g in fd_tasks[i].goal.parts:
pre = pre + [g]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test" + str(i + 1), []))]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(len(plans[i]) + 1)]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for j in range(0, len(plans[i])):
name = "plan-" + plans[i][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(name, pars))]
if i < len(plans) - 1:
for j in range(0, len(plans[i + 1])):
name = "plan-" + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(name, pars))]
fd_task.actions.append(
pddl.actions.Action("validate_" + str(i + 1), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
# Writing the compilation output domain and problem
fdomain = open("aux_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
fdomain = open("aux_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fdomain.close()
# Solving the compilation
cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " aux_domain.pddl aux_problem.pddl -F " + str(len(plans) + sum([len(p) for p in plans])) + " " + config.PLANNER_PARAMS + " > planner_out.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Reading the plan output by the compilation
modifications = set()
file = open(config.OUTPUT_FILENAME, 'r')
for line in file:
key = "(program_"
if key in line:
aux = "(unprogram_"+line[13:]
if aux in modifications:
modifications.remove(aux)
else:
modifications.add(line)
key = "(unprogram_"
if key in line:
aux = "(program_" + line[15:]
if aux in modifications:
modifications.remove(aux)
else:
modifications.add(line)
file.close()
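# A program_* step and a matching unprogram_* step cancel out, so the surviving
# entries in `modifications` count the net number of model edits in the plan.
# The slicing above (line[13:], line[15:]) assumes plan lines in which
# "(program_" and "(unprogram_" start at a fixed column of the planner output.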
num_modifications = len(modifications)
worst_case = len(allpres)*3
mean = worst_case/2
# p = 1/(1+math.exp(-float(mean-num_modifications)/float(mean/6)))
p = float(worst_case-num_modifications)/worst_case
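# Each candidate precondition fluent in allpres owns three programmable slots
# (pre/add/del), hence worst_case = len(allpres) * 3. Worked example
# (illustrative): len(allpres) == 40 gives worst_case == 120, so a plan with
# 30 net modifications yields p == (120 - 30) / 120 == 0.75.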
print("Modifications: {}".format(num_modifications))
print("Worst case modifications: {}".format(worst_case))
print("P(M|O) = {}".format(round(p, 2)))
# print("P(M|O) = {}".format())
sys.exit(0)
| 20,969 | 40.772908 | 252 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/planning.py
|
#! /usr/bin/env python
import sys, os, copy
from dependencies.fama.src import pddl, pddl_parser
from dependencies.fama.src import config
class Literal:
def __init__(self, n, ags):
self.name = n
self.args = ags
def __str__(self):
return "("+self.name +" " + " ".join(self.args)+")"
def __hash__(self):
return hash(tuple([self.name, tuple(self.args)]))
def __eq__(self, other):
return self.name == other.name and self.args == other.args
def __repr__(self) -> str:
return "(%s %s)" % (self.name, self.args)
class State:
def __init__(self, ls):
self.literals = ls
def __str__(self):
return " ".join([str(s) for s in self.literals])
def findLiteral(self, lit):
i=0
for l in self.literals:
if str(lit).upper()== str(l).upper():
return i
i = i + 1
return -1
def addLiteral(self, lit):
if self.findLiteral(lit)==-1:
self.literals = self.literals + [lit]
return
def delLiteral(self, lit):
i=self.findLiteral(lit)
if i!=-1:
self.literals.pop(i)
return
def filter_literals_byName(self, names):
aux = self.literals
self.literals = [item for item in aux if names.count(item.name)==0]
return
def __hash__(self):
return hash("".join(sorted(self.__str__())))
def __eq__(self, other):
return self.__hash__() == other.__hash__()
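# Minimal usage sketch for State (illustrative):
#   s = State([Literal("on", ["a", "b"])])
#   s.addLiteral(Literal("clear", ["a"]))    # appended; not present yet
#   s.delLiteral(Literal("on", ["a", "b"]))  # removed by case-insensitive match
#   str(s)  ->  "(clear a)"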
class Plan:
def __init__(self, acs):
self.actions = acs
def read_plan(self, plan_filename):
file = open(plan_filename, 'r')
for line in file:
if line[0] in ";" or line=="\n":
continue
istart=line.find("(")+1
iend=line.find(")")
name = line.lower()[istart:iend].split(" ")[0]
args = line.lower()[istart:iend].split(" ")[1:]
self.actions = self.actions + [Literal(name,args)]
file.close()
def __str__(self):
str_out = ""
for a in self.actions:
str_out = str_out + str(a) + "\n"
return str_out
def write_plan(self, plan_filename):
file = open(plan_filename, 'w')
file.write(str(self))
file.close()
class Rule:
def __init__(self, s, a):
self.state = s
self.action = a
def __str__(self):
return "IF: " + str(self.state) + "\nTHEN: " + str(self.action)
class Policy:
def __init__(self, rs):
self.rules = rs
def findRule(self, rul):
i=0
for r in self.rules:
if str(rul).upper()== str(r).upper():
return i
i = i + 1
return -1
def addRule(self, rul):
if self.findRule(rul)==-1:
self.rules = self.rules + [rul]
return
def read_policy(self, policy_filename):
file = open(policy_filename, 'r')
self.rules = []
state = None
for line in file:
if "IF: " in line:
ls = []
for l in line.replace("IF: ","").replace("\n","").split(") ("):
name = l.replace("(","").replace(")","").split(" ")[0]
args = l.replace("(","").replace(")","").split(" ")[1:]
ls = ls + [Literal(name,args)]
state = State(ls)
if "THEN: " in line:
name = line.replace("THEN: ","").replace("\n","").replace("(","").replace(")","").split(" ")[0]
args = line.replace("THEN: ","").replace("\n","").replace("(","").replace(")","").split(" ")[1:]
action = Literal(name,args)
if state != None:
self.addRule(Rule(state,action))
state = None
file.close()
return
def __str__(self):
return "\n".join([str(s) for s in self.rules])
# Auxiliary functions
def VAL_computation_state_trajectory(domain_filename, problem_filename, plan_filename, VAL_out = config.VAL_OUT):
# Creating a FD task with the domain and the problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
# Creating the initial state
state=State([])
for l in fd_task.init:
if not isinstance(l,pddl.f_expression.FunctionAssignment) and l.predicate!="=":
state.addLiteral(Literal(l.predicate,[str(arg) for arg in l.args]))
# Running VAL
cmd = "rm " + VAL_out + ";"+config.VAL_PATH+"/validate -v " + domain_filename + " " + problem_filename + " " + plan_filename + " > " + VAL_out
# print("\n\nExecuting... " + cmd)
os.system(cmd)
    # Parsing the VAL output
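    # (The verbose "-v" log is replayed: every "Deleting"/"Adding" line mutates
    # the running state and a snapshot is stored at each "Checking next
    # happening" marker, so the function returns the state trajectory
    # s0, ..., sn induced by the plan.)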
file = open(VAL_out, 'r')
actions = []
states = []
action_id = 0
plan_size = 0
baction = False
for line in file:
# Reading an action
if baction==True:
name = line.replace("\n","").replace("(","").replace(")","").split(" ")[0]
args = line.replace("\n","").replace("(","").replace(")","").split(" ")[1:]
baction = False
name=name.split("_detdup_")[0]
actions = actions + [Literal(name,args)]
if "Plan size: " in line:
plan_size=int(line.split("Plan size: ")[1])
if action_id < plan_size and str(action_id)+":" in line:
baction = True
action_id = action_id +1
# Adding a new policy rule
if "Checking next happening (time " in line:
step = int(line.replace(")\n","").split("Checking next happening (time ")[1])
states=states+[copy.deepcopy(state)]
if "Deleting " in line:
name = line.replace("Deleting ","").replace("(","").replace(")\n","").split(" ")[0]
args = line.replace("Deleting ","").replace("(","").replace(")\n","").split(" ")[1:]
state.delLiteral(Literal(name,args))
if "Adding " in line:
name = line.replace("Adding ","").replace("(","").replace(")\n","").split(" ")[0]
args = line.replace("Adding ","").replace("(","").replace(")\n","").split(" ")[1:]
state.addLiteral(Literal(name,args))
file.close()
states=states+[copy.deepcopy(state)]
return states
| 6,709 | 31.259615 | 146 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/graph.py
|
#! /usr/bin/env python
from __future__ import print_function
class Graph:
def __init__(self, nodes):
self.nodes = nodes
self.neighbours = dict((u, set()) for u in nodes)
def connect(self, u, v):
self.neighbours[u].add(v)
self.neighbours[v].add(u)
def connected_components(self):
remaining_nodes = set(self.nodes)
result = []
def dfs(node):
result[-1].append(node)
remaining_nodes.remove(node)
for neighbour in self.neighbours[node]:
if neighbour in remaining_nodes:
dfs(neighbour)
while remaining_nodes:
node = next(iter(remaining_nodes))
result.append([])
dfs(node)
result[-1].sort()
return sorted(result)
def transitive_closure(pairs):
# Warshall's algorithm.
result = set(pairs)
nodes = set(u for (u, v) in pairs) | set(v for (u, v) in pairs)
for k in nodes:
for i in nodes:
for j in nodes:
if (i, j) not in result and (i, k) in result and (k, j) in result:
result.add((i, j))
return sorted(result)
if __name__ == "__main__":
g = Graph([1, 2, 3, 4, 5, 6])
g.connect(1, 2)
g.connect(1, 3)
g.connect(4, 5)
print(g.connected_components())
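    # Illustrative extra check (not part of the original demo): (1, 2) and
    # (2, 3) imply (1, 3) under Warshall's algorithm.
    print(transitive_closure([(1, 2), (2, 3)]))  # -> [(1, 2), (1, 3), (2, 3)]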
| 1,345 | 27.638298 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/example-generator.py
|
#! /usr/bin/env python
import sys, os, copy, glob, itertools
import pddl, pddl_parser
import fdtask_to_pddl, planning
import config
import fileinput
# Madagascar details
M_PATH=config.PLANNER_PATH
M_CALL="/"+config.PLANNER_NAME
M_PARAMS=" -W " + config.PLANNER_PARAMS
# FD details
FD_PATH="/home/slimbook/software/fd/"
FD_CALL="/fast-downward.py --alias seq-sat-lama-2011 "
FD_PARAMS=""
# LPG details
LPG_PATH="/home/slimbook/software/LPG-td-1.0/"
LPG_CALL="lpg-td-1.0 "
LPG_PARAMS=" -n 1 -v off -out sas_plan"
# ff details
FF_PATH=config.PLANNER_PATH
FF_CALL="/"+config.PLANNER_NAME
FF_PARAMS=""
#**************************************#
# MAIN
#**************************************#
try:
domain_filename = sys.argv[1]
problem_filename = sys.argv[2]
planner = sys.argv[3]
nsteps = int(sys.argv[4])
if planner == "M":
PLANNER_PATH = config.ROOT_PATH + "/madagascar"
PLANNER_NAME = "M"
OUTPUT_FILENAME = config.OUTPUT_PATH + "/sas_plan"
PLANNER_PARAMS = "-S 1 -Q -o " + OUTPUT_FILENAME
elif planner == "ff":
PLANNER_PATH = config.ROOT_PATH + "/FF"
PLANNER_NAME = "ff"
OUTPUT_FILENAME = config.OUTPUT_PATH + "/sas_plan"
PLANNER_PARAMS = ""
FF_PATH = PLANNER_PATH
FF_CALL = "/" + PLANNER_NAME
FF_PARAMS = ""
else:
PLANNER_PATH = config.PLANNER_PATH
PLANNER_NAME = config.PLANNER_NAME
OUTPUT_FILENAME = config.OUTPUT_FILENAME
PLANNER_PARAMS = config.PLANNER_PARAMS
if "-h" in sys.argv:
sys.argv.remove("-h")
nhorizon = int(sys.argv[5])
else:
nhorizon = 0
except:
print("Usage:")
print(sys.argv[0] + " <domain> <problem> <planner> <steps> <-h horizon>")
sys.exit(-1)
try:
os.stat(config.OUTPUT_PATH)
except:
os.makedirs(config.OUTPUT_PATH)
try:
os.stat(config.OUTPUT_PATH+config.PROBLEM_DIR)
except:
os.makedirs(config.OUTPUT_PATH+config.PROBLEM_DIR)
try:
os.stat(config.OUTPUT_PATH+config.PLAN_DIR)
except:
os.makedirs(config.OUTPUT_PATH+config.PLAN_DIR)
try:
os.stat(config.OUTPUT_PATH+config.OBSERVATIONS_DIR)
except:
os.makedirs(config.OUTPUT_PATH+config.OBSERVATIONS_DIR)
# Creating a FD task with the domain and the problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
# Modifying domain and problem when planning for horizon
if nhorizon > 0:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"), pddl.pddl_types.TypedObject("?i2", "step")]))
for a in fd_task.actions:
params = []
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
pre = []
pre += [pddl.conditions.Atom("current", ["?i1"])]
pre += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.NegatedAtom("current", ["?i1"]))]
a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.Atom("current", ["?i2"]))]
for i in range(1, nhorizon + 1):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
for i in range(2, nhorizon+1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
fd_task.init.append(pddl.conditions.Atom("current", ["i1"]))
fd_task.goal = pddl.conditions.Conjunction([pddl.conditions.Atom("current", ["i"+str(nhorizon)])])
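# Horizon encoding (illustrative reading of the block above): for nhorizon == 3
# the init holds (inext i1 i2), (inext i2 i3) and (current i1); every action
# advances the step counter, so the goal (current i3) admits plans of exactly
# nhorizon - 1 = 2 actions.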
aux_domain_filename = config.OUTPUT_PATH+"/aux_domain.pddl"
fdomain = open(aux_domain_filename, "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
aux_problem_filename = config.OUTPUT_PATH+"/aux_problem.pddl"
fproblem = open(aux_problem_filename, "w")
fproblem.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fproblem.close()
# Running the planner
PLANNER_OUT=config.OUTPUT_PATH+"/aux_planner.log"
if planner == "FD":
cmd = "rm sas_plan*; ulimit -t 200;" + FD_PATH + FD_CALL + " " + aux_domain_filename + " " + aux_problem_filename + " " + FD_PARAMS+ " > " + PLANNER_OUT
if planner == "M":
cmd = "rm "+config.OUTPUT_PATH+"/sas_plan*; ulimit -t 200;" + M_PATH + M_CALL + " " + aux_domain_filename + " " + aux_problem_filename + " " + M_PARAMS+ " > " + PLANNER_OUT
if planner == "ff":
cmd = "rm "+config.OUTPUT_PATH+"/sas_plan*;" + FF_PATH + FF_CALL + " -o " + aux_domain_filename + " -f " + aux_problem_filename + " " + FF_PARAMS+ " > " + PLANNER_OUT
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Loading the plan
if planner == "ff":
f = open(PLANNER_OUT, "r")
_plan_found = False
_plan = ""
step = 0
for x in f:
if ("found legal plan as follows"):
_plan_found = True
if not(_plan_found):
continue
if str(step)+":" in x:
k = copy.deepcopy(x)
_plan += str(step) + " : (" + k.lower().rstrip().split(":")[-1].lstrip() + ")\n"
# print(k.lower().rstrip().split(":")[-1].lstrip() )#+ ")\n"
step += 1
if "time spent" in x:
break
f.close()
f = open(config.OUTPUT_PATH+"/sas_plan", "w")
f.write(_plan)
f.close()
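# (The FF branch above scrapes the planner's textual log into "step : (action)"
# lines so the rest of the pipeline can read it like a Madagascar sas_plan.)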
plan_files = glob.glob(config.OUTPUT_PATH+"/sas_plan*")
plan_files.sort()
plan_filename = plan_files[-1]
plan = planning.Plan([])
plan.read_plan(plan_filename)
plan.write_plan(plan_filename)
# Generating the state trajectory
states = planning.VAL_computation_state_trajectory(aux_domain_filename,aux_problem_filename,plan_filename)
# Output the examples problems
counter = copy.deepcopy(config.COUNTER)
aux = [o for o in fd_task.objects if o.type_name!="step" and o.name!="kitchen"]
fd_task.objects = aux
for i in range(0,len(states)):
fd_task.init=[]
if ((i%nsteps)==0 and i>0) or (i==len(states)-1 and i>0):
# Positive
if ((i%nsteps)==0 and i>0):
for l in states[i-nsteps].literals:
if l.name != "inext" and l.name != "current":
fd_task.init.append(pddl.conditions.Atom(l.name,l.args))
else:
for l in states[(counter-config.COUNTER)*nsteps].literals:
if l.name != "inext" and l.name != "current":
fd_task.init.append(pddl.conditions.Atom(l.name,l.args))
goals = []
for l in states[i].literals:
if l.name != "inext" and l.name != "current":
goals = goals + [pddl.conditions.Atom(l.name,l.args)]
fd_task.goal=pddl.conditions.Conjunction(goals)
# Writing the compilation output domain and problem
fdomain=open(config.OUTPUT_PATH+config.PROBLEM_DIR+"/test-"+str(counter).zfill(3) +".pddl","w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task,fd_problem))
fdomain.close()
counter=counter+1
# Output the examples plans
counter = copy.deepcopy(config.COUNTER)
for i in range(0,len(plan.actions)):
if (i%nsteps)==0:
fdomain=open(config.OUTPUT_PATH+config.PLAN_DIR+"/plan-"+str(counter).zfill(3) +".txt","w")
index=0
counter=counter+1
fdomain.write(str(index)+": "+str(plan.actions[i])+"\n")
index = index + 1
if (i%nsteps)==(nsteps-1):
fdomain.close()
# Output the observations
counter = copy.deepcopy(config.COUNTER)
for i in range(0,len(plan.actions)):
if (i%nsteps)==0:
# HEAD
fdomain=open(config.OUTPUT_PATH+config.OBSERVATIONS_DIR+"/observation-"+str(counter).zfill(3) +".txt","w")
index=0
counter=counter+1
fdomain.write("(solution \n")
fdomain.write("(:objects ")
str_out = ""
        for o in sorted(set(fd_task.objects)):
str_out = str_out + str(o).replace(":"," - ") + " "
str_out = str_out + ")\n"
states[0].filter_literals_byName(["inext","current"])
str_out = str_out +"(:init " + str(states[i]) + ")\n\n"
fdomain.write(str_out)
# BODY
states[i].filter_literals_byName(["inext","current"])
fdomain.write("(:observations " + str(states[i])+")\n\n")
fdomain.write(str(plan.actions[i])+"\n\n")
index = index + 1
if (i%nsteps)==(nsteps-1):
# TAIL
str_out = ""
states[-1].filter_literals_byName(["inext","current"])
str_out = str_out +"(:goal " + str(states[i+1]) + "))\n"
fdomain.write(str_out)
fdomain.close()
# to delete incomplete observation file and corresponding test and plan files.
if (len(plan.actions)%nsteps) != 0:
os.remove(config.OUTPUT_PATH+config.PROBLEM_DIR+"/test-"+str(counter-1).zfill(3) +".pddl")
os.remove(config.OUTPUT_PATH + config.PLAN_DIR + "/plan-" + str(counter - 1).zfill(3) + ".txt")
os.remove(config.OUTPUT_PATH + config.OBSERVATIONS_DIR + "/observation-" + str(counter - 1).zfill(3) + ".txt")
counter = counter - 1
search_exp = 'COUNTER = '
for line in fileinput.input(config.PROJECT_PATH+"/src/config.py", inplace=True):
if search_exp in line:
line = line.replace(search_exp+str(config.COUNTER), search_exp+str(counter))
sys.stdout.write(line)
sys.exit(0)
| 9,545 | 32.97153 | 177 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/walk-generator.py
|
#! /usr/bin/env python
import sys, os, copy, glob, itertools, random
import pddl, pddl_parser
import fdtask_to_pddl, planning
import config
# Madagascar details
M_PATH=config.PLANNER_PATH
M_CALL="/"+config.PLANNER_NAME
M_PARAMS=" -W " + config.PLANNER_PARAMS
# FD details
FD_PATH="/home/slimbook/software/fd/"
FD_CALL="/fast-downward.py --alias seq-sat-lama-2011 "
FD_PARAMS=""
# LPG details
LPG_PATH="/home/slimbook/software/LPG-td-1.0/"
LPG_CALL="lpg-td-1.0 "
LPG_PARAMS=" -n 1 -v off -out sas_plan"
#**************************************#
# MAIN
#**************************************#
try:
domain_filename = sys.argv[1]
problem_filename = sys.argv[2]
planner = sys.argv[3]
nsteps = int(sys.argv[4])
if "-h" in sys.argv:
sys.argv.remove("-h")
nhorizon = int(sys.argv[5])
else:
nhorizon = 0
except:
print "Usage:"
print sys.argv[0] + " <domain> <problem> <planner> <steps> <-h horizon>"
sys.exit(-1)
# Creating a FD task with the domain and the problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
# Modifying domain and problem when planning for horizon
if nhorizon > 0:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"), pddl.pddl_types.TypedObject("?i2", "step")]))
for a in fd_task.actions:
fd_task.predicates.append(pddl.predicates.Predicate("applied-"+a.name, []))
for a in fd_task.actions:
params = []
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
a.parameters += params
pre = []
pre += [pddl.conditions.Atom("current", ["?i1"])]
pre += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if a.precondition.parts:
original_pre = list(a.precondition.parts)
else:
original_pre = [a.precondition]
a.precondition = pddl.conditions.Conjunction(original_pre + pre)
# a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.NegatedAtom("current", ["?i1"]))]
# a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.Atom("current", ["?i2"]))]
a.effects += [
pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
a.effects += [
pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
a.effects += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("applied-"+a.name, []))]
for i in range(1, nhorizon + 1):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
for i in range(2, nhorizon+1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
fd_task.init.append(pddl.conditions.Atom("current", ["i1"]))
new_goals = []
# for a in fd_task.actions:
# new_goals.append(pddl.conditions.Atom("applied-"+a.name, []))
# while(len(new_goals)>6):
# new_goals.pop(random.randint(0,len(new_goals)-1))
new_goals.append(pddl.conditions.Atom("current", ["i"+str(nhorizon)]))
fd_task.goal = pddl.conditions.Conjunction(new_goals)
aux_domain_filename = "aux_domain.pddl"
fdomain = open(aux_domain_filename, "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
aux_problem_filename = "aux_problem.pddl"
fproblem = open(aux_problem_filename, "w")
fproblem.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fproblem.close()
# Running the planner
PLANNER_OUT="aux_planner.log"
if planner == "FD":
cmd = "rm sas_plan*; ulimit -t 200;" + FD_PATH + FD_CALL + " " + aux_domain_filename + " " + aux_problem_filename + " " + FD_PARAMS+ " > " + PLANNER_OUT
if planner == "M":
cmd = "rm sas_plan*; ulimit -t 200;" + M_PATH + M_CALL + " " + aux_domain_filename + " " + aux_problem_filename + " " + M_PARAMS+ " > " + PLANNER_OUT
if planner == "LPG":
cmd = "rm sas_plan*; ulimit -t 200;" + LPG_PATH + LPG_CALL + " -o " + aux_domain_filename + " -f " + aux_problem_filename + " " + LPG_PARAMS+ " > " + PLANNER_OUT
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Loading the plan
plan_files = glob.glob("sas_plan*")
plan_files.sort()
plan_filename = plan_files[-1]
plan = planning.Plan([])
plan.read_plan(plan_filename)
# for action in plan.actions:
# action.args = action.args[:-2]
plan.write_plan(plan_filename)
# Generating the state trajectory
states = planning.VAL_computation_state_trajectory(aux_domain_filename,aux_problem_filename,plan_filename)
# Output the examples problems
toskip = ["inext","current"]+[str("applied-"+a.name) for a in fd_task.actions]
counter = 1
aux = [o for o in fd_task.objects if o.type_name!="step" and o.name!="kitchen"]
fd_task.objects = aux
for i in range(0,len(states)):
fd_task.init=[]
if ((i%nsteps)==0 and i>0) or (i==len(states)-1 and i>0):
# Positive
if ((i%nsteps)==0 and i>0):
for l in states[i-nsteps].literals:
if not l.name in toskip:
fd_task.init.append(pddl.conditions.Atom(l.name,l.args))
else:
for l in states[(counter-1)*nsteps].literals:
if not l.name in toskip:
fd_task.init.append(pddl.conditions.Atom(l.name,l.args))
goals = []
for l in states[i].literals:
if not l.name in toskip:
goals = goals + [pddl.conditions.Atom(l.name,l.args)]
fd_task.goal=pddl.conditions.Conjunction(goals)
# Writing the compilation output domain and problem
fdomain=open("test-"+str(counter).zfill(2) +".pddl","w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task,fd_problem))
fdomain.close()
counter=counter+1
# Remove step objects from the actions
for action in plan.actions:
action.args = action.args[:-2]
# Output the examples plans
counter = 1
for i in range(0,len(plan.actions)):
if (i%nsteps)==0:
fdomain=open("plan-"+str(counter).zfill(2) +".txt","w")
index=0
counter=counter+1
fdomain.write(str(index)+": "+str(plan.actions[i])+"\n")
index = index + 1
if (i%nsteps)==(nsteps-1):
fdomain.close()
# Output the observations
counter = 1
for i in range(0,len(plan.actions)):
if (i%nsteps)==0:
# HEAD
fdomain=open("ten-observation-"+str(counter).zfill(2) +".txt","w")
index=0
counter=counter+1
fdomain.write("(solution \n")
fdomain.write("(:objects ")
str_out = ""
for o in sorted(set(fd_task.objects)):
str_out = str_out + str(o).replace(":"," - ") + " "
str_out = str_out + ")\n"
states[0].filter_literals_byName(["inext","current"]+[str("applied-"+a.name) for a in fd_task.actions])
str_out = str_out +"(:init " + str(states[0]) + ")\n\n"
fdomain.write(str_out)
# BODY
states[i].filter_literals_byName(["inext","current"]+[str("applied-"+a.name) for a in fd_task.actions])
fdomain.write("(:observations " + str(states[i])+")\n\n")
fdomain.write(str(plan.actions[i])+"\n\n")
index = index + 1
if (i%nsteps)==(nsteps-1):
# TAIL
str_out = ""
states[-1].filter_literals_byName(["inext","current"]+[str("applied-"+a.name) for a in fd_task.actions])
str_out = str_out +"(:goal " + str(states[-1]) + "))\n"
fdomain.write(str_out)
fdomain.close()
sys.exit(0)
| 8,022 | 34.5 | 166 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/config.py
|
#! /usr/bin/env python2
import os
DOMAIN = "blocks_rash"
ROOT_PATH = os.getcwd()+"/dependencies/"
PROJECT_PATH = ROOT_PATH+"fama"
COUNTER = 140
OUTPUT_PATH = PROJECT_PATH + "/output/" + DOMAIN
PROBLEM_DIR = "/tests"
PLAN_DIR = "/plans"
OBSERVATIONS_DIR = "/observations"
VAL_PATH = ROOT_PATH + "VAL"
VAL_OUT=OUTPUT_PATH+"/val.log"
# PLANNER_PATH = PROJECT_PATH + "/util/FF-v2.3"
# PLANNER_NAME = "ff"
# OUTPUT_FILENAME = OUTPUT_PATH+"/sas_plan"
# PLANNER_PARAMS = "-S 1 -Q -o "+OUTPUT_FILENAME
PLANNER_PATH = ROOT_PATH + "madagascar"
PLANNER_NAME = "M"
OUTPUT_FILENAME = OUTPUT_PATH+"/sas_plan"
PLANNER_PARAMS = "-S 1 -Q -o "+OUTPUT_FILENAME
# PLANNER_PATH = ROOT_PATH + "/downward"
# PLANNER_NAME = "fast-downward.py"
# OUTPUT_FILENAME = OUTPUT_PATH+"/sas_plan"
# PLANNER_PARAMS = " --alias seq-sat-lama-2011 "+OUTPUT_FILENAME
# Different compiler modes
INPUT_PLANS = 0
INPUT_STEPS = 1
INPUT_LENPLAN = 2
INPUT_MINIMUM = 3
| 931 | 23.526316 | 64 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/compiler.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools
import pddl, pddl_parser
import config, fdtask_to_pddl
def get_all_types(task, itype):
output=[itype]
# for i in task.types:
# if itype in i.name:
# if i.basetype_name!="object":
# output = output + [str(i.basetype_name)]
for t in task.types:
if t.basetype_name == itype:
output.append(str(t.name))
return output
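# Illustrative example: with (:types heavy_block - block) in the domain,
# get_all_types(task, "block") returns ["block", "heavy_block"], i.e. the type
# itself plus its direct subtypes.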
def get_max_steps_from_plans(ps):
iout = 0
for plan in ps:
iout = max(iout, len(plan))
return iout
def get_max_vars_from_plans(ps):
iout = 0
for plan in ps:
for a in plan:
iout = max(iout, len(a.split(" ")) - 1)
return iout
def get_action_schema_from_plans(ps, task):
known_actions = [a.name for a in task.actions]
schemas = []
for plan in ps:
for a in plan:
counter = 0
name = a.replace("(", "").replace(")", "").split(" ")[0]
item = [name]
for p in a.replace("(", "").replace(")", "").split(" ")[1:]:
for o in task.objects:
if p.upper() == o.name.upper():
item.append(str(o.type_name))
counter = counter + 1
break
if item not in schemas:
schemas.insert(0, item)
return [x for x in schemas if x[0] not in known_actions], [x for x in schemas if x[0] in known_actions]
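# Returns the pair (new, known): schemas observed in the plans whose names are
# absent from the reference domain, and those already modelled there. Each
# schema is a list [name, type_1, ..., type_k] with parameter types inferred
# from the types of the objects that instantiate the action in the plans.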
def get_predicates_schema_from_plans(task):
preds = []
for p in task.predicates:
item = []
if p.name == "=":
continue
item.append(p.name)
for a in p.arguments:
item.append(a.type_name)
preds = preds + [item]
return preds
def get_static_predicates(tasks, predicates):
candidates = set([p[0] for p in predicates])
for task in tasks:
task_candidates = set()
for predicate in candidates:
init_predicates = set([p for p in task.init if p.predicate == predicate])
goal_predicates = set([p for p in task.goal.parts if p.predicate == predicate and p.negated == False])
if init_predicates == goal_predicates:
task_candidates.add(predicate)
candidates = candidates.intersection(task_candidates)
reflexive_static_predicates = dict()
for candidate in candidates:
reflexive_static_predicates[candidate] = True
for task in tasks:
init_predicates = set([p for p in task.init if p.predicate == candidate])
for predicate in init_predicates:
if len(predicate.args) == 1 or len(set(predicate.args)) != 1:
reflexive_static_predicates[candidate] = False
break
return [p for p in predicates if p[0] in candidates], reflexive_static_predicates
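# Reading of the check above: a predicate is treated as static when, in every
# task, its init atoms coincide with its positive goal atoms; a static
# predicate is additionally marked reflexive when all of its init instances
# have arity >= 2 and repeat a single object, e.g. (connected a a).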
def get_static_precondition(predicate, action, plans, tasks):
static_preconditions = set()
params = [pddl.pddl_types.TypedObject("?o" + str(i), action[i]) for i in range(1, len(action))]
params = [x for x in params if x.type_name in predicate[1:]]
num_predicate_params = len(predicate[1:])
possible_param_tuples = list(itertools.combinations(params, num_predicate_params))
for t in possible_param_tuples:
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in t]))
static_preconditions.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(t)]))
if len([x for x in action[1:] if x in predicate[1:]]) >= num_predicate_params:
all_instances = set()
for task in tasks:
all_instances.update([p.args for p in task.init if p.predicate == predicate[0]])
all_variables = set(sum(all_instances, ()))
for a in [item for sublist in plans for item in sublist]:
a = a.replace('(','').replace(')','').split(" ")
if a[0] == action[0]:
variables = [x for x in a[1:] if x in all_variables]
possible_tuples = list(itertools.combinations(variables, num_predicate_params))
static_preconditions_candidates = set()
for i in range(len(possible_tuples)):
if possible_tuples[i] in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in possible_param_tuples[i]]))
elif tuple(reversed(possible_tuples[i])) in all_instances:
static_preconditions_candidates.add(pddl.conditions.Atom(predicate[0], [x.name for x in reversed(possible_param_tuples[i])]))
static_preconditions = static_preconditions.intersection(static_preconditions_candidates)
return list(static_preconditions)
def possible_pred_for_action(task, p, a, tup):
if (len(p) > len(a)):
return False
# action_types = [set(get_all_types(task, str(a[int(tup[i])]))) for i in range(len(tup))]
action_types = [set([a[int(tup[i])]]) for i in range(len(tup))]
predicate_types = [set(get_all_types(task, x)) for x in p[1:]]
fits = [len(action_types[i].intersection(predicate_types[i])) >= 1 for i in range(len(action_types))]
# for i in range(0, len(tup)):
# bfound = False
# for t in get_all_types(task, str(a[int(tup[i])])):
# if t in get_all_types(task, str(p[i + 1])):
# bfound = True
# if bfound == False:
# return False
return all(fits)
def is_binary_mutex(axiom):
    # A binary mutex is a forall-quantified disjunction of exactly two negated atoms.
    return (isinstance(axiom.condition, pddl.UniversalCondition)
            and isinstance(axiom.condition.parts[0], pddl.Disjunction)
            and len(axiom.condition.parts[0].parts) == 2
            and isinstance(axiom.condition.parts[0].parts[0], pddl.NegatedAtom)
            and isinstance(axiom.condition.parts[0].parts[1], pddl.NegatedAtom))
def get_binary_mutexes(fd_task):
binary_mutexes = dict()
for axiom in fd_task.axioms:
if is_binary_mutex(axiom):
part1 = axiom.condition.parts[0].parts[0]
part2 = axiom.condition.parts[0].parts[1]
args1 = part1.args
args2 = part2.args
arity1 = len(args1)
arity2 = len(args2)
matchings = list()
if arity1 == 0:
matchings.extend([(-1,i) for i in range(arity2)])
            elif arity2 == 0:
                matchings.extend([(i, -1) for i in range(arity1)])
else:
for i in range(arity1):
for j in range(arity2):
if args1[i] == args2[j]:
matchings.append((i,j))
# print(part1, part2)
# print(matchings)
for tup in itertools.product(range(1, MAX_VARS+1), repeat=max(arity1, arity2)):
vars = ["var" + str(t) for t in tup]
# print(vars)
m1 = [vars[i] for i in range(arity1)]
for tup2 in itertools.product(vars, repeat=arity2):
m2 = [t for t in tup2]
# print(m1, m2)
match_all = True
for matching in matchings:
if matching[0] == -1 or matching[1] == -1:
continue
else:
match_all = match_all & (m1[matching[0]] == m2[matching[1]])
if match_all:
key = tuple([part1.predicate] + m1)
mutex = tuple([part2.predicate] + m2)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
key = tuple([part2.predicate] + m2)
mutex = tuple([part1.predicate] + m1)
if key != mutex:
aux = binary_mutexes.get(key, set())
aux.add(mutex)
binary_mutexes[key] = aux
# print(key, mutex)
return binary_mutexes
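# Illustrative example: a mutex axiom over (at ?x ?l1) / (at ?x ?l2) with the
# shared variable ?x yields, for each variable renaming, entries such as
#   ("at", "var1", "var2") -> {("at", "var1", "var1"), ...}
# mapping each fluent tuple to the tuples it excludes; these sets are later
# used to avoid programming mutually exclusive add effects.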
# **************************************#
# MAIN
# **************************************#
try:
if "-s" in sys.argv:
check_static_predicates = True
sys.argv.remove("-s")
else:
check_static_predicates = False
if "-i" in sys.argv:
program_with_invariants = True
sys.argv.remove("-i")
else:
program_with_invariants = False
domain_folder_name = sys.argv[1]
domain_file = sys.argv[2]
problems_prefix_filename = sys.argv[3]
plans_prefix_filename = sys.argv[4]
input_level = int(sys.argv[5])
except:
print "Usage:"
print sys.argv[0] + "[-s] [-i] <domain> <domain filename> <problems prefix> <plans prefix> <input level (0 plans, 1 steps, 2 len(plan), 3 minimum)>"
sys.exit(-1)
# Reading the example plans
plans = []
i = 0
for filename in sorted(glob.glob(domain_folder_name + "/" + plans_prefix_filename + "*")):
plans.append([])
lcounter = 0
file = open(filename, 'r')
for line in file:
        if input_level != config.INPUT_STEPS or lcounter % 3 != 0:
plans[i].append(line.replace("\n", "").split(": ")[1])
lcounter = lcounter + 1
file.close()
i = i + 1
# Creating a FD task with the domain and the first problem file
domain_filename = "{}{}.pddl".format(domain_folder_name, domain_file)
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problems = []
fd_tasks = []
counter = 0
for problem_filename in sorted(glob.glob(domain_folder_name + "/" + problems_prefix_filename + "*")):
fd_problems = fd_problems + [pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)]
fd_tasks = fd_tasks + [pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[counter])]
counter = counter + 1
fd_task = copy.deepcopy(fd_tasks[0])
known_action_models = [action for action in fd_task.actions]
MAX_STEPS = get_max_steps_from_plans(plans)
MAX_VARS = get_max_vars_from_plans(plans)
new_actions, known_actions = get_action_schema_from_plans(plans, fd_task)
actions = new_actions + known_actions
predicates = get_predicates_schema_from_plans(fd_task)
static_predicates, reflexive_static_predicates = get_static_predicates(fd_tasks, predicates)
binary_mutexes = get_binary_mutexes(fd_task)
# Compilation Problem
init_aux = copy.deepcopy(fd_task.init)
fd_task.init = []
fd_task.init.append(pddl.conditions.Atom("modeProg", []))
allpres = []
for a in new_actions: # All possible preconditions are initially programmed
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
fd_task.init.append(
pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
allpres = allpres + [str("pre_" + "_".join([p[0]] + [a[0]] + vars))]
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i), "i" + str(i + 1)]))
goals = []
for i in range(0, len(plans) + 1):
goals = goals + [pddl.conditions.Atom("test" + str(i), [""])]
fd_task.goal = pddl.conditions.Conjunction(goals)
# Compilation Domain
if input_level <= config.INPUT_LENPLAN:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
if input_level <= config.INPUT_LENPLAN:
for i in range(1, MAX_STEPS + 2):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
fd_task.predicates.append(pddl.predicates.Predicate("modeProg", []))
for i in range(0, len(plans) + 1):
fd_task.predicates.append(pddl.predicates.Predicate("test" + str(i), []))
if input_level <= config.INPUT_LENPLAN:
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"),
pddl.pddl_types.TypedObject("?i2", "step")]))
# for axiom in fd_task.axioms:
# fd_task.predicates.append(pddl.predicates.Predicate(axiom.name, []))
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
fd_task.predicates.append(
pddl.predicates.Predicate("pre_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("del_" + "_".join([p[0]] + [a[0]] + vars), []))
fd_task.predicates.append(
pddl.predicates.Predicate("add_" + "_".join([p[0]] + [a[0]] + vars), []))
if input_level <= config.INPUT_STEPS:
for a in actions:
fd_task.predicates.append(pddl.predicates.Predicate("plan-" + a[0],
[pddl.pddl_types.TypedObject("?i", "step")] + [
pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i
in range(1, len(a))]))
learned_static_preconditions = dict()
# Original domain actions
# old_actions = copy.deepcopy(actions)
for a in actions:
pre = list()
eff = list()
is_known_action = False
# Add derived predicates
pre.extend([invariant.condition for invariant in fd_task.axioms])
if a in known_actions:
is_known_action = True
for action in fd_task.actions:
if action.name == a[0]:
if isinstance(action.precondition, pddl.conditions.Atom):
pre.append(action.precondition)
else:
pre.extend([x for x in action.precondition.parts])
eff = action.effects
fd_task.actions.remove(action)
break
params = [pddl.pddl_types.TypedObject("?o" + str(i), a[i]) for i in range(1, len(a))]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
params = params + [pddl.pddl_types.TypedObject("?i1", "step")]
params = params + [pddl.pddl_types.TypedObject("?i2", "step")]
if check_static_predicates and input_level <= config.INPUT_STEPS:
for static_predicate in static_predicates:
static_preconditions = get_static_precondition(static_predicate, a, plans, fd_tasks)
learned_static_preconditions[a[0]] = list()
for static_precondition in static_preconditions:
pre.append(static_precondition)
learned_static_preconditions[a[0]].append(static_precondition)
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
if input_level <= config.INPUT_PLANS and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("plan-" + a[0], ["?i1"] + ["?o" + str(i) for i in range(1, len(a))])]
if input_level <= config.INPUT_LENPLAN and input_level < config.INPUT_MINIMUM:
pre = pre + [pddl.conditions.Atom("current", ["?i1"])]
pre = pre + [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
disjunction = pddl.conditions.Disjunction(
[pddl.conditions.NegatedAtom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])] + [
pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup])])
pre = pre + [disjunction]
if input_level < config.INPUT_STEPS:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
elif input_level < config.INPUT_MINIMUM:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("current", ["?i1"]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["?i2"]))]
if not is_known_action:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("del_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.NegatedAtom(p[0], ["?o" + str(t) for t in tup]))]
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if check_static_predicates and p in static_predicates:
continue
vars = ["var" + str(t) for t in tup]
condition = pddl.conditions.Conjunction(
[pddl.conditions.Atom("add_" + "_".join([p[0]] + [a[0]] + vars), [])])
eff = eff + [
pddl.effects.Effect([], condition, pddl.conditions.Atom(p[0], ["?o" + str(t) for t in tup]))]
fd_task.actions.append(pddl.actions.Action(a[0], params, len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for programming the action schema
for a in new_actions:
var_ids = []
for i in range(1, len(a)):
var_ids = var_ids + ["" + str(i)]
for p in predicates:
for tup in itertools.product(var_ids, repeat=(len(p) - 1)):
if possible_pred_for_action(fd_task, p, a, tup):
if p in static_predicates and check_static_predicates:
if input_level <= config.INPUT_STEPS:
continue
elif not reflexive_static_predicates.get(p[0]) and len(set(tup)) == 1:
continue
vars = ["var" + str(t) for t in tup]
params = []
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [pddl.conditions.Atom("pre_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_pre_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
if p in static_predicates and check_static_predicates:
continue
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
pre = pre + [
pddl.conditions.NegatedAtom("del_" + "_".join([p[0]] + [a[0]] + vars), [])]
if program_with_invariants:
aux = [pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
key = tuple([p[0]] + vars)
for mutex in binary_mutexes.get(key, set()):
aux = aux + [
pddl.conditions.NegatedAtom("add_" + "_".join([mutex[0]] + [a[0]] + [e for e in mutex[1:]]),
[])]
pre = pre + [pddl.conditions.Conjunction(aux)]
else:
pre = pre + [
pddl.conditions.NegatedAtom("add_" + "_".join([p[0]] + [a[0]] + vars), [])]
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Atom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []), pddl.conditions.Atom(
"del_" + "_".join([p[0]] + [a[0]] + vars), []))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.NegatedAtom(
"pre_" + "_".join([p[0]] + [a[0]] + vars), []), pddl.conditions.Atom(
"add_" + "_".join([p[0]] + [a[0]] + vars), []))]
fd_task.actions.append(
pddl.actions.Action("program_eff_" + "_".join([p[0]]+[a[0]]+vars), params,
len(params), pddl.conditions.Conjunction(pre), eff, 0))
# Actions for validating the tests
pre = []
pre = pre + [pddl.conditions.Atom("modeProg", [])]
#pre.extend([invariant.condition for invariant in fd_task.axioms])
eff = [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test0", []))]
for f in init_aux:
if f.predicate != "=":
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), f)]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for i in range(0, len(plans[0])):
action = plans[0][i]
name = action[1:-1].split(" ")[0]
params = action[1:-1].split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.Atom("plan-" + name, ["i" + str(i + 1)] + params))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom("modeProg", []))]
fd_task.actions.append(pddl.actions.Action("validate_0", [], 0, pddl.conditions.Conjunction(pre), eff, 0))
for i in range(0, len(plans)):
pre = []
pre = pre + [pddl.conditions.NegatedAtom("modeProg", [])]
pre.extend([invariant.condition for invariant in fd_task.axioms])
for j in range(0, len(plans) + 1):
if j < i + 1:
pre = pre + [pddl.conditions.Atom("test" + str(j), [])]
else:
pre = pre + [pddl.conditions.NegatedAtom("test" + str(j), [])]
if input_level <= config.INPUT_LENPLAN:
pre = pre + [pddl.conditions.Atom("current", ["i" + str(len(plans[i]) + 1)])]
current_state = set()
for g in fd_tasks[i].goal.parts:
pre = pre + [g]
if isinstance(g, pddl.Atom):
current_state.add(g)
eff = []
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("test" + str(i + 1), []))]
if i < len(plans)-1:
next_state = set()
for atom in fd_tasks[i+1].init:
if atom.predicate != "=":
next_state.add(atom)
lost_atoms = current_state.difference(next_state)
new_atoms = next_state.difference(current_state)
for atom in lost_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(atom.predicate, atom.args))]
for atom in new_atoms:
eff += [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(atom.predicate, atom.args))]
if input_level <= config.INPUT_LENPLAN:
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(),
pddl.conditions.NegatedAtom("current", ["i" + str(len(plans[i]) + 1)]))]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom("current", ["i1"]))]
if input_level <= config.INPUT_STEPS:
for j in range(0, len(plans[i])):
name = "plan-" + plans[i][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(name, pars))]
if i < len(plans) - 1:
for j in range(0, len(plans[i + 1])):
name = "plan-" + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[0]
pars = ["i" + str(j + 1)] + plans[i + 1][j].replace("(", "").replace(")", "").split(" ")[1:]
eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(name, pars))]
fd_task.actions.append(
pddl.actions.Action("validate_" + str(i + 1), [], 0, pddl.conditions.Conjunction(pre), eff, 0))
# Writing the compilation output domain and problem
fdomain = open("aux_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(fd_task, fd_domain))
fdomain.close()
fdomain = open("aux_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fdomain.close()
# Solving the compilation
if input_level <= config.INPUT_LENPLAN:
starting_horizon = str(len(plans) + sum([len(p) for p in plans]))
else:
starting_horizon = str(len(plans))
cmd = "rm " + config.OUTPUT_FILENAME + " planner_out.log;" + config.PLANNER_PATH + "/" + config.PLANNER_NAME + " aux_domain.pddl aux_problem.pddl -F " + starting_horizon + " " + config.PLANNER_PARAMS + " > planner_out.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Reading the plan output by the compilation
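    # The solution atoms encode the learned model: (program_pre_<pred>_<action>...)
    # atoms remove a candidate precondition, while (program_eff_<pred>_<action>...)
    # atoms toggle an effect -- a delete if the predicate is still among the
    # action's preconditions, an add otherwise.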
    pres = [[[p.split("_")[1]] + p.split("_")[3:] for p in allpres if "_" + new_actions[i][0] in p] for i in range(len(new_actions))]
    dels = [[] for _ in range(len(new_actions))]
    adds = [[] for _ in range(len(new_actions))]
file = open(config.OUTPUT_FILENAME, 'r')
for line in file:
keys = "(program_pre_"
if keys in line:
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a[0] for a in new_actions].index(action[0])
            pred = [aux[0].split("_")[0]]
            extra_args = aux[0].split("_")[2:]
            if extra_args != ['']:
                pred = pred + extra_args
# allpres.remove(str("pre_" + pred[0] + "_" + action[0] + "_" + "_".join(map(str, pred[1:]))))
pres[indexa].remove(pred)
keys = "(program_eff_"
if keys in line:
# act = p.split("_")[2]
# pred = [p.split("_")[1]] + p.split("_")[3:]
# indexa = [a[0] for a in new_actions].index(act)
aux = line.replace("\n", "").replace(")", "").split(keys)[1].split(" ")
action = aux[0].split("_")[1:] + aux[1:]
indexa = [a[0] for a in new_actions].index(action[0])
            pred = [aux[0].split("_")[0]]
            extra_args = aux[0].split("_")[2:]
            if extra_args != ['']:
                pred = pred + extra_args
            if pred not in pres[indexa]:
adds[indexa].append(pred)
else:
dels[indexa].append(pred)
file.close()
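    # Rebuild the learned domain: one schema per new action, keeping only
    # literals whose arguments all map to the schema's parameters.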
counter = 0
new_fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problems[0])
new_fd_task.actions = []
for action in new_actions:
params = ["?o" + str(i + 1) for i in range(0, len(action[1:]))]
ps = [pddl.pddl_types.TypedObject(params[i], action[i + 1]) for i in range(0, len(params))]
pre = []
if check_static_predicates:
pre += learned_static_preconditions.get(action[0], [])
    param_names = [x.name for x in ps]
    for p in pres[counter]:
        args = ["?o" + i.replace("var", "") for i in p[1:]]
        if all(arg in param_names for arg in args):
            pre = pre + [pddl.conditions.Atom(p[0], args)]
    eff = []
    for p in dels[counter]:
        args = ["?o" + i.replace("var", "") for i in p[1:]]
        if all(arg in param_names for arg in args):
            eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.NegatedAtom(p[0], args))]
    for p in adds[counter]:
        args = ["?o" + i.replace("var", "") for i in p[1:]]
        if all(arg in param_names for arg in args):
            eff = eff + [pddl.effects.Effect([], pddl.conditions.Truth(), pddl.conditions.Atom(p[0], args))]
new_fd_task.actions.append(pddl.actions.Action(action[0], ps, len(ps), pddl.conditions.Conjunction(pre), eff, 0))
counter = counter + 1
new_fd_task.actions.extend(known_action_models)
# Writing the compilation output domain and problem
fdomain = open("learned_domain.pddl", "w")
fdomain.write(fdtask_to_pddl.format_domain(new_fd_task, fd_domain))
fdomain.close()
sys.exit(0)
| 30,740 | 42.115007 | 222 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/evaluator2.py
|
#! /usr/bin/env python
# from __future__ import division
import sys
import pddl, pddl_parser
import numpy as np
import itertools
def evaluate_matching(matchings, fd_eva_task, fd_ref_task):
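    """Compare a learned model against the reference model.

    matchings is a list of (reference action name, evaluated action name)
    pairs. Returns an 8-tuple: precision and recall for preconditions, add
    effects and delete effects, plus their averages.
    """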
ref_pres = set()
eva_pres = set()
ref_adds = set()
eva_adds = set()
ref_dels = set()
eva_dels = set()
for match in matchings:
action_evaluated = match[0]
matched_action = match[1]
# Build the pre/add/del sets
# Each element of the set is a tuple (action name, literal)
for action in fd_ref_task.actions:
if action.name == action_evaluated:
# Preconditions
if isinstance(action.precondition, pddl.conditions.Atom):
ref_pres.add((action_evaluated, action.precondition))
else:
ref_pres.update([(action_evaluated, x) for x in action.precondition.parts])
# Effects
for effect in action.effects:
if effect.literal.negated:
ref_dels.add((action_evaluated, effect.literal))
else:
ref_adds.add((action_evaluated, effect.literal))
break
for action in fd_eva_task.actions:
if action.name == matched_action:
# Preconditions
if isinstance(action.precondition, pddl.conditions.Atom):
eva_pres.add((action_evaluated, action.precondition))
else:
eva_pres.update([(action_evaluated, x) for x in action.precondition.parts])
# Effects
for effect in action.effects:
if effect.literal.negated:
eva_dels.add((action_evaluated, effect.literal))
else:
eva_adds.add((action_evaluated, effect.literal))
break
# Compute precision and recall
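    # Note: if any of the six sets is empty the denominator below is 0; since
    # the numerators are np.float64, NumPy emits a RuntimeWarning and yields
    # inf/nan instead of raising ZeroDivisionError.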
precision_pres = np.float64(len(ref_pres.intersection(eva_pres))) / len(eva_pres)
recall_pres = np.float64(len(ref_pres.intersection(eva_pres))) / len(ref_pres)
precision_adds = np.float64(len(ref_adds.intersection(eva_adds))) / len(eva_adds)
recall_adds = np.float64(len(ref_adds.intersection(eva_adds))) / len(ref_adds)
precision_dels = np.float64(len(ref_dels.intersection(eva_dels))) / len(eva_dels)
recall_dels = np.float64(len(ref_dels.intersection(eva_dels))) / len(ref_dels)
avg_precision = (precision_pres + precision_adds + precision_dels) / 3
avg_recall = (recall_pres + recall_adds + recall_dels) / 3
return (precision_pres, recall_pres, precision_adds, recall_adds, precision_dels, recall_dels, avg_precision, avg_recall)
# **************************************#
# MAIN
# **************************************#
try:
cmdargs = sys.argv[1:]
if cmdargs[0] == "-r":
reformulation = True
cmdargs = cmdargs[1:]
else:
reformulation = False
if cmdargs[0] == "-p":
partial_domain_filename = cmdargs[1]
cmdargs = cmdargs[2:]
else:
partial_domain_filename = None
reference_domain_filename = cmdargs[0]
evaluation_domain_filename = cmdargs[1]
aux_problem_filename = cmdargs[2]
except:
print "Usage:"
print sys.argv[0] + " [-r] [-p <partial domain>] <reference domain> <evaluation domain> <aux problem>"
sys.exit(-1)
# reference_domain_filename = "../benchmarks/reference/blocks/domain.pddl"
# evaluation_domain_filename = "learned_domain.pddl"
# aux_problem_filename = "../benchmarks/handpicked/blocks/test-1.pddl"
# Creating a FD task with the ref domain and the aux problem file
fd_ref_domain = pddl_parser.pddl_file.parse_pddl_file("domain", reference_domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", aux_problem_filename)
fd_ref_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_ref_domain, fd_problem)
# Creating a FD task with the domain to evaluate and the aux problem file
fd_eva_domain = pddl_parser.pddl_file.parse_pddl_file("domain", evaluation_domain_filename)
fd_eva_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_eva_domain, fd_problem)
known_actions = list()
if partial_domain_filename:
# Creating a FD task with the partial domain and the aux problem file
fd_par_domain = pddl_parser.pddl_file.parse_pddl_file("domain", partial_domain_filename)
fd_par_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_par_domain, fd_problem)
known_actions = [a.name for a in fd_par_task.actions]
arities = set()
actions_arity_list = list()
for action in fd_ref_task.actions:
arity = len(action.parameters)
action_name = action.name
if action_name not in known_actions:
actions_arity_list.append((action_name, arity))
arities.add(arity)
if not reformulation:
matches = list()
for action_name, arity in actions_arity_list:
matches.append((action_name, action_name))
matching_list = [matches]
else:
actions_by_arity = list()
for ar in arities:
actions = list()
for action,arity in actions_arity_list:
if arity == ar:
actions.append(action)
actions_by_arity.append((ar, actions))
combinations_by_arity = list()
for arity, actions in actions_by_arity:
# combinations = list(itertools.combinations_with_replacement(actions, 2))
        combinations = [list(zip(x, actions)) for x in itertools.permutations(actions, len(actions))]
combinations_by_arity.append((arity, combinations))
# print(combinations)
action_combinations = combinations_by_arity[0][1]
for i in range(1, len(combinations_by_arity)):
# action_combinations = [zip(x, combinations_by_arity[i][1]) for x in itertools.permutations(action_combinations, len(combinations_by_arity[i][1]))]
aux = list()
for c in itertools.product(action_combinations, combinations_by_arity[i][1]):
aux2 = [x for x in c[0]]
aux2.extend(c[1])
aux.append(aux2)
action_combinations = aux
matching_list = action_combinations
best_score = -1
best_evaluation = None
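# Keep the matching that maximizes the F1 score of the averaged precision
# and recall over preconditions, adds and deletes.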
for matches in matching_list:
evaluation = evaluate_matching(matches, fd_eva_task, fd_ref_task)
if evaluation[6] + evaluation[7] > 0:
f1_score = 2 * (evaluation[6] * evaluation[7]) / (evaluation[6] + evaluation[7])
else:
f1_score = 0.0
if f1_score > best_score:
best_score = f1_score
best_evaluation = evaluation
# print(f1_score, matches)
print("Pres: precision={}, recall={}".format(best_evaluation[0], best_evaluation[1]))
print("Adds: precision={}, recall={}".format(best_evaluation[2], best_evaluation[3]))
print("Dels: precision={}, recall={}".format(best_evaluation[4], best_evaluation[5]))
print("Total: precision={}, recall={}".format(best_evaluation[6], best_evaluation[7]))
print(" & ".join([fd_ref_task.domain_name] + [str(round(e, 2)) for e in best_evaluation]) + " \\\\")
sys.exit(0)
| 7,048 | 37.102703 | 156 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/generate_observations.py
|
"""
For computing plans using madagascar, state trajectories using VAL, and observation files
"""
import os, glob
from dependencies.fama.src import planning, pddl, pddl_parser
N_HORIZON = 10
# These can be set/overridden by calling the set_paths function and
# providing the domain name, base directory, and example directory.
DOMAIN = None
BASE_DIR = None
EXAMPLE_DIR = None
MADAGASCAR_PATH = None
DOMAIN_PATH = None
PROBLEM_DIR = None
PLAN_DIR = None
LOGFILE_PATH = None
OBSERVATIONS_DIR = None
VAL_OUT_PATH = None
def create_fd_task(domain_filename,problem_filename):
# Creating a FD task with the domain and the problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
# Modifying domain and problem when planning for horizon
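    # Horizon encoding: a chain of "step" objects i1..iN linked by "inext",
    # with "current" marking the position; every action gets a conditional
    # effect that advances "current". Note the original goal is replaced by
    # current(iN), so the planner is asked for a fixed-length trace rather
    # than one achieving the task's own goal.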
if N_HORIZON > 0:
fd_task.types.append(pddl.pddl_types.Type("step", "None"))
fd_task.predicates.append(pddl.predicates.Predicate("current", [pddl.pddl_types.TypedObject("?i", "step")]))
fd_task.predicates.append(pddl.predicates.Predicate("inext", [pddl.pddl_types.TypedObject("?i1", "step"), pddl.pddl_types.TypedObject("?i2", "step")]))
for a in fd_task.actions:
params = []
params += [pddl.pddl_types.TypedObject("?i1", "step")]
params += [pddl.pddl_types.TypedObject("?i2", "step")]
pre = []
pre += [pddl.conditions.Atom("current", ["?i1"])]
pre += [pddl.conditions.Atom("inext", ["?i1", "?i2"])]
a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.NegatedAtom("current", ["?i1"]))]
a.effects += [pddl.effects.Effect(params, pddl.conditions.Conjunction(pre), pddl.conditions.Atom("current", ["?i2"]))]
for i in range(1, N_HORIZON + 1):
fd_task.objects.append(pddl.pddl_types.TypedObject("i" + str(i), "step"))
for i in range(2, N_HORIZON+1):
fd_task.init.append(pddl.conditions.Atom("inext", ["i" + str(i-1), "i" + str(i)]))
fd_task.init.append(pddl.conditions.Atom("current", ["i1"]))
fd_task.goal = pddl.conditions.Conjunction([pddl.conditions.Atom("current", ["i"+str(N_HORIZON)])])
return fd_task
def generate_obs(plan,states,fd_task,observation_file):
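    """Write one observation file: the object list and initial state first,
    then an alternating sequence of observed states and executed actions,
    closed by a goal built from the final state."""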
fdomain=open(observation_file,"w")
for i in range(0,len(plan.actions)):
if i==0:
# HEAD
fdomain.write("(solution \n")
fdomain.write("(:objects ")
str_out = ""
for o in set(fd_task.objects):
str_out = str_out + str(o).replace(":"," - ") + " "
str_out = str_out + ")\n"
states[0].filter_literals_byName(["inext","current"])
str_out = str_out +"(:init " + str(states[i]) + ")\n\n"
fdomain.write(str_out)
# BODY
states[i].filter_literals_byName(["inext","current"])
fdomain.write("(:observations " + str(states[i])+")\n\n")
fdomain.write("(:action "+str(plan.actions[i])+")\n\n")
if i==(len(plan.actions)-1):
# TAIL
str_out = ""
states[-1].filter_literals_byName(["inext","current"])
str_out = str_out +"(:goal " + str(states[i+1]) + "))\n"
fdomain.write(str_out)
fdomain.close()
def set_paths(domain_name, domain_file, base_dir, example_dir, observation_dir):
global DOMAIN, BASE_DIR, EXAMPLE_DIR, MADAGASCAR_PATH, DOMAIN_PATH, PROBLEM_DIR, PLAN_DIR, LOGFILE_PATH, OBSERVATIONS_DIR, VAL_OUT_PATH
DOMAIN = domain_name
BASE_DIR = base_dir
EXAMPLE_DIR = example_dir
OBSERVATIONS_DIR = observation_dir
MADAGASCAR_PATH = BASE_DIR+"dependencies/madagascar/M"
DOMAIN_PATH = EXAMPLE_DIR+domain_file
PROBLEM_DIR = EXAMPLE_DIR+"instances/"
PLAN_DIR = EXAMPLE_DIR+"plans/"
LOGFILE_PATH = EXAMPLE_DIR+"madagascar_output.txt"
VAL_OUT_PATH = EXAMPLE_DIR+"val.log"
if not os.path.exists(PLAN_DIR):
os.mkdir(PLAN_DIR)
if not os.path.exists(OBSERVATIONS_DIR):
os.mkdir(OBSERVATIONS_DIR)
def compute_plans_and_observations(domain_name, domain_file, base_dir, example_dir, observation_dir):
set_paths(domain_name, domain_file, base_dir, example_dir, observation_dir)
if os.path.exists(LOGFILE_PATH):
os.remove(LOGFILE_PATH)
if os.path.exists(VAL_OUT_PATH):
os.remove(VAL_OUT_PATH)
print("EXAMPLE_DIR: ",example_dir)
problem_files = glob.glob(PROBLEM_DIR+"*")
for problem_path in problem_files:
# find a plan
plan_file = PLAN_DIR+"plan-"+problem_path.split("-")[1].split(".")[0] + ".txt"
command = MADAGASCAR_PATH+" "+DOMAIN_PATH+" "+problem_path+" -W -S 1 -Q -o "+plan_file+" > "+LOGFILE_PATH
# print("Planning..")
os.system(command)
plan = planning.Plan([])
plan.read_plan(plan_file)
# compute state trajectory
# print("Computing state trajectory..")
states = planning.VAL_computation_state_trajectory(DOMAIN_PATH, problem_path, plan_file, VAL_OUT_PATH)
# create fd_task
fd_task = create_fd_task(DOMAIN_PATH,problem_path)
# generate observations i.e. state and action sequence
observation_file = OBSERVATIONS_DIR+"/observation-"+ problem_path.split("-")[1].split(".")[0] +".txt"
generate_obs(plan,states,fd_task,observation_file)
if __name__=="__main__":
domain = "blocks_rash"
base_dir = os.getcwd()+"/"
example_dir = BASE_DIR+"dependencies/fama/output/"+domain+"/"
observation_dir = example_dir+"observations/"
compute_plans_and_observations(domain, base_dir, example_dir, observation_dir)
| 5,875 | 40.380282 | 159 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/unsupervised-evaluator.py
|
#! /usr/bin/env python
import sys, os, glob
import pddl, pddl_parser
import config, fdtask_to_pddl
#**************************************#
# MAIN
#**************************************#
try:
domain_folder_name = sys.argv[1]
problem_prefix_filename = sys.argv[2]
plan_prefix_filename = sys.argv[3]
learned_domain_filename = sys.argv[4]
except:
print "Usage:"
print sys.argv[0] + " <domain folder> <problem prelfix> <plan prefix> <learned domain>"
sys.exit(-1)
errors = {}
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", learned_domain_filename)
# Running VAL for each problem and plan
for plan_filename in sorted(glob.glob(domain_folder_name + "/" + plan_prefix_filename + "*")):
problem_filename=plan_filename.replace(plan_prefix_filename,problem_prefix_filename).replace(".txt",".pddl")
aux_problem_file = problem_filename
bmissing_pre=True
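    # Repair-and-retry loop: run VAL and, whenever an action has an
    # unsatisfied precondition, count the error against that action, add the
    # missing facts to the problem's initial state, and validate again until
    # the plan goes through.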
while bmissing_pre:
# Creating a FD task with the domain and the problem file
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", aux_problem_file)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
cmd = "rm " + config.VAL_OUT + ";"+config.VAL_PATH+"/validate -v " + learned_domain_filename + " " + aux_problem_file + " " + plan_filename + " > " + config.VAL_OUT
print("\n\nExecuting... " + cmd)
os.system(cmd)
log_file = open(config.VAL_OUT, 'r')
bmissing_pre=False
incomplete_action=""
for line in log_file:
if "has an unsatisfied precondition at time" in line:
incomplete_action= line.split(")")[0][1:].split(" ")[0]
bmissing_pre=True
if bmissing_pre and "(Set (" in line and ") to true)" in line:
try:
errors[incomplete_action]=errors[incomplete_action]+1
except:
errors[incomplete_action]=1
if "and (Set " in line:
missing_fact = line.replace("and (Set ","")[5:-10]
elif " (Set " in line:
missing_fact = line.replace(" (Set ","")[5:-10]
else:
missing_fact = line[5:-10]
fact_name = missing_fact.replace("(","").replace(")","").split(" ")[0]
fact_args = missing_fact.replace("(","").replace(")","").split(" ")[1:]
fd_task.init.append(pddl.conditions.Atom(fact_name, fact_args))
log_file.close()
fdomain = open("aux_problem.pddl", "w")
fdomain.write(fdtask_to_pddl.format_problem(fd_task, fd_domain))
fdomain.close()
aux_problem_file = "aux_problem.pddl"
print(errors)
sys.exit(0)
| 2,908 | 39.402778 | 170 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/__init__.py
| 0 | 0 | 0 |
py
|
|
DAAISy
|
DAAISy-main/dependencies/fama/src/experimenter_new.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools, math, time
import pddl, pddl_parser
import fdtask_to_pddl
import numpy as np
import model_evaluator
import config
# **************************************#
# MAIN
# **************************************#
try:
# print("Command: ", sys.argv)
if "-s" in sys.argv:
check_static_predicates = "-s "
sys.argv.remove("-s")
else:
check_static_predicates = ""
if "-t" in sys.argv:
index = sys.argv.index("-t")
trace_prefix = sys.argv[index+1]
sys.argv.remove("-t")
sys.argv.remove(trace_prefix)
else:
trace_prefix = "trace"
if "-l" in sys.argv:
index = sys.argv.index("-l")
trace_min = int(sys.argv[index+1])
trace_max = int(sys.argv[index+2])
sys.argv.pop(index)
sys.argv.pop(index)
sys.argv.pop(index)
else:
trace_min = None
if "-m" in sys.argv:
index = sys.argv.index("-m")
model_file = sys.argv[index+1]
sys.argv.remove("-m")
sys.argv.remove(model_file)
else:
model_file = "learned_domain.pddl"
if "-c" in sys.argv:
index = sys.argv.index("-c")
cnt = int(sys.argv[index+1])
sys.argv.pop(index)
sys.argv.pop(index)
else:
cnt = 0
domain_folder_name = sys.argv[1]
action_observability = sys.argv[2]
state_observability = sys.argv[3]
except:
print "Usage:"
print sys.argv[0] + "[-s] <domain folder> <action observability (0-100)> <state observability (0-100)>"
print("Error")
sys.exit(-1)
try:
os.stat(config.OUTPUT_PATH+"/models")
except:
os.mkdir(config.OUTPUT_PATH+"/models")
LEARNED_DOMAIN_PATH = config.OUTPUT_PATH+"/learned_domain.pddl"
# outdir = "results/" + s + "-"+ d + "/"
# cmd = "mkdir " + outdir
# print("\n\nExecuting... " + cmd)
# os.system(cmd)
cmd = config.PROJECT_PATH + "/src/compiler_new.py " + check_static_predicates + domain_folder_name + " " + action_observability + " " + state_observability + " -ct " + str(cnt) + " -t " + trace_prefix + " -m " + model_file
if trace_min != None:
cmd += " -l {} {}".format(trace_min, trace_max)
# print("\n\nExecuting... " + cmd)
tic = time.time()
os.system(cmd)
toc = time.time()
processing_time = toc - tic
# try:
# print(LEARNED_DOMAIN_PATH)
# print(domain_folder_name+"domain.pddl")
domain_name, best_evaluation, best_matches = model_evaluator.evaluate(config.OUTPUT_PATH+"/models/"+model_file,
domain_folder_name+"/domain.pddl", True)
print(" , ".join(
[domain_name] + [str(cnt)] + [str(round(processing_time/cnt,2))]+ [str(round(e, 2)) for e in best_evaluation]) + ", {}".format(round(processing_time,2)))
# except:
# print("No solution found")
sys.exit(0)
| 2,866 | 27.107843 | 222 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/trace_generator.py
|
#! /usr/bin/env python
import sys, os, copy, glob, itertools, random
import pddl, pddl_parser
import fdtask_to_pddl, planning
import config
# Madagascar details
M_PATH = config.PLANNER_PATH
M_CALL = "/" + config.PLANNER_NAME
M_PARAMS = " -W " + config.PLANNER_PARAMS
# FD details
FD_PATH = "/home/slimbook/software/fd/"
FD_CALL = "/fast-downward.py --alias seq-sat-lama-2011 "
FD_PARAMS = ""
# LPG details
LPG_PATH = config.PROJECT_PATH + "util/lpg/"
LPG_CALL = "lpg-td-1.0 "
LPG_PARAMS = " -n 1 -v off -out sas_plan"
# **************************************#
# MAIN
# **************************************#
try:
domain_filename = sys.argv[1]
problem_filename = sys.argv[2]
planner = sys.argv[3]
except:
print "Usage:"
print sys.argv[0] + " <domain> <problem> <planner> "
sys.exit(-1)
# Creating a FD task with the domain and the problem file
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_filename)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_filename)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
# Running the planner
PLANNER_OUT = "aux_planner.log"
if planner == "FD":
cmd = "rm sas_plan*; ulimit -t 200;" + FD_PATH + FD_CALL + " " + domain_filename + " " + problem_filename + " " + FD_PARAMS + " > " + PLANNER_OUT
if planner == "M":
cmd = "rm sas_plan*; ulimit -t 200;" + M_PATH + M_CALL + " " + domain_filename + " " + problem_filename + " " + M_PARAMS + " > " + PLANNER_OUT
if planner == "LPG":
cmd = "rm sas_plan*; ulimit -t 20;" + LPG_PATH + LPG_CALL + " -o " + domain_filename + " -f " + problem_filename + " " + LPG_PARAMS + " > " + PLANNER_OUT
print("\n\nExecuting... " + cmd)
os.system(cmd)
try:
# Loading the plan
plan_files = glob.glob("sas_plan*")
plan_files.sort()
plan_filename = plan_files[-1]
plan = planning.Plan([])
plan.read_plan(plan_filename)
plan.write_plan(plan_filename)
except:
print("No plan generated")
sys.exit(-1)
# Generating the state trajectory
states = planning.VAL_computation_state_trajectory(domain_filename, problem_filename, plan_filename)
# Output the trace
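# Each observation file is a "(solution ...)" s-expression: objects and
# initial state, then alternating observed states and plan actions, and
# finally a goal built from the filtered final state.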
counter = 1
for i in range(0, len(plan.actions)):
if (i == 0):
# HEAD
fdomain = open("observation-" + str(counter).zfill(2) + ".txt", "w")
index = 0
counter = counter + 1
fdomain.write("(solution \n")
fdomain.write("(:objects ")
str_out = ""
for o in sorted(set(fd_task.objects)):
str_out = str_out + str(o).replace(":", " - ") + " "
str_out = str_out + ")\n"
str_out = str_out + "(:init " + str(states[0]) + ")\n\n"
fdomain.write(str_out)
# BODY
fdomain.write("(:observations " + str(states[i]) + ")\n\n")
fdomain.write(str(plan.actions[i]) + "\n\n")
index = index + 1
if (i == len(plan.actions) - 1):
# TAIL
str_out = ""
states[-1].filter_literals_byName(["inext", "current"] + [str("applied-" + a.name) for a in fd_task.actions])
str_out = str_out + "(:goal " + str(states[-1]) + "))\n"
fdomain.write(str_out)
fdomain.close()
sys.exit(0)
| 3,182 | 31.814433 | 157 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/experimenter.py
|
#! /usr/bin/env python
import glob, os, sys, copy, itertools, math
import pddl, pddl_parser
import fdtask_to_pddl
#**************************************#
# MAIN
#**************************************#
try:
input_level = int(sys.argv[1])
except:
print "Usage:"
print sys.argv[0] + "<input level (0 plans, 1 steps, 2 len(plan), 3 minimum)>"
sys.exit(-1)
SOURCES = ["handpicked"]
domains= ["blocks", "gripper", "miconic", "visitall"]
str_out = ""
for s in SOURCES:
for d in domains:
outdir = "results/"+s+"-"+d+"/"
cmd = "mkdir " + outdir
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "./compiler.py ../benchmarks/"+s+"/"+d+"/ test plan "+str(input_level)+" > " + outdir + "compiler.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "./evaluator.py ../benchmarks/reference/"+d+"/domain.pddl learned_domain.pddl ../benchmarks/"+s+"/"+d+"/test-1.pddl > " + outdir + "evaluator.log"
print("\n\nExecuting... " + cmd)
os.system(cmd)
cmd = "mv learned_domain.pddl " + outdir
print("\n\nExecuting... " + cmd)
os.system(cmd)
# Results Summary
bnext=False
evafile = open (outdir + "evaluator.log","r")
for line in evafile:
            if bnext:
str_out = str_out + s + "/" + d + " " + line
if "PrecAverage PrecVariance DelAverage DelVariance AddAverage AddVariance" in line:
bnext=True
evafile.close()
print("")
print(str_out)
sys.exit(0)
| 1,560 | 27.907407 | 158 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/f_expression.py
|
from __future__ import print_function
class FunctionalExpression(object):
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
if value != int(value):
raise ValueError("Fractional numbers are not supported")
self.value = int(value)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
self.hash = hash((self.__class__, self.symbol, self.args))
def __hash__(self):
return self.hash
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.symbol == other.symbol
and self.args == other.args)
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
args = [var_mapping.get(arg, arg) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert self.symbol != "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
for fact in init_facts:
if isinstance(fact, FunctionAssignment):
if fact.fluent == pne:
return fact.expression
assert False, "Could not find instantiation for PNE!"
class FunctionAssignment(object):
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost" which does not need to be instantiated
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
| 3,606 | 39.52809 | 87 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/pddl_types.py
|
# Renamed from types.py to avoid clash with stdlib module.
# In the future, use explicitly relative imports or absolute
# imports as a better solution.
import itertools
def _get_type_predicate_name(type_name):
# PDDL allows mixing types and predicates, but some PDDL files
# have name collisions between types and predicates. We want to
# support both the case where such name collisions occur and the
# case where types are used as predicates.
#
# We internally give types predicate names that cannot be confused
# with non-type predicates. When the input uses a PDDL type as a
# predicate, we automatically map it to this internal name.
return "type@%s" % type_name
class Type(object):
def __init__(self, name, basetype_name=None):
self.name = name
self.basetype_name = basetype_name
def __str__(self):
return self.name
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.basetype_name)
def get_predicate_name(self):
return _get_type_predicate_name(self.name)
class TypedObject(object):
def __init__(self, name, type_name):
self.name = name
self.type_name = type_name
def __hash__(self):
return hash((self.name, self.type_name))
def __eq__(self, other):
return self.name == other.name and self.type_name == other.type_name
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (self.name, self.type_name)
def __repr__(self):
return "<TypedObject %s: %s>" % (self.name, self.type_name)
def uniquify_name(self, type_map, renamings):
if self.name not in type_map:
type_map[self.name] = self.type_name
return self
for counter in itertools.count(1):
new_name = self.name + str(counter)
if new_name not in type_map:
renamings[self.name] = new_name
type_map[new_name] = self.type_name
return TypedObject(new_name, self.type_name)
def get_atom(self):
# TODO: Resolve cyclic import differently.
from . import conditions
predicate_name = _get_type_predicate_name(self.type_name)
return conditions.Atom(predicate_name, [self.name])
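# Example (sketch): TypedObject("?b", "block").get_atom() returns the
# internal type atom Atom("type@block", ["?b"]).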
| 2,306 | 31.492958 | 76 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/effects.py
|
from __future__ import print_function
from . import conditions
def cartesian_product(*sequences):
# TODO: Also exists in tools.py outside the pddl package (defined slightly
# differently). Not good. Need proper import paths.
if not sequences:
yield ()
else:
for tup in cartesian_product(*sequences[1:]):
for item in sequences[0]:
yield (item,) + tup
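# Example (sketch): list(cartesian_product([1, 2], "ab")) yields
# [(1, 'a'), (2, 'a'), (1, 'b'), (2, 'b')] -- the first sequence varies
# fastest.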
class Effect(object):
def __init__(self, parameters, condition, literal):
self.parameters = parameters
self.condition = condition
self.literal = literal
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.condition == other.condition and
self.literal == other.literal)
def dump(self):
indent = " "
if self.parameters:
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
indent += " "
if self.condition != conditions.Truth():
print("%sif" % indent)
self.condition.dump(indent + " ")
print("%sthen" % indent)
indent += " "
print("%s%s" % (indent, self.literal))
def copy(self):
return Effect(self.parameters, self.condition, self.literal)
def uniquify_variables(self, type_map):
renamings = {}
self.parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
self.condition = self.condition.uniquify_variables(type_map, renamings)
self.literal = self.literal.rename_variables(renamings)
def instantiate(self, var_mapping, init_facts, fluent_facts,
objects_by_type, result):
if self.parameters:
var_mapping = var_mapping.copy() # Will modify this.
object_lists = [objects_by_type.get(par.type_name, [])
for par in self.parameters]
for object_tuple in cartesian_product(*object_lists):
for (par, obj) in zip(self.parameters, object_tuple):
var_mapping[par.name] = obj
self._instantiate(var_mapping, init_facts, fluent_facts, result)
else:
self._instantiate(var_mapping, init_facts, fluent_facts, result)
def _instantiate(self, var_mapping, init_facts, fluent_facts, result):
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return
effects = []
self.literal.instantiate(var_mapping, init_facts, fluent_facts, effects)
assert len(effects) <= 1
if effects:
result.append((condition, effects[0]))
def relaxed(self):
if self.literal.negated:
return None
else:
return Effect(self.parameters, self.condition.relaxed(), self.literal)
def simplified(self):
return Effect(self.parameters, self.condition.simplified(), self.literal)
class ConditionalEffect(object):
def __init__(self, condition, effect):
if isinstance(effect, ConditionalEffect):
self.condition = conditions.Conjunction([condition, effect.condition])
self.effect = effect.effect
else:
self.condition = condition
self.effect = effect
def dump(self, indent=" "):
print("%sif" % (indent))
self.condition.dump(indent + " ")
print("%sthen" % (indent))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)
new_effects.append(ConditionalEffect(self.condition, effect))
return ConjunctiveEffect(new_effects)
elif isinstance(norm_effect, UniversalEffect):
child = norm_effect.effect
cond_effect = ConditionalEffect(self.condition, child)
return UniversalEffect(norm_effect.parameters, cond_effect)
else:
return ConditionalEffect(self.condition, norm_effect)
def extract_cost(self):
return None, self
class UniversalEffect(object):
def __init__(self, parameters, effect):
if isinstance(effect, UniversalEffect):
self.parameters = parameters + effect.parameters
self.effect = effect.effect
else:
self.parameters = parameters
self.effect = effect
def dump(self, indent=" "):
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)\
or isinstance(effect, UniversalEffect)
new_effects.append(UniversalEffect(self.parameters, effect))
return ConjunctiveEffect(new_effects)
else:
return UniversalEffect(self.parameters, norm_effect)
def extract_cost(self):
return None, self
class ConjunctiveEffect(object):
def __init__(self, effects):
flattened_effects = []
for effect in effects:
if isinstance(effect, ConjunctiveEffect):
flattened_effects += effect.effects
else:
flattened_effects.append(effect)
self.effects = flattened_effects
def dump(self, indent=" "):
print("%sand" % (indent))
for eff in self.effects:
eff.dump(indent + " ")
def normalize(self):
new_effects = []
for effect in self.effects:
new_effects.append(effect.normalize())
return ConjunctiveEffect(new_effects)
def extract_cost(self):
new_effects = []
cost_effect = None
for effect in self.effects:
if isinstance(effect, CostEffect):
cost_effect = effect
else:
new_effects.append(effect)
return cost_effect, ConjunctiveEffect(new_effects)
class SimpleEffect(object):
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
return None, self
class CostEffect(object):
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
return self, None # this would only happen if
#an action has no effect apart from the cost effect
| 7,115 | 38.533333 | 97 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/functions.py
|
class Function(object):
def __init__(self, name, arguments, type_name):
self.name = name
self.arguments = arguments
if type_name != "number":
raise SystemExit("Error: object fluents not supported\n" +
"(function %s has type %s)" % (name, type_name))
self.type_name = type_name
def __str__(self):
result = "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
if self.type_name:
result += ": %s" % self.type_name
return result
| 550 | 35.733333 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/actions.py
|
from __future__ import print_function
import copy
from . import conditions
class Action(object):
def __init__(self, name, parameters, num_external_parameters,
precondition, effects, cost):
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
# num_external_parameters denotes how many of the parameters
# are "external", i.e., should be part of the grounded action
# name. Usually all parameters are external, but "invisible"
# parameters can be created when compiling away existential
# quantifiers in conditions.
self.num_external_parameters = num_external_parameters
self.precondition = precondition
self.effects = effects
self.cost = cost
self.uniquify_variables() # TODO: uniquify variables in cost?
def __repr__(self):
return "<Action %r at %#x>" % (self.name, id(self))
def dump(self):
print("%s(%s)" % (self.name, ", ".join(map(str, self.parameters))))
print("Precondition:")
self.precondition.dump()
print("Effects:")
for eff in self.effects:
eff.dump()
print("Cost:")
if(self.cost):
self.cost.dump()
else:
print(" None")
def uniquify_variables(self):
self.type_map = dict([(par.name, par.type_name)
for par in self.parameters])
self.precondition = self.precondition.uniquify_variables(self.type_map)
for effect in self.effects:
effect.uniquify_variables(self.type_map)
def relaxed(self):
new_effects = []
for eff in self.effects:
relaxed_eff = eff.relaxed()
if relaxed_eff:
new_effects.append(relaxed_eff)
return Action(self.name, self.parameters, self.num_external_parameters,
self.precondition.relaxed().simplified(),
new_effects)
def untyped(self):
# We do not actually remove the types from the parameter lists,
# just additionally incorporate them into the conditions.
# Maybe not very nice.
result = copy.copy(self)
parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
new_precondition = self.precondition.untyped()
result.precondition = conditions.Conjunction(parameter_atoms + [new_precondition])
result.effects = [eff.untyped() for eff in self.effects]
return result
def instantiate(self, var_mapping, init_facts, fluent_facts,
objects_by_type, metric):
"""Return a PropositionalAction which corresponds to the instantiation of
this action with the arguments in var_mapping. Only fluent parts of the
conditions (those in fluent_facts) are included. init_facts are evaluated
        while instantiating.
Precondition and effect conditions must be normalized for this to work.
Returns None if var_mapping does not correspond to a valid instantiation
(because it has impossible preconditions or an empty effect list.)"""
arg_list = [var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s %s)" % (self.name, " ".join(arg_list))
precondition = []
try:
self.precondition.instantiate(var_mapping, init_facts,
fluent_facts, precondition)
except conditions.Impossible:
return None
effects = []
for eff in self.effects:
eff.instantiate(var_mapping, init_facts, fluent_facts,
objects_by_type, effects)
if effects:
if metric:
if self.cost is None:
cost = 0
else:
cost = int(self.cost.instantiate(var_mapping, init_facts).expression.value)
else:
cost = 1
return PropositionalAction(name, precondition, effects, cost)
else:
return None
class PropositionalAction:
def __init__(self, name, precondition, effects, cost):
self.name = name
self.precondition = precondition
self.add_effects = []
self.del_effects = []
for condition, effect in effects:
if not effect.negated:
self.add_effects.append((condition, effect))
# Warning: This is O(N^2), could be turned into O(N).
# But that might actually harm performance, since there are
# usually few effects.
# TODO: Measure this in critical domains, then use sets if acceptable.
for condition, effect in effects:
if effect.negated and (condition, effect.negate()) not in self.add_effects:
self.del_effects.append((condition, effect.negate()))
self.cost = cost
def __repr__(self):
return "<PropositionalAction %r at %#x>" % (self.name, id(self))
def dump(self):
print(self.name)
for fact in self.precondition:
print("PRE: %s" % fact)
for cond, fact in self.add_effects:
print("ADD: %s -> %s" % (", ".join(map(str, cond)), fact))
for cond, fact in self.del_effects:
print("DEL: %s -> %s" % (", ".join(map(str, cond)), fact))
print("cost:", self.cost)
| 5,451 | 39.385185 | 95 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/predicates.py
|
class Predicate(object):
def __init__(self, name, arguments):
self.name = name
self.arguments = arguments
def __str__(self):
return "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
def get_arity(self):
return len(self.arguments)
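# Example (sketch): Predicate("on", [TypedObject("?x", "block"),
# TypedObject("?y", "block")]) prints as "on(?x: block, ?y: block)" and
# get_arity() returns 2.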
| 286 | 25.090909 | 74 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/__init__.py
|
from .pddl_types import Type
from .pddl_types import TypedObject
from .tasks import Task
from .tasks import Requirements
from .traces import Trace
from .predicates import Predicate
from .functions import Function
from .actions import Action
from .actions import PropositionalAction
from .axioms import Axiom
from .axioms import PropositionalAxiom
from .conditions import Literal
from .conditions import Atom
from .conditions import NegatedAtom
from .conditions import Falsity
from .conditions import Truth
from .conditions import Conjunction
from .conditions import Disjunction
from .conditions import UniversalCondition
from .conditions import ExistentialCondition
from .effects import ConditionalEffect
from .effects import ConjunctiveEffect
from .effects import CostEffect
from .effects import Effect
from .effects import SimpleEffect
from .effects import UniversalEffect
from .f_expression import Assign
from .f_expression import Increase
from .f_expression import NumericConstant
from .f_expression import PrimitiveNumericExpression
| 1,047 | 25.2 | 52 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/tasks.py
|
from __future__ import print_function
from . import axioms
from . import predicates
class Task(object):
def __init__(self, domain_name, task_name, requirements,
types, objects, predicates, functions, init, goal,
actions, axioms, use_metric):
self.domain_name = domain_name
self.task_name = task_name
self.requirements = requirements
self.types = types
self.objects = objects
self.predicates = predicates
self.functions = functions
self.init = init
self.goal = goal
self.actions = actions
self.axioms = axioms
self.axiom_counter = 0
self.use_min_cost_metric = use_metric
def add_axiom(self, parameters, condition):
name = "new-axiom@%d" % self.axiom_counter
self.axiom_counter += 1
axiom = axioms.Axiom(name, parameters, len(parameters), condition)
self.predicates.append(predicates.Predicate(name, parameters))
self.axioms.append(axiom)
return axiom
def dump(self):
print("Problem %s: %s [%s]" % (
self.domain_name, self.task_name, self.requirements))
print("Types:")
for type in self.types:
print(" %s" % type)
print("Objects:")
for obj in self.objects:
print(" %s" % obj)
print("Predicates:")
for pred in self.predicates:
print(" %s" % pred)
print("Functions:")
for func in self.functions:
print(" %s" % func)
print("Init:")
for fact in self.init:
print(" %s" % fact)
print("Goal:")
self.goal.dump()
print("Actions:")
for action in self.actions:
action.dump()
if self.axioms:
print("Axioms:")
for axiom in self.axioms:
axiom.dump()
class Requirements(object):
def __init__(self, requirements):
self.requirements = requirements
for req in requirements:
assert req in (
":strips", ":adl", ":typing", ":negation", ":equality",
":negative-preconditions", ":disjunctive-preconditions",
":existential-preconditions", ":universal-preconditions",
":quantified-preconditions", ":conditional-effects",
":derived-predicates", ":action-costs"), req
def __str__(self):
return ", ".join(self.requirements)
| 2,469 | 32.835616 | 74 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/traces.py
|
from __future__ import print_function
class Trace(object):
def __init__(self, objects, init, goal, actions, states):
self.objects = objects
self.init = init
self.goal = goal
self.actions = actions
self.states = states
    def dump(self):
        # Note: the original dump was copied from Task.dump and referenced
        # attributes (domain_name, types, predicates, ...) that Trace does
        # not define; this version prints only what a Trace actually stores.
        print("Objects:")
        for obj in self.objects:
            print("  %s" % obj)
        print("Init:")
        for fact in self.init:
            print("  %s" % fact)
        print("Goal:")
        self.goal.dump()
        print("Actions:")
        for action in self.actions:
            action.dump()
        print("States:")
        for state in self.states:
            print("  %s" % state)
| 1,109 | 28.210526 | 65 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/axioms.py
|
from __future__ import print_function
from . import conditions
class Axiom(object):
def __init__(self, name, parameters, num_external_parameters, condition):
# For an explanation of num_external_parameters, see the
# related Action class. Note that num_external_parameters
# always equals the arity of the derived predicate.
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
self.num_external_parameters = num_external_parameters
self.condition = condition
self.uniquify_variables()
def dump(self):
args = map(str, self.parameters[:self.num_external_parameters])
print("Axiom %s(%s)" % (self.name, ", ".join(args)))
self.condition.dump()
def uniquify_variables(self):
self.type_map = dict([(par.name, par.type_name)
for par in self.parameters])
self.condition = self.condition.uniquify_variables(self.type_map)
def instantiate(self, var_mapping, init_facts, fluent_facts):
# The comments for Action.instantiate apply accordingly.
arg_list = [self.name] + [var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s)" % " ".join(arg_list)
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return None
effect_args = [var_mapping.get(arg.name, arg.name)
for arg in self.parameters[:self.num_external_parameters]]
effect = conditions.Atom(self.name, effect_args)
return PropositionalAxiom(name, condition, effect)
class PropositionalAxiom:
def __init__(self, name, condition, effect):
self.name = name
self.condition = condition
self.effect = effect
def clone(self):
return PropositionalAxiom(self.name, list(self.condition), self.effect)
def dump(self):
if self.effect.negated:
print("not", end=' ')
print(self.name)
for fact in self.condition:
print("PRE: %s" % fact)
print("EFF: %s" % self.effect)
@property
def key(self):
return (self.name, self.condition, self.effect)
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __eq__(self, other):
return self.key == other.key
def __repr__(self):
return '<PropositionalAxiom %s %s -> %s>' % (
self.name, self.condition, self.effect)
| 2,689 | 33.050633 | 88 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl/conditions.py
|
from __future__ import print_function
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition(object):
def __init__(self, parts):
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parts))
def __hash__(self):
return self.hash
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.hash < other.hash
def __le__(self, other):
return self.hash <= other.hash
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def _postorder_visit(self, method_name, *args):
part_results = [part._postorder_visit(method_name, *args)
for part in self.parts]
method = getattr(self, method_name, self._propagate)
return method(part_results, *args)
def _propagate(self, parts, *args):
return self.change_parts(parts)
def simplified(self):
return self._postorder_visit("_simplified")
def relaxed(self):
return self._postorder_visit("_relaxed")
def untyped(self):
return self._postorder_visit("_untyped")
def uniquify_variables(self, type_map, renamings={}):
# Cannot used _postorder_visit because this requires preorder
# for quantified effects.
if not self.parts:
return self
else:
return self.__class__([part.uniquify_variables(type_map, renamings)
for part in self.parts])
def to_untyped_strips(self):
raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise ValueError("Cannot instantiate condition: not normalized")
def free_variables(self):
result = set()
for part in self.parts:
result |= part.free_variables()
return result
def has_disjunction(self):
for part in self.parts:
if part.has_disjunction():
return True
return False
def has_existential_part(self):
for part in self.parts:
if part.has_existential_part():
return True
return False
def has_universal_part(self):
for part in self.parts:
if part.has_universal_part():
return True
return False
class ConstantCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = ()
def __init__(self):
self.hash = hash(self.__class__)
def change_parts(self, parts):
return self
def __eq__(self, other):
return self.__class__ is other.__class__
class Impossible(Exception):
pass
class Falsity(ConstantCondition):
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise Impossible()
def negate(self):
return Truth()
class Truth(ConstantCondition):
def to_untyped_strips(self):
return []
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
pass
def negate(self):
return Falsity()
class JunctorCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parts == other.parts)
def change_parts(self, parts):
return self.__class__(parts)
class Conjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Conjunction):
result_parts += part.parts
elif isinstance(part, Falsity):
return Falsity()
elif not isinstance(part, Truth):
result_parts.append(part)
if not result_parts:
return Truth()
if len(result_parts) == 1:
return result_parts[0]
return Conjunction(result_parts)
def to_untyped_strips(self):
result = []
for part in self.parts:
result += part.to_untyped_strips()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
for part in self.parts:
part.instantiate(var_mapping, init_facts, fluent_facts, result)
def negate(self):
return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif not isinstance(part, Falsity):
result_parts.append(part)
if not result_parts:
return Falsity()
if len(result_parts) == 1:
return result_parts[0]
return Disjunction(result_parts)
def negate(self):
return Conjunction([p.negate() for p in self.parts])
def has_disjunction(self):
return True
class QuantifiedCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __init__(self, parameters, parts):
self.parameters = tuple(parameters)
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parameters, self.parts))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.parts == other.parts)
def _dump(self, indent=" "):
arglist = ", ".join(map(str, self.parameters))
return "%s %s" % (self.__class__.__name__, arglist)
def _simplified(self, parts):
if isinstance(parts[0], ConstantCondition):
return parts[0]
else:
return self._propagate(parts)
def uniquify_variables(self, type_map, renamings={}):
renamings = dict(renamings) # Create a copy.
new_parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
return self.__class__(new_parameters, new_parts)
def free_variables(self):
result = Condition.free_variables(self)
for par in self.parameters:
result.discard(par.name)
return result
def change_parts(self, parts):
return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom().negate() for par in self.parameters]
return UniversalCondition(self.parameters,
[Disjunction(type_literals + parts)])
def negate(self):
return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
def has_universal_part(self):
return True
class ExistentialCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom() for par in self.parameters]
return ExistentialCondition(self.parameters,
[Conjunction(type_literals + parts)])
def negate(self):
return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, result)
def has_existential_part(self):
return True
class Literal(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = []
def __init__(self, predicate, args):
self.predicate = predicate
self.args = tuple(args)
self.hash = hash((self.__class__, self.predicate, self.args))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.predicate == other.predicate and
self.args == other.args)
def __ne__(self, other):
return not self == other
@property
def key(self):
return str(self.predicate), self.args
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __str__(self):
return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
", ".join(map(str, self.args)))
def __repr__(self):
return '<%s>' % self
def _dump(self):
return str(self)
def change_parts(self, parts):
return self
def uniquify_variables(self, type_map, renamings={}):
return self.rename_variables(renamings)
def rename_variables(self, renamings):
new_args = tuple(renamings.get(arg, arg) for arg in self.args)
return self.__class__(self.predicate, new_args)
def replace_argument(self, position, new_arg):
new_args = list(self.args)
new_args[position] = new_arg
return self.__class__(self.predicate, new_args)
def free_variables(self):
return set(arg for arg in self.args if arg[0] == "?")
class Atom(Literal):
negated = False
def to_untyped_strips(self):
return [self]
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(atom)
elif atom not in init_facts:
raise Impossible()
def negate(self):
return NegatedAtom(self.predicate, self.args)
def positive(self):
return self
class NegatedAtom(Literal):
negated = True
def _relaxed(self, parts):
return Truth()
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(NegatedAtom(self.predicate, args))
elif atom in init_facts:
raise Impossible()
def negate(self):
return Atom(self.predicate, self.args)
positive = negate
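# Example (sketch): Atom("on", ["a", "b"]).negate() returns
# NegatedAtom("on", ["a", "b"]), while Conjunction/Disjunction negate via
# De Morgan's laws, negating every part and swapping the junctor.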
| 11,108 | 36.657627 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl_parser/lisp_parser.py
|
__all__ = ["ParseError", "parse_nested_list"]
class ParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
# Basic functions for parsing PDDL (Lisp) files.
def parse_nested_list(input_file):
tokens = tokenize(input_file)
next_token = next(tokens)
if next_token != "(":
raise ParseError("Expected '(', got %s." % next_token)
result = list(parse_list_aux(tokens))
for tok in tokens: # Check that generator is exhausted.
raise ParseError("Unexpected token: %s." % tok)
return result
def tokenize(input):
for line in input:
line = line.split(";", 1)[0] # Strip comments.
try:
line.encode("ascii")
except UnicodeEncodeError:
raise ParseError("Non-ASCII character outside comment: %s" %
line[0:-1])
line = line.replace("(", " ( ").replace(")", " ) ").replace("?", " ?")
for token in line.split():
splitted_token = token.split("-")
if len(splitted_token)==2 and splitted_token[0]=="" and token != "-":
yield "-"
yield splitted_token[1].lower()
else:
yield token.lower()
def parse_list_aux(tokenstream):
# Leading "(" has already been swallowed.
while True:
try:
token = next(tokenstream)
except StopIteration:
raise ParseError("Missing ')'")
if token == ")":
return
elif token == "(":
yield list(parse_list_aux(tokenstream))
else:
yield token
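# Illustrative usage sketch (added for clarity; the PDDL fragment is made
# up): parse_nested_list accepts any iterable of lines, e.g. a StringIO.
def _demo_parse_nested_list():
    import io
    pddl_text = io.StringIO("(define (domain blocks))")
    return parse_nested_list(pddl_text)
    # -> ["define", ["domain", "blocks"]]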
| 1,680 | 31.960784 | 81 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl_parser/pddl_file.py
|
try:
# Python 3.x
from builtins import open as file_open
except ImportError:
# Python 2.x
from codecs import open as file_open
from . import lisp_parser
from . import parsing_functions
def parse_pddl_file(type, filename):
try:
# The builtin open function is shadowed by this module's open function.
# We use the Latin-1 encoding (which allows a superset of ASCII, of the
# Latin-* encodings and of UTF-8) to allow special characters in
# comments. In all other parts, we later validate that only ASCII is
# used.
return lisp_parser.parse_nested_list(file_open(filename,
encoding='ISO-8859-1'))
except IOError as e:
raise SystemExit("Error: Could not read file: %s\nReason: %s." %
(e.filename, e))
except lisp_parser.ParseError as e:
raise SystemExit("Error: Could not parse %s file: %s\nReason: %s." %
(type, filename, e))
def open(domain_filename=None, task_filename=None):
domain_pddl = parse_pddl_file("domain", domain_filename)
task_pddl = parse_pddl_file("task", task_filename)
return parsing_functions.parse_task(domain_pddl, task_pddl)
| 1,330 | 35.972222 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl_parser/__init__.py
|
from .pddl_file import open
| 28 | 13.5 | 27 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl_parser/parsing_functions.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import copy
import dependencies.fama.src.graph as graph
import dependencies.fama.src.pddl as pddl
import random
import itertools
def parse_typed_list(alist, only_variables=False,
constructor=pddl.TypedObject,
default_type="object"):
aux = copy.deepcopy(alist)
alist = []
for item in aux:
if item.startswith("-") and not item == "-":
alist.append("-")
alist.append(item[1:])
else:
alist.append(item)
result = []
while alist:
try:
            separator_position = alist.index("-")
except ValueError:
items = alist
_type = default_type
alist = []
else:
items = alist[:separator_position]
_type = alist[separator_position + 1]
alist = alist[separator_position + 2:]
for item in items:
assert not only_variables or item.startswith("?"), \
"Expected item to be a variable: %s in (%s)" % (
item, " ".join(items))
entry = constructor(item, _type)
result.append(entry)
return result
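# Worked example (illustrative, not in the original module): tokens before
# a "-" take the type that follows it; trailing tokens get default_type.
def _demo_parse_typed_list():
    result = parse_typed_list(["a", "b", "-", "block", "c"])
    return [(obj.name, obj.type_name) for obj in result]
    # -> [("a", "block"), ("b", "block"), ("c", "object")]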
def set_supertypes(type_list):
# TODO: This is a two-stage construction, which is perhaps
# not a great idea. Might need more thought in the future.
type_name_to_type = {}
child_types = []
for type in type_list:
type.supertype_names = []
type_name_to_type[type.name] = type
if type.basetype_name:
child_types.append((type.name, type.basetype_name))
for (desc_name, anc_name) in graph.transitive_closure(child_types):
type_name_to_type[desc_name].supertype_names.append(anc_name)
def parse_predicate(alist):
name = alist[0]
arguments = parse_typed_list(alist[1:], only_variables=True)
return pddl.Predicate(name, arguments)
def parse_function(alist, type_name):
name = alist[0]
arguments = parse_typed_list(alist[1:])
return pddl.Function(name, arguments, type_name)
def parse_condition(alist, type_dict, predicate_dict):
condition = parse_condition_aux(alist, False, type_dict, predicate_dict)
# TODO: The next line doesn't appear to do anything good,
# since uniquify_variables doesn't modify the condition in place.
# Conditions in actions or axioms are uniquified elsewhere, but
# it looks like goal conditions are never uniquified at all
# (which would be a bug).
condition.uniquify_variables({})
return condition
def parse_condition_aux(alist, negated, type_dict, predicate_dict):
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if tag in ("and", "or", "not", "imply"):
args = list()
for arg in alist[1:]:
if arg[0] == "=":
continue
if arg[0] == "not" and arg[1][0] == "=":
continue
args.append(arg)
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(
args[0], not negated, type_dict, predicate_dict)
elif tag in ("forall", "exists"):
parameters = parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
else:
return parse_literal(alist, type_dict, predicate_dict, negated=negated)
if tag == "imply":
parts = [parse_condition_aux(
args[0], not negated, type_dict, predicate_dict),
parse_condition_aux(
args[1], negated, type_dict, predicate_dict)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated, type_dict, predicate_dict) for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return pddl.Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return pddl.Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return pddl.UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return pddl.ExistentialCondition(parameters, parts)
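# Illustration (assuming predicates p and q are declared): with
# negated=False, ["not", ["and", ["p"], ["q"]]] is pushed into NNF on the
# fly and parses to pddl.Disjunction([NegatedAtom p, NegatedAtom q]).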
def parse_literal(alist, type_dict, predicate_dict, negated=False):
if alist[0] == "not":
assert len(alist) == 2
alist = alist[1]
negated = not negated
pred_id, arity = _get_predicate_id_and_arity(
alist[0], type_dict, predicate_dict)
if arity != len(alist) - 1:
raise SystemExit("predicate used with wrong arity: (%s)"
% " ".join(alist))
if negated:
return pddl.NegatedAtom(pred_id, alist[1:])
else:
return pddl.Atom(pred_id, alist[1:])
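# Illustrative sketch (hypothetical predicate): parsing a negated literal
# against a minimal predicate_dict. Relies on pddl.Predicate.get_arity(),
# the same accessor _get_predicate_id_and_arity below uses.
def _demo_parse_literal():
    on = pddl.Predicate("on", parse_typed_list(["?x", "?y"], only_variables=True))
    return parse_literal(["not", ["on", "?a", "?b"]], {}, {"on": on})
    # -> pddl.NegatedAtom with predicate "on" and args ("?a", "?b")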
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = False
def _get_predicate_id_and_arity(text, type_dict, predicate_dict):
global SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH
the_type = type_dict.get(text)
the_predicate = predicate_dict.get(text)
if the_type is None and the_predicate is None:
raise SystemExit("Undeclared predicate: %s" % text)
elif the_predicate is not None:
if the_type is not None and not SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH:
msg = ("Warning: name clash between type and predicate %r.\n"
"Interpreting as predicate in conditions.") % text
print(msg, file=sys.stderr)
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = True
return the_predicate.name, the_predicate.get_arity()
else:
assert the_type is not None
return the_type.get_predicate_name(), 1
def parse_effects(alist, result, type_dict, predicate_dict):
"""Parse a PDDL effect (any combination of simple, conjunctive, conditional, and universal)."""
tmp_effect = parse_effect(alist, type_dict, predicate_dict)
normalized = tmp_effect.normalize()
cost_eff, rest_effect = normalized.extract_cost()
add_effect(rest_effect, result)
if cost_eff:
return cost_eff.effect
else:
return None
def add_effect(tmp_effect, result):
"""tmp_effect has the following structure:
[ConjunctiveEffect] [UniversalEffect] [ConditionalEffect] SimpleEffect."""
if isinstance(tmp_effect, pddl.ConjunctiveEffect):
for effect in tmp_effect.effects:
add_effect(effect, result)
return
else:
parameters = []
condition = pddl.Truth()
if isinstance(tmp_effect, pddl.UniversalEffect):
parameters = tmp_effect.parameters
if isinstance(tmp_effect.effect, pddl.ConditionalEffect):
condition = tmp_effect.effect.condition
assert isinstance(tmp_effect.effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect.effect
else:
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
elif isinstance(tmp_effect, pddl.ConditionalEffect):
condition = tmp_effect.condition
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
else:
assert isinstance(tmp_effect, pddl.SimpleEffect)
effect = tmp_effect.effect
assert isinstance(effect, pddl.Literal)
# Check for contradictory effects
condition = condition.simplified()
new_effect = pddl.Effect(parameters, condition, effect)
contradiction = pddl.Effect(parameters, condition, effect.negate())
### REMOVED CONTRADICTION CHECK
result.append(new_effect)
# if not contradiction in result:
# result.append(new_effect)
# else:
# # We use add-after-delete semantics, keep positive effect
# if isinstance(contradiction.literal, pddl.NegatedAtom):
# result.remove(contradiction)
# result.append(new_effect)
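# Illustration: a normalized effect such as (when (clear ?x) (not (on ?x ?y)))
# reaches add_effect as a ConditionalEffect wrapping a SimpleEffect and is
# flattened into a single pddl.Effect([], clear(?x), NegatedAtom on(?x, ?y))
# appended to result.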
def parse_effect(alist, type_dict, predicate_dict):
tag = alist[0]
if tag == "and":
return pddl.ConjunctiveEffect(
[parse_effect(eff, type_dict, predicate_dict) for eff in alist[1:]])
elif tag == "forall":
assert len(alist) == 3
parameters = parse_typed_list(alist[1])
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.UniversalEffect(parameters, effect)
elif tag == "when":
assert len(alist) == 3
condition = parse_condition(
alist[1], type_dict, predicate_dict)
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.ConditionalEffect(condition, effect)
elif tag == "increase":
assert len(alist) == 3
assert alist[1] == ['total-cost']
assignment = parse_assignment(alist)
return pddl.CostEffect(assignment)
else:
# We pass in {} instead of type_dict here because types must
# be static predicates, so cannot be the target of an effect.
return pddl.SimpleEffect(parse_literal(alist, {}, predicate_dict))
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return pddl.PrimitiveNumericExpression(functionsymbol, exp[1:])
elif exp.replace(".", "").isdigit():
return pddl.NumericConstant(float(exp))
elif exp[0] == "-":
raise ValueError("Negative numbers are not supported")
else:
return pddl.PrimitiveNumericExpression(exp, [])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return pddl.Assign(head, exp)
elif op == "increase":
return pddl.Increase(head, exp)
else:
assert False, "Assignment operator not supported."
def parse_action(alist, type_dict, predicate_dict):
iterator = iter(alist)
action_tag = next(iterator)
assert action_tag == ":action"
name = next(iterator)
parameters_tag_opt = next(iterator)
if parameters_tag_opt == ":parameters":
parameters = parse_typed_list(next(iterator),
only_variables=True)
precondition_tag_opt = next(iterator)
else:
parameters = []
precondition_tag_opt = parameters_tag_opt
if precondition_tag_opt == ":precondition":
precondition_list = next(iterator)
if not precondition_list:
# Note that :precondition () is allowed in PDDL.
precondition = pddl.Conjunction([])
else:
precondition = parse_condition(
precondition_list, type_dict, predicate_dict)
precondition = precondition.simplified()
effect_tag = next(iterator)
else:
precondition = pddl.Conjunction([])
effect_tag = precondition_tag_opt
assert effect_tag == ":effect"
effect_list = next(iterator)
eff = []
if effect_list:
try:
cost = parse_effects(
effect_list, eff, type_dict, predicate_dict)
except ValueError as e:
raise SystemExit("Error in Action %s\nReason: %s." % (name, e))
for rest in iterator:
assert False, rest
# if eff:
# return pddl.Action(name, parameters, len(parameters),
# precondition, eff, cost)
# else:
# return None
return pddl.Action(name, parameters, len(parameters),
precondition, eff, None)
def parse_axiom(alist, type_dict, predicate_dict):
assert len(alist) == 3
assert alist[0] == ":derived"
predicate = parse_predicate(alist[1])
condition = parse_condition(
alist[2], type_dict, predicate_dict)
return pddl.Axiom(predicate.name, predicate.arguments,
len(predicate.arguments), condition)
def parse_task(domain_pddl, task_pddl):
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= parse_domain_pddl(domain_pddl)
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric = parse_task_pddl(task_pddl, type_dict, predicate_dict)
assert domain_name == task_domain_name
requirements = pddl.Requirements(sorted(set(
domain_requirements.requirements +
task_requirements.requirements)))
objects = constants + objects
check_for_duplicates(
[o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
# init += [pddl.Atom("=", (obj.name, obj.name)) for obj in objects]
return pddl.Task(
domain_name, task_name, requirements, types, objects,
predicates, functions, init, goal, actions, axioms, use_metric)
def parse_domain_pddl(domain_pddl):
iterator = iter(domain_pddl)
define_tag = next(iterator)
assert define_tag == "define"
domain_line = next(iterator)
if domain_line[0] == "domain":
assert domain_line[0] == "domain" and len(domain_line) == 2
else:
domain_line = ["domain", "unknown"]
yield domain_line[1]
## We allow an arbitrary order of the requirement, types, constants,
## predicates and functions specification. The PDDL BNF is more strict on
## this, so we print a warning if it is violated.
requirements = pddl.Requirements([":strips"])
the_types = [pddl.Type("object")]
constants, the_predicates, the_functions = [], [], []
correct_order = [":requirements", ":types", ":constants", ":predicates",
":functions"]
seen_fields = []
first_action = None
for opt in iterator:
field = opt[0]
if field not in correct_order:
first_action = opt
break
if field in seen_fields:
raise SystemExit("Error in domain specification\n" +
"Reason: two '%s' specifications." % field)
if (seen_fields and
correct_order.index(seen_fields[-1]) > correct_order.index(field)):
msg = "\nWarning: %s specification not allowed here (cf. PDDL BNF)" % field
print(msg, file=sys.stderr)
seen_fields.append(field)
if field == ":requirements":
requirements = pddl.Requirements(opt[1:])
elif field == ":types":
the_types.extend(parse_typed_list(
opt[1:], constructor=pddl.Type))
elif field == ":constants":
constants = parse_typed_list(opt[1:])
elif field == ":predicates":
the_predicates = [parse_predicate(entry)
for entry in opt[1:]]
# the_predicates += [pddl.Predicate("=",
# [pddl.TypedObject("?x", "object"),
# pddl.TypedObject("?y", "object")])]
elif field == ":functions":
the_functions = parse_typed_list(
opt[1:],
constructor=parse_function,
default_type="number")
set_supertypes(the_types)
yield requirements
yield the_types
type_dict = dict((type.name, type) for type in the_types)
yield type_dict
yield constants
yield the_predicates
predicate_dict = dict((pred.name, pred) for pred in the_predicates)
yield predicate_dict
yield the_functions
entries = []
if first_action is not None:
entries.append(first_action)
entries.extend(iterator)
the_axioms = []
the_actions = []
for entry in entries:
if entry[0] == ":derived":
axiom = parse_axiom(entry, type_dict, predicate_dict)
the_axioms.append(axiom)
else:
action = parse_action(entry, type_dict, predicate_dict)
if action is not None:
the_actions.append(action)
yield the_actions
yield the_axioms
def parse_task_pddl(task_pddl, type_dict, predicate_dict):
iterator = iter(task_pddl)
define_tag = next(iterator)
assert define_tag == "define"
problem_line = next(iterator)
assert problem_line[0] == "problem" and len(problem_line) == 2
yield problem_line[1]
domain_line = next(iterator)
assert domain_line[0] == ":domain" and len(domain_line) == 2
yield domain_line[1]
requirements_opt = next(iterator)
if requirements_opt[0] == ":requirements":
requirements = requirements_opt[1:]
objects_opt = next(iterator)
else:
requirements = []
objects_opt = requirements_opt
yield pddl.Requirements(requirements)
if objects_opt[0] == ":objects":
yield parse_typed_list(objects_opt[1:])
init = next(iterator)
else:
yield []
init = objects_opt
assert init[0] == ":init"
initial = []
initial_true = set()
initial_false = set()
initial_assignments = dict()
for fact in init[1:]:
if fact[0] == "=":
try:
assignment = parse_assignment(fact)
except ValueError as e:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s." % e)
if not isinstance(assignment.expression,
pddl.NumericConstant):
raise SystemExit("Illegal assignment in initial state " +
"specification:\n%s" % assignment)
if assignment.fluent in initial_assignments:
prev = initial_assignments[assignment.fluent]
if assignment.expression == prev.expression:
print("Warning: %s is specified twice" % assignment,
"in initial state specification")
else:
raise SystemExit("Error in initial state specification\n" +
"Reason: conflicting assignment for " +
"%s." % assignment.fluent)
else:
initial_assignments[assignment.fluent] = assignment
initial.append(assignment)
elif fact[0] == "not":
atom = pddl.Atom(fact[1][0], fact[1][1:])
check_atom_consistency(atom, initial_false, initial_true, False)
initial_false.add(atom)
else:
atom = pddl.Atom(fact[0], fact[1:])
check_atom_consistency(atom, initial_true, initial_false)
initial_true.add(atom)
initial.extend(initial_true)
yield initial
goal = next(iterator)
assert goal[0] == ":goal" and len(goal) == 2
yield parse_condition(goal[1], type_dict, predicate_dict)
use_metric = False
for entry in iterator:
if entry[0] == ":metric":
if entry[1]=="minimize" and entry[2][0] == "total-cost":
use_metric = True
else:
assert False, "Unknown metric."
yield use_metric
for entry in iterator:
assert False, entry
def get_static_predicates(trajectory, predicates):
candidates = set([p.name for p in predicates])
trace_candidates = set()
for predicate in candidates:
static = True
init_literals = set([l for l in trajectory[0] if l.predicate == predicate])
for state in trajectory[1:]:
state_literals = set([l for l in state if l.predicate == predicate])
if init_literals != state_literals:
static = False
break
if static:
trace_candidates.add(predicate)
candidates = candidates.intersection(trace_candidates)
return candidates
def parse_trace_pddl(trace_pddl, predicates, action_observability=1, state_observability=1, goal_observability=1, positive_goals=False, finite_steps=False):
random.seed(123)
iterator = iter(trace_pddl)
solution_tag = next(iterator)
assert solution_tag == "solution"
objects_opt = next(iterator)
assert objects_opt[0] == ":objects"
object_list = parse_typed_list(objects_opt[1:])
all_literals = set()
for predicate in predicates:
args = list()
for i in range(len(predicate.arguments)):
iargs = list()
for object in object_list:
if object.type_name == predicate.arguments[i].type_name:
iargs.append(object.name)
args.append(iargs)
for tup in itertools.product(*args):
all_literals.add(pddl.Atom(predicate.name, tup))
init = next(iterator)
assert init[0] == ":init"
initial = parse_state(init[1:], all_literals)
actions = list()
states = list()
for token in iterator:
if token[0] == ':observations':
aux_state = parse_state(token[1:], all_literals)
# new_state = [literal for literal in aux_state if random.random() <= state_observability]
# if len(new_state) == 0 and finite_steps:
# new_state = [aux_state[random.randint(0, len(aux_state))]]
# states.append(new_state)
states.append(aux_state)
elif token[0] == ':goal':
goal = parse_state(token[1:], all_literals)
# aux_goal = parse_state(token[1:], all_literals)
# if positive_goals:
# aux_goal = [literal for literal in aux_goal if not literal.negated]
#
# goal = [literal for literal in aux_goal if random.random() <= goal_observability]
# if len(goal) == 0:
# goal = [aux_goal[random.randint(0, len(aux_goal))]]
else:
if random.random() <= action_observability:
actions.append(token)
else:
actions.append([])
states = states[1:]
if positive_goals:
static_predicates = get_static_predicates(states + [goal], predicates)
# Apply observability
for i in range(len(states)):
state = states[i]
new_state = [literal for literal in state if random.random() <= state_observability]
        if len(new_state) == 0 and finite_steps:
            # random.randint is inclusive at both ends, so cap the index at len - 1
            new_state = [state[random.randint(0, len(state) - 1)]]
states[i] = new_state
if positive_goals:
aux_goal = [literal for literal in goal if not literal.negated and not literal.predicate in static_predicates]
else:
aux_goal = [literal for literal in goal]
goal = [literal for literal in aux_goal if random.random() <= goal_observability]
    if len(goal) == 0:
        goal = [aux_goal[random.randint(0, len(aux_goal) - 1)]]  # inclusive randint
states = states + [goal]
return pddl.Trace(object_list, initial, goal, actions, states)
def parse_state(new_state, all_literals):
state = []
state_true = set()
state_false = set()
state_assignments = dict()
for fact in new_state:
if fact[0] == "=":
try:
assignment = parse_assignment(fact)
except ValueError as e:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s." % e)
if not isinstance(assignment.expression,
pddl.NumericConstant):
raise SystemExit("Illegal assignment in initial state " +
"specification:\n%s" % assignment)
if assignment.fluent in state_assignments:
prev = state_assignments[assignment.fluent]
if assignment.expression == prev.expression:
print("Warning: %s is specified twice" % assignment,
"in initial state specification")
else:
raise SystemExit("Error in initial state specification\n" +
"Reason: conflicting assignment for " +
"%s." % assignment.fluent)
else:
state_assignments[assignment.fluent] = assignment
state.append(assignment)
elif fact[0] == "not":
atom = pddl.Atom(fact[1][0], fact[1][1:])
check_atom_consistency(atom, state_false, state_true, False)
state_false.add(atom)
else:
atom = pddl.Atom(fact[0], fact[1:])
check_atom_consistency(atom, state_true, state_false)
state_true.add(atom)
state.extend(state_true)
for atom in all_literals.difference(state_true):
state.append(pddl.NegatedAtom(atom.predicate, atom.args))
return sorted(state)
def check_atom_consistency(atom, same_truth_value, other_truth_value, atom_is_true=True):
if atom in other_truth_value:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s is true and false." % atom)
if atom in same_truth_value:
if not atom_is_true:
atom = atom.negate()
print("Warning: %s is specified twice in initial state specification" % atom)
def check_for_duplicates(elements, errmsg, finalmsg):
seen = set()
errors = []
for element in elements:
if element in seen:
errors.append(errmsg % element)
else:
seen.add(element)
if errors:
raise SystemExit("\n".join(errors) + "\n" + finalmsg)
| 25,474 | 36.027616 | 156 |
py
|
DAAISy
|
DAAISy-main/dependencies/fama/src/pddl_parser/pretty_print.py
|
from __future__ import print_function
import io
import textwrap
__all__ = ["print_nested_list"]
def tokenize_list(obj):
if isinstance(obj, list):
yield "("
for item in obj:
for elem in tokenize_list(item):
yield elem
yield ")"
else:
yield obj
def wrap_lines(lines):
for line in lines:
indent = " " * (len(line) - len(line.lstrip()) + 4)
line = line.replace("-", "_") # textwrap breaks on "-", but not "_"
line = textwrap.fill(line, subsequent_indent=indent, break_long_words=False)
yield line.replace("_", "-")
def print_nested_list(nested_list):
stream = io.StringIO()
indent = 0
startofline = True
pendingspace = False
for token in tokenize_list(nested_list):
if token == "(":
if not startofline:
stream.write("\n")
stream.write("%s(" % (" " * indent))
indent += 2
startofline = False
pendingspace = False
elif token == ")":
indent -= 2
stream.write(")")
startofline = False
pendingspace = False
else:
if startofline:
stream.write(" " * indent)
if pendingspace:
stream.write(" ")
stream.write(token)
startofline = False
pendingspace = True
for line in wrap_lines(stream.getvalue().splitlines()):
print(line)
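# Illustrative usage (not part of the original module): round-tripping a
# nested list back into indented, line-wrapped PDDL-like text.
def _demo_print_nested_list():
    print_nested_list(
        ["define", ["domain", "blocks"],
         [":predicates", ["on", "?x", "?y"], ["clear", "?x"]]])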
| 1,492 | 26.648148 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/fast-downward.py
|
#! /usr/bin/env python3
if __name__ == "__main__":
from driver.main import main
main()
| 96 | 15.166667 | 32 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/build_configs.py
|
release = ["-DCMAKE_BUILD_TYPE=Release"]
debug = ["-DCMAKE_BUILD_TYPE=Debug"]
releasenolp = ["-DCMAKE_BUILD_TYPE=Release", "-DUSE_LP=NO"]
debugnolp = ["-DCMAKE_BUILD_TYPE=Debug", "-DUSE_LP=NO"]
minimal = ["-DCMAKE_BUILD_TYPE=Release", "-DDISABLE_PLUGINS_BY_DEFAULT=YES"]
DEFAULT = "release"
DEBUG = "debug"
| 308 | 33.333333 | 76 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/build.py
|
#!/usr/bin/env python3
import errno
import glob
import multiprocessing
import os
import subprocess
import sys
CONFIGS = {}
script_dir = os.path.dirname(__file__)
for config_file in sorted(glob.glob(os.path.join(script_dir, "*build_configs.py"))):
with open(config_file) as f:
config_file_content = f.read()
exec(config_file_content, globals(), CONFIGS)
DEFAULT_CONFIG_NAME = CONFIGS.pop("DEFAULT")
DEBUG_CONFIG_NAME = CONFIGS.pop("DEBUG")
CMAKE = "cmake"
DEFAULT_MAKE_PARAMETERS = []
if os.name == "posix":
MAKE = "make"
try:
num_cpus = multiprocessing.cpu_count()
except NotImplementedError:
pass
else:
DEFAULT_MAKE_PARAMETERS.append('-j{}'.format(num_cpus))
CMAKE_GENERATOR = "Unix Makefiles"
elif os.name == "nt":
MAKE = "nmake"
CMAKE_GENERATOR = "NMake Makefiles"
else:
print("Unsupported OS: " + os.name)
sys.exit(1)
def print_usage():
script_name = os.path.basename(__file__)
configs = []
for name, args in sorted(CONFIGS.items()):
if name == DEFAULT_CONFIG_NAME:
name += " (default)"
if name == DEBUG_CONFIG_NAME:
name += " (default with --debug)"
configs.append(name + "\n " + " ".join(args))
configs_string = "\n ".join(configs)
cmake_name = os.path.basename(CMAKE)
make_name = os.path.basename(MAKE)
generator_name = CMAKE_GENERATOR.lower()
default_config_name = DEFAULT_CONFIG_NAME
debug_config_name = DEBUG_CONFIG_NAME
print("""Usage: {script_name} [BUILD [BUILD ...]] [--all] [--debug] [MAKE_OPTIONS]
Build one or more predefined build configurations of Fast Downward. Each build
uses {cmake_name} to generate {generator_name} and then uses {make_name} to compile the
code. Build configurations differ in the parameters they pass to {cmake_name}.
By default, the build uses N threads on a machine with N cores if the number of
cores can be determined. Use the "-j" option for {make_name} to override this default
behaviour.
Build configurations
{configs_string}
--all Alias to build all build configurations.
--debug Alias to build the default debug build configuration.
--help Print this message and exit.
Make options
All other parameters are forwarded to {make_name}.
Example usage:
./{script_name} # build {default_config_name} in #cores threads
./{script_name} -j4 # build {default_config_name} in 4 threads
./{script_name} debug # build debug
./{script_name} --debug # build {debug_config_name}
./{script_name} release debug # build release and debug configs
./{script_name} --all VERBOSE=true # build all build configs with detailed logs
""".format(**locals()))
def get_project_root_path():
import __main__
return os.path.dirname(__main__.__file__)
def get_builds_path():
return os.path.join(get_project_root_path(), "builds")
def get_src_path():
return os.path.join(get_project_root_path(), "src")
def get_build_path(config_name):
return os.path.join(get_builds_path(), config_name)
def try_run(cmd, cwd):
print('Executing command "{}" in directory "{}".'.format(" ".join(cmd), cwd))
try:
subprocess.check_call(cmd, cwd=cwd)
except OSError as exc:
if exc.errno == errno.ENOENT:
print("Could not find '%s' on your PATH. For installation instructions, "
"see http://www.fast-downward.org/ObtainingAndRunningFastDownward." %
cmd[0])
sys.exit(1)
else:
raise
def build(config_name, cmake_parameters, make_parameters):
print("Building configuration {config_name}.".format(**locals()))
build_path = get_build_path(config_name)
rel_src_path = os.path.relpath(get_src_path(), build_path)
try:
os.makedirs(build_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(build_path):
pass
else:
raise
try_run([CMAKE, "-G", CMAKE_GENERATOR] + cmake_parameters + [rel_src_path],
cwd=build_path)
try_run([MAKE] + make_parameters, cwd=build_path)
print("Built configuration {config_name} successfully.".format(**locals()))
def main():
config_names = set()
    make_parameters = list(DEFAULT_MAKE_PARAMETERS)  # copy: appended args must not mutate the module default
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h":
print_usage()
sys.exit(0)
elif arg == "--debug":
config_names.add(DEBUG_CONFIG_NAME)
elif arg == "--all":
config_names |= set(CONFIGS.keys())
elif arg in CONFIGS:
config_names.add(arg)
else:
make_parameters.append(arg)
if not config_names:
config_names.add(DEFAULT_CONFIG_NAME)
for config_name in config_names:
build(config_name, CONFIGS[config_name], make_parameters)
if __name__ == "__main__":
main()
| 4,963 | 31.233766 | 87 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/pddl_to_prolog.py
|
#! /usr/bin/env python3
import itertools
import normalize
import pddl
import timers
class PrologProgram:
def __init__(self):
self.facts = []
self.rules = []
self.objects = set()
def predicate_name_generator():
for count in itertools.count():
yield "p$%d" % count
self.new_name = predicate_name_generator()
def add_fact(self, atom):
self.facts.append(Fact(atom))
self.objects |= set(atom.args)
def add_rule(self, rule):
self.rules.append(rule)
def dump(self, file=None):
for fact in self.facts:
print(fact, file=file)
for rule in self.rules:
print(getattr(rule, "type", "none"), rule, file=file)
def normalize(self):
# Normalized prolog programs have the following properties:
# 1. Each variable that occurs in the effect of a rule also occurs in its
# condition.
# 2. The variables that appear in each effect or condition are distinct.
# 3. There are no rules with empty condition.
self.remove_free_effect_variables()
self.split_duplicate_arguments()
self.convert_trivial_rules()
def split_rules(self):
import split_rules
# Splits rules whose conditions can be partitioned in such a way that
# the parts have disjoint variable sets, then split n-ary joins into
# a number of binary joins, introducing new pseudo-predicates for the
# intermediate values.
new_rules = []
for rule in self.rules:
new_rules += split_rules.split_rule(rule, self.new_name)
self.rules = new_rules
def remove_free_effect_variables(self):
"""Remove free effect variables like the variable Y in the rule
p(X, Y) :- q(X). This is done by introducing a new predicate
@object, setting it true for all objects, and translating the above
rule to p(X, Y) :- q(X), @object(Y).
After calling this, no new objects should be introduced!"""
# Note: This should never be necessary for typed domains.
# Leaving it in at the moment regardless.
must_add_predicate = False
for rule in self.rules:
eff_vars = get_variables([rule.effect])
cond_vars = get_variables(rule.conditions)
if not eff_vars.issubset(cond_vars):
must_add_predicate = True
eff_vars -= cond_vars
for var in sorted(eff_vars):
rule.add_condition(pddl.Atom("@object", [var]))
if must_add_predicate:
print("Unbound effect variables: Adding @object predicate.")
self.facts += [Fact(pddl.Atom("@object", [obj])) for obj in self.objects]
def split_duplicate_arguments(self):
"""Make sure that no variable occurs twice within the same symbolic fact,
like the variable X does in p(X, Y, X). This is done by renaming the second
and following occurrences of the variable and adding equality conditions.
For example p(X, Y, X) is translated to p(X, Y, X@0) with the additional
condition =(X, X@0); the equality predicate must be appropriately instantiated
somewhere else."""
printed_message = False
for rule in self.rules:
if rule.rename_duplicate_variables() and not printed_message:
print("Duplicate arguments: Adding equality conditions.")
printed_message = True
def convert_trivial_rules(self):
"""Convert rules with an empty condition into facts.
This must be called after bounding rule effects, so that rules with an
empty condition must necessarily have a variable-free effect.
Variable-free effects are the only ones for which a distinction between
ground and symbolic atoms is not necessary."""
must_delete_rules = []
for i, rule in enumerate(self.rules):
if not rule.conditions:
assert not get_variables([rule.effect])
self.add_fact(pddl.Atom(rule.effect.predicate, rule.effect.args))
must_delete_rules.append(i)
if must_delete_rules:
print("Trivial rules: Converted to facts.")
for rule_no in must_delete_rules[::-1]:
del self.rules[rule_no]
def get_variables(symbolic_atoms):
variables = set()
for sym_atom in symbolic_atoms:
variables |= {arg for arg in sym_atom.args if arg[0] == "?"}
return variables
class Fact:
def __init__(self, atom):
self.atom = atom
def __str__(self):
return "%s." % self.atom
class Rule:
def __init__(self, conditions, effect):
self.conditions = conditions
self.effect = effect
def add_condition(self, condition):
self.conditions.append(condition)
def get_variables(self):
return get_variables(self.conditions + [self.effect])
def _rename_duplicate_variables(self, atom, new_conditions):
used_variables = set()
for i, var_name in enumerate(atom.args):
if var_name[0] == "?":
if var_name in used_variables:
new_var_name = "%s@%d" % (var_name, len(new_conditions))
atom = atom.replace_argument(i, new_var_name)
new_conditions.append(pddl.Atom("=", [var_name, new_var_name]))
else:
used_variables.add(var_name)
return atom
def rename_duplicate_variables(self):
extra_conditions = []
self.effect = self._rename_duplicate_variables(
self.effect, extra_conditions)
old_conditions = self.conditions
self.conditions = []
for condition in old_conditions:
self.conditions.append(self._rename_duplicate_variables(
condition, extra_conditions))
self.conditions += extra_conditions
return bool(extra_conditions)
def __str__(self):
cond_str = ", ".join(map(str, self.conditions))
return "%s :- %s." % (self.effect, cond_str)
def translate_typed_object(prog, obj, type_dict):
supertypes = type_dict[obj.type_name].supertype_names
for type_name in [obj.type_name] + supertypes:
prog.add_fact(pddl.TypedObject(obj.name, type_name).get_atom())
def translate_facts(prog, task):
type_dict = {type.name: type for type in task.types}
for obj in task.objects:
translate_typed_object(prog, obj, type_dict)
for fact in task.init:
assert isinstance(fact, pddl.Atom) or isinstance(fact, pddl.Assign)
if isinstance(fact, pddl.Atom):
prog.add_fact(fact)
def translate(task):
# Note: The function requires that the task has been normalized.
with timers.timing("Generating Datalog program"):
prog = PrologProgram()
translate_facts(prog, task)
for conditions, effect in normalize.build_exploration_rules(task):
prog.add_rule(Rule(conditions, effect))
with timers.timing("Normalizing Datalog program", block=True):
# Using block=True because normalization can output some messages
# in rare cases.
prog.normalize()
prog.split_rules()
return prog
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
normalize.normalize(task)
prog = translate(task)
prog.dump()
| 7,436 | 40.316667 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/build_model.py
|
#! /usr/bin/env python3
import sys
import itertools
import pddl
import timers
from functools import reduce
def convert_rules(prog):
RULE_TYPES = {
"join": JoinRule,
"product": ProductRule,
"project": ProjectRule,
}
result = []
for rule in prog.rules:
RuleType = RULE_TYPES[rule.type]
new_effect, new_conditions = variables_to_numbers(
rule.effect, rule.conditions)
rule = RuleType(new_effect, new_conditions)
rule.validate()
result.append(rule)
return result
def variables_to_numbers(effect, conditions):
new_effect_args = list(effect.args)
rename_map = {}
for i, arg in enumerate(effect.args):
if arg[0] == "?":
rename_map[arg] = i
new_effect_args[i] = i
new_effect = pddl.Atom(effect.predicate, new_effect_args)
# There are three possibilities for arguments in conditions:
# 1. They are variables that occur in the effect. In that case,
# they are replaced by the corresponding position in the
# effect, as indicated by the rename_map.
# 2. They are constants. In that case, the unifier must guarantee
# that they are matched appropriately. In that case, they are
# not modified (remain strings denoting objects).
# 3. They are variables that don't occur in the effect (are
# projected away). This is only allowed in projection rules.
# Such arguments are also not modified (remain "?x" strings).
new_conditions = []
for cond in conditions:
new_cond_args = [rename_map.get(arg, arg) for arg in cond.args]
new_conditions.append(pddl.Atom(cond.predicate, new_cond_args))
return new_effect, new_conditions
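# Illustrative sketch: effect variables become their positional index, and
# condition arguments are rewritten through the same rename_map while
# constants (here "loc1", hypothetical) stay as strings.
def _demo_variables_to_numbers():
    effect = pddl.Atom("goal", ["?x", "?y"])
    conditions = [pddl.Atom("at", ["?x", "loc1"]), pddl.Atom("free", ["?y"])]
    return variables_to_numbers(effect, conditions)
    # effect args become (0, 1); conditions become at(0, "loc1"), free(1)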
class BuildRule:
def prepare_effect(self, new_atom, cond_index):
effect_args = list(self.effect.args)
cond = self.conditions[cond_index]
for var_no, obj in zip(cond.args, new_atom.args):
if isinstance(var_no, int):
effect_args[var_no] = obj
return effect_args
def __str__(self):
return "%s :- %s" % (self.effect, ", ".join(map(str, self.conditions)))
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self)
class JoinRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
left_args = conditions[0].args
right_args = conditions[1].args
left_vars = {var for var in left_args if isinstance(var, int)}
right_vars = {var for var in right_args if isinstance(var, int)}
common_vars = sorted(left_vars & right_vars)
self.common_var_positions = [
[args.index(var) for var in common_vars]
for args in (list(left_args), list(right_args))]
self.atoms_by_key = ({}, {})
def validate(self):
assert len(self.conditions) == 2, self
left_args = self.conditions[0].args
right_args = self.conditions[1].args
eff_args = self.effect.args
left_vars = {v for v in left_args
if isinstance(v, int) or v[0] == "?"}
right_vars = {v for v in right_args
if isinstance(v, int) or v[0] == "?"}
eff_vars = {v for v in eff_args
if isinstance(v, int) or v[0] == "?"}
assert left_vars & right_vars, self
assert (left_vars | right_vars) == (left_vars & right_vars) | eff_vars, self
def update_index(self, new_atom, cond_index):
ordered_common_args = [
new_atom.args[position]
for position in self.common_var_positions[cond_index]]
key = tuple(ordered_common_args)
self.atoms_by_key[cond_index].setdefault(key, []).append(new_atom)
def fire(self, new_atom, cond_index, enqueue_func):
effect_args = self.prepare_effect(new_atom, cond_index)
ordered_common_args = [
new_atom.args[position]
for position in self.common_var_positions[cond_index]]
key = tuple(ordered_common_args)
other_cond_index = 1 - cond_index
other_cond = self.conditions[other_cond_index]
for atom in self.atoms_by_key[other_cond_index].get(key, []):
for var_no, obj in zip(other_cond.args, atom.args):
if isinstance(var_no, int):
effect_args[var_no] = obj
enqueue_func(self.effect.predicate, effect_args)
class ProductRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
        self.atoms_by_index = [[] for _ in self.conditions]
self.empty_atom_list_no = len(self.conditions)
def validate(self):
assert len(self.conditions) >= 2, self
cond_vars = [{v for v in cond.args
if isinstance(v, int) or v[0] == "?"}
for cond in self.conditions]
all_cond_vars = reduce(set.union, cond_vars)
eff_vars = {v for v in self.effect.args
if isinstance(v, int) or v[0] == "?"}
assert len(all_cond_vars) == len(eff_vars), self
assert len(all_cond_vars) == sum([len(c) for c in cond_vars])
def update_index(self, new_atom, cond_index):
atom_list = self.atoms_by_index[cond_index]
if not atom_list:
self.empty_atom_list_no -= 1
atom_list.append(new_atom)
def _get_bindings(self, atom, cond):
return [(var_no, obj) for var_no, obj in zip(cond.args, atom.args)
if isinstance(var_no, int)]
def fire(self, new_atom, cond_index, enqueue_func):
if self.empty_atom_list_no:
return
# Binding: a (var_no, object) pair
# Bindings: List-of(Binding)
# BindingsFactor: List-of(Bindings)
# BindingsFactors: List-of(BindingsFactor)
bindings_factors = []
for pos, cond in enumerate(self.conditions):
if pos == cond_index:
continue
atoms = self.atoms_by_index[pos]
assert atoms, "if we have no atoms, this should never be called"
factor = [self._get_bindings(atom, cond) for atom in atoms]
bindings_factors.append(factor)
eff_args = self.prepare_effect(new_atom, cond_index)
for bindings_list in itertools.product(*bindings_factors):
bindings = itertools.chain(*bindings_list)
for var_no, obj in bindings:
eff_args[var_no] = obj
enqueue_func(self.effect.predicate, eff_args)
class ProjectRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
def validate(self):
assert len(self.conditions) == 1
def update_index(self, new_atom, cond_index):
pass
def fire(self, new_atom, cond_index, enqueue_func):
effect_args = self.prepare_effect(new_atom, cond_index)
enqueue_func(self.effect.predicate, effect_args)
class Unifier:
def __init__(self, rules):
self.predicate_to_rule_generator = {}
for rule in rules:
for i, cond in enumerate(rule.conditions):
self._insert_condition(rule, i)
def unify(self, atom):
result = []
generator = self.predicate_to_rule_generator.get(atom.predicate)
if generator:
generator.generate(atom, result)
return result
def _insert_condition(self, rule, cond_index):
condition = rule.conditions[cond_index]
root = self.predicate_to_rule_generator.get(condition.predicate)
if not root:
root = LeafGenerator()
constant_arguments = [
(arg_index, arg)
for (arg_index, arg) in enumerate(condition.args)
if not isinstance(arg, int) and arg[0] != "?"]
newroot = root._insert(constant_arguments, (rule, cond_index))
self.predicate_to_rule_generator[condition.predicate] = newroot
def dump(self):
predicates = sorted(self.predicate_to_rule_generator)
print("Unifier:")
for pred in predicates:
print(" %s:" % pred)
rule_gen = self.predicate_to_rule_generator[pred]
rule_gen.dump(" " * 2)
class LeafGenerator:
index = sys.maxsize
def __init__(self):
self.matches = []
def empty(self):
return not self.matches
def generate(self, atom, result):
result += self.matches
def _insert(self, args, value):
if not args:
self.matches.append(value)
return self
else:
root = LeafGenerator()
root.matches.append(value)
for arg_index, arg in args[::-1]:
new_root = MatchGenerator(arg_index, LeafGenerator())
new_root.match_generator[arg] = root
root = new_root
root.matches = self.matches # can be swapped in C++
return root
def dump(self, indent):
for match in self.matches:
print("%s%s" % (indent, match))
class MatchGenerator:
def __init__(self, index, next):
self.index = index
self.matches = []
self.match_generator = {}
self.next = next
def empty(self):
return False
def generate(self, atom, result):
result += self.matches
generator = self.match_generator.get(atom.args[self.index])
if generator:
generator.generate(atom, result)
self.next.generate(atom, result)
def _insert(self, args, value):
if not args:
self.matches.append(value)
return self
else:
arg_index, arg = args[0]
if self.index < arg_index:
self.next = self.next._insert(args, value)
return self
elif self.index > arg_index:
new_parent = MatchGenerator(arg_index, self)
new_branch = LeafGenerator()._insert(args[1:], value)
new_parent.match_generator[arg] = new_branch
return new_parent
else:
branch_generator = self.match_generator.get(arg)
if not branch_generator:
branch_generator = LeafGenerator()
self.match_generator[arg] = branch_generator._insert(
args[1:], value)
return self
def dump(self, indent):
for match in self.matches:
print("%s%s" % (indent, match))
for key in sorted(self.match_generator.keys()):
print("%sargs[%s] == %s:" % (indent, self.index, key))
self.match_generator[key].dump(indent + " ")
if not self.next.empty():
assert isinstance(self.next, MatchGenerator)
print("%s[*]" % indent)
self.next.dump(indent + " ")
class Queue:
def __init__(self, atoms):
self.queue = atoms
self.queue_pos = 0
self.enqueued = {(atom.predicate,) + tuple(atom.args)
for atom in self.queue}
self.num_pushes = len(atoms)
def __bool__(self):
return self.queue_pos < len(self.queue)
__nonzero__ = __bool__
def push(self, predicate, args):
self.num_pushes += 1
eff_tuple = (predicate,) + tuple(args)
if eff_tuple not in self.enqueued:
self.enqueued.add(eff_tuple)
self.queue.append(pddl.Atom(predicate, list(args)))
def pop(self):
result = self.queue[self.queue_pos]
self.queue_pos += 1
return result
def compute_model(prog):
with timers.timing("Preparing model"):
rules = convert_rules(prog)
unifier = Unifier(rules)
# unifier.dump()
fact_atoms = sorted(fact.atom for fact in prog.facts)
queue = Queue(fact_atoms)
print("Generated %d rules." % len(rules))
with timers.timing("Computing model"):
relevant_atoms = 0
auxiliary_atoms = 0
while queue:
next_atom = queue.pop()
pred = next_atom.predicate
if isinstance(pred, str) and "$" in pred:
auxiliary_atoms += 1
else:
relevant_atoms += 1
matches = unifier.unify(next_atom)
for rule, cond_index in matches:
rule.update_index(next_atom, cond_index)
rule.fire(next_atom, cond_index, queue.push)
print("%d relevant atoms" % relevant_atoms)
print("%d auxiliary atoms" % auxiliary_atoms)
print("%d final queue length" % len(queue.queue))
print("%d total queue pushes" % queue.num_pushes)
return queue.queue
if __name__ == "__main__":
import pddl_parser
import normalize
import pddl_to_prolog
print("Parsing...")
task = pddl_parser.open()
print("Normalizing...")
normalize.normalize(task)
print("Writing rules...")
prog = pddl_to_prolog.translate(task)
model = compute_model(prog)
for atom in model:
print(atom)
print("%d atoms" % len(model))
| 13,114 | 37.125 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/invariant_finder.py
|
#! /usr/bin/env python3
from collections import deque, defaultdict
import itertools
import time
import invariants
import options
import pddl
import timers
class BalanceChecker:
def __init__(self, task, reachable_action_params):
self.predicates_to_add_actions = defaultdict(set)
self.action_to_heavy_action = {}
for act in task.actions:
action = self.add_inequality_preconds(act, reachable_action_params)
too_heavy_effects = []
create_heavy_act = False
heavy_act = action
for eff in action.effects:
too_heavy_effects.append(eff)
if eff.parameters: # universal effect
create_heavy_act = True
too_heavy_effects.append(eff.copy())
if not eff.literal.negated:
predicate = eff.literal.predicate
self.predicates_to_add_actions[predicate].add(action)
if create_heavy_act:
heavy_act = pddl.Action(action.name, action.parameters,
action.num_external_parameters,
action.precondition, too_heavy_effects,
action.cost)
# heavy_act: duplicated universal effects and assigned unique names
# to all quantified variables (implicitly in constructor)
self.action_to_heavy_action[action] = heavy_act
def get_threats(self, predicate):
return self.predicates_to_add_actions.get(predicate, set())
def get_heavy_action(self, action):
return self.action_to_heavy_action[action]
def add_inequality_preconds(self, action, reachable_action_params):
if reachable_action_params is None or len(action.parameters) < 2:
return action
inequal_params = []
combs = itertools.combinations(range(len(action.parameters)), 2)
for pos1, pos2 in combs:
for params in reachable_action_params[action]:
if params[pos1] == params[pos2]:
break
else:
inequal_params.append((pos1, pos2))
if inequal_params:
precond_parts = [action.precondition]
for pos1, pos2 in inequal_params:
param1 = action.parameters[pos1].name
param2 = action.parameters[pos2].name
new_cond = pddl.NegatedAtom("=", (param1, param2))
precond_parts.append(new_cond)
precond = pddl.Conjunction(precond_parts).simplified()
return pddl.Action(
action.name, action.parameters, action.num_external_parameters,
precond, action.effects, action.cost)
else:
return action
def get_fluents(task):
fluent_names = set()
for action in task.actions:
for eff in action.effects:
fluent_names.add(eff.literal.predicate)
return [pred for pred in task.predicates if pred.name in fluent_names]
def get_initial_invariants(task):
for predicate in get_fluents(task):
all_args = list(range(len(predicate.arguments)))
for omitted_arg in [-1] + all_args:
order = [i for i in all_args if i != omitted_arg]
part = invariants.InvariantPart(predicate.name, order, omitted_arg)
yield invariants.Invariant((part,))
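# Illustration: for a fluent predicate on(?x, ?y) this yields three initial
# candidates, one per choice of omitted argument (-1, 0, 1), i.e. invariant
# parts with argument orders [0, 1], [1], and [0] respectively.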
def find_invariants(task, reachable_action_params):
limit = options.invariant_generation_max_candidates
candidates = deque(itertools.islice(get_initial_invariants(task), 0, limit))
print(len(candidates), "initial candidates")
seen_candidates = set(candidates)
balance_checker = BalanceChecker(task, reachable_action_params)
def enqueue_func(invariant):
if len(seen_candidates) < limit and invariant not in seen_candidates:
candidates.append(invariant)
seen_candidates.add(invariant)
start_time = time.process_time()
while candidates:
candidate = candidates.popleft()
if time.process_time() - start_time > options.invariant_generation_max_time:
print("Time limit reached, aborting invariant generation")
return
if candidate.check_balance(balance_checker, enqueue_func):
yield candidate
def useful_groups(invariants, initial_facts):
predicate_to_invariants = defaultdict(list)
for invariant in invariants:
for predicate in invariant.predicates:
predicate_to_invariants[predicate].append(invariant)
nonempty_groups = set()
overcrowded_groups = set()
for atom in initial_facts:
if isinstance(atom, pddl.Assign):
continue
for invariant in predicate_to_invariants.get(atom.predicate, ()):
group_key = (invariant, tuple(invariant.get_parameters(atom)))
if group_key not in nonempty_groups:
nonempty_groups.add(group_key)
else:
overcrowded_groups.add(group_key)
useful_groups = nonempty_groups - overcrowded_groups
for (invariant, parameters) in useful_groups:
yield [part.instantiate(parameters) for part in sorted(invariant.parts)]
def get_groups(task, reachable_action_params=None):
with timers.timing("Finding invariants", block=True):
invariants = sorted(find_invariants(task, reachable_action_params))
with timers.timing("Checking invariant weight"):
result = list(useful_groups(invariants, task.init))
return result
if __name__ == "__main__":
import normalize
import pddl_parser
print("Parsing...")
task = pddl_parser.open()
print("Normalizing...")
normalize.normalize(task)
print("Finding invariants...")
print("NOTE: not passing in reachable_action_params.")
print("This means fewer invariants might be found.")
for invariant in find_invariants(task, None):
print(invariant)
print("Finding fact groups...")
groups = get_groups(task)
for group in groups:
print("[%s]" % ", ".join(map(str, group)))
| 6,100 | 38.875817 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/split_rules.py
|
# split_rules: Split rules whose conditions fall into different "connected
# components" (where to conditions are related if they share a variabe) into
# several rules, one for each connected component and one high-level rule.
from pddl_to_prolog import Rule, get_variables
import graph
import greedy_join
import pddl
def get_connected_conditions(conditions):
agraph = graph.Graph(conditions)
var_to_conditions = {var: [] for var in get_variables(conditions)}
for cond in conditions:
for var in cond.args:
if var[0] == "?":
var_to_conditions[var].append(cond)
# Connect conditions with a common variable
for var, conds in var_to_conditions.items():
for cond in conds[1:]:
agraph.connect(conds[0], cond)
return sorted(map(sorted, agraph.connected_components()))
def project_rule(rule, conditions, name_generator):
predicate = next(name_generator)
effect_variables = set(rule.effect.args) & get_variables(conditions)
effect = pddl.Atom(predicate, sorted(effect_variables))
projected_rule = Rule(conditions, effect)
return projected_rule
def split_rule(rule, name_generator):
important_conditions, trivial_conditions = [], []
for cond in rule.conditions:
for arg in cond.args:
if arg[0] == "?":
important_conditions.append(cond)
break
else:
trivial_conditions.append(cond)
# important_conditions = [cond for cond in rule.conditions if cond.args]
# trivial_conditions = [cond for cond in rule.conditions if not cond.args]
components = get_connected_conditions(important_conditions)
if len(components) == 1 and not trivial_conditions:
return split_into_binary_rules(rule, name_generator)
projected_rules = [project_rule(rule, conditions, name_generator)
for conditions in components]
result = []
for proj_rule in projected_rules:
result += split_into_binary_rules(proj_rule, name_generator)
conditions = ([proj_rule.effect for proj_rule in projected_rules] +
trivial_conditions)
combining_rule = Rule(conditions, rule.effect)
if len(conditions) >= 2:
combining_rule.type = "product"
else:
combining_rule.type = "project"
result.append(combining_rule)
return result
def split_into_binary_rules(rule, name_generator):
if len(rule.conditions) <= 1:
rule.type = "project"
return [rule]
return greedy_join.greedy_join(rule, name_generator)
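# Illustrative sketch (hypothetical rule and name generator): conditions
# a(?x) and b(?y) share no variable, so the rule splits into two
# projections plus a combining product rule over their effects.
def _demo_split_rule():
    import itertools
    names = ("p$%d" % i for i in itertools.count())
    rule = Rule([pddl.Atom("a", ["?x"]), pddl.Atom("b", ["?y"])],
                pddl.Atom("goal", ["?x", "?y"]))
    return split_rule(rule, names)  # three rules: project, project, product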
| 2,569 | 36.246377 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/variable_order.py
|
from collections import defaultdict, deque
from itertools import chain
import heapq
import sccs
DEBUG = False
class CausalGraph:
"""Weighted causal graph used for defining a variable order.
The causal graph only contains pre->eff edges (in contrast to the
variant that also has eff<->eff edges).
The variable order is defined such that removing all edges v->v'
with v>v' induces an acyclic subgraph of the causal graph. This
corresponds to the pruning of the causal graph as described in the
JAIR 2006 Fast Downward paper for the causal graph heuristic. The
greedy method is based on weighting the edges of the causal graph.
In this implementation these weights slightly differ from the
description in the JAIR paper to reproduce the behaviour of the
original implementation in the preprocessor component of the
planner.
"""
def __init__(self, sas_task):
self.weighted_graph = defaultdict(lambda: defaultdict(int))
## var_no -> (var_no -> number)
self.predecessor_graph = defaultdict(set)
self.ordering = []
self.weight_graph_from_ops(sas_task.operators)
self.weight_graph_from_axioms(sas_task.axioms)
self.num_variables = len(sas_task.variables.ranges)
self.goal_map = dict(sas_task.goal.pairs)
def get_ordering(self):
if not self.ordering:
sccs = self.get_strongly_connected_components()
self.calculate_topological_pseudo_sort(sccs)
return self.ordering
def weight_graph_from_ops(self, operators):
### A source variable can be processed several times. This was
### probably not intended originally but in experiments (cf.
### issue26) it performed better than the (clearer) weighting
### described in the Fast Downward paper (which would require
### a more complicated implementation).
for op in operators:
source_vars = [var for (var, value) in op.prevail]
for var, pre, _, _ in op.pre_post:
if pre != -1:
source_vars.append(var)
for target, _, _, cond in op.pre_post:
for source in chain(source_vars, (var for var, _ in cond)):
if source != target:
self.weighted_graph[source][target] += 1
self.predecessor_graph[target].add(source)
def weight_graph_from_axioms(self, axioms):
for ax in axioms:
target = ax.effect[0]
for source, _ in ax.condition:
if source != target:
self.weighted_graph[source][target] += 1
self.predecessor_graph[target].add(source)
def get_strongly_connected_components(self):
unweighted_graph = [[] for _ in range(self.num_variables)]
        assert len(self.weighted_graph) <= self.num_variables
for source, target_weights in self.weighted_graph.items():
unweighted_graph[source] = sorted(target_weights.keys())
return sccs.get_sccs_adjacency_list(unweighted_graph)
def calculate_topological_pseudo_sort(self, sccs):
for scc in sccs:
if len(scc) > 1:
# component needs to be turned into acyclic subgraph
# Compute subgraph induced by scc
subgraph = defaultdict(list)
for var in scc:
# for each variable in component only list edges inside
# component.
subgraph_edges = subgraph[var]
for target, cost in sorted(self.weighted_graph[var].items()):
if target in scc:
if target in self.goal_map:
subgraph_edges.append((target, 100000 + cost))
subgraph_edges.append((target, cost))
self.ordering.extend(MaxDAG(subgraph, scc).get_result())
else:
self.ordering.append(scc[0])
def calculate_important_vars(self, goal):
# Note for future refactoring: it is perhaps more idiomatic
# and efficient to use a set rather than a defaultdict(bool).
necessary = defaultdict(bool)
for var, _ in goal.pairs:
if not necessary[var]:
necessary[var] = True
self.dfs(var, necessary)
return necessary
def dfs(self, node, necessary):
stack = [pred for pred in self.predecessor_graph[node]]
while stack:
n = stack.pop()
if not necessary[n]:
necessary[n] = True
stack.extend(pred for pred in self.predecessor_graph[n])
class MaxDAG:
"""Defines a variable ordering for a SCC of the (weighted) causal
graph.
Conceptually, the greedy algorithm successively picks a node with
minimal cummulated weight of incoming arcs and removes its
incident edges from the graph until only a single node remains
(cf. computation of total order of vertices when pruning the
causal graph in the Fast Downward JAIR 2006 paper).
"""
def __init__(self, graph, input_order):
self.weighted_graph = graph
# input_order is only used to get the same tie-breaking as
# with the old preprocessor
self.input_order = input_order
def get_result(self):
incoming_weights = defaultdict(int)
for weighted_edges in self.weighted_graph.values():
for target, weight in weighted_edges:
incoming_weights[target] += weight
weight_to_nodes = defaultdict(deque)
for node in self.input_order:
weight = incoming_weights[node]
weight_to_nodes[weight].append(node)
weights = list(weight_to_nodes.keys())
heapq.heapify(weights)
done = set()
result = []
while weights:
min_key = weights[0]
min_elem = None
entries = weight_to_nodes[min_key]
while entries and (min_elem is None or min_elem in done or
min_key > incoming_weights[min_elem]):
min_elem = entries.popleft()
if not entries:
del weight_to_nodes[min_key]
heapq.heappop(weights) # remove min_key from heap
if min_elem is None or min_elem in done:
# since we use lazy deletion from the heap weights,
# there can be weights with a "done" entry in
# weight_to_nodes
continue
done.add(min_elem)
result.append(min_elem)
for target, weight in self.weighted_graph[min_elem]:
if target not in done:
weight = weight % 100000
if weight == 0:
continue
old_in_weight = incoming_weights[target]
new_in_weight = old_in_weight - weight
incoming_weights[target] = new_in_weight
# add new entry to weight_to_nodes
if new_in_weight not in weight_to_nodes:
heapq.heappush(weights, new_in_weight)
weight_to_nodes[new_in_weight].append(target)
return result
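def _max_dag_example():
    # Added illustration under assumed inputs: node 0 accumulates incoming
    # weight 1 and node 1 accumulates weight 2, so the greedy order picks
    # node 0 first.
    graph = {0: [(1, 2)], 1: [(0, 1)]}
    assert MaxDAG(graph, [0, 1]).get_result() == [0, 1]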
class VariableOrder:
"""Apply a given variable order to a SAS task."""
def __init__(self, ordering):
"""Ordering is a list of variable numbers in the desired order.
If a variable does not occur in the ordering, it is removed
from the task.
"""
self.ordering = ordering
self.new_var = {v: i for i, v in enumerate(ordering)}
def apply_to_task(self, sas_task):
self._apply_to_variables(sas_task.variables)
self._apply_to_init(sas_task.init)
self._apply_to_goal(sas_task.goal)
self._apply_to_mutexes(sas_task.mutexes)
self._apply_to_operators(sas_task.operators)
self._apply_to_axioms(sas_task.axioms)
if DEBUG:
sas_task.validate()
def _apply_to_variables(self, variables):
ranges = []
layers = []
names = []
for index, var in enumerate(self.ordering):
ranges.append(variables.ranges[var])
layers.append(variables.axiom_layers[var])
names.append(variables.value_names[var])
variables.ranges = ranges
variables.axiom_layers = layers
variables.value_names = names
def _apply_to_init(self, init):
init.values = [init.values[var] for var in self.ordering]
def _apply_to_goal(self, goal):
goal.pairs = sorted((self.new_var[var], val)
for var, val in goal.pairs
if var in self.new_var)
def _apply_to_mutexes(self, mutexes):
new_mutexes = []
for group in mutexes:
facts = [(self.new_var[var], val) for var, val in group.facts
if var in self.new_var]
if facts and len({var for var, _ in facts}) > 1:
group.facts = facts
new_mutexes.append(group)
print("%s of %s mutex groups necessary." % (len(new_mutexes),
len(mutexes)))
mutexes[:] = new_mutexes
def _apply_to_operators(self, operators):
new_ops = []
for op in operators:
pre_post = []
for eff_var, pre, post, cond in op.pre_post:
if eff_var in self.new_var:
new_cond = list((self.new_var[var], val)
for var, val in cond
if var in self.new_var)
pre_post.append(
(self.new_var[eff_var], pre, post, new_cond))
if pre_post:
op.pre_post = pre_post
op.prevail = [(self.new_var[var], val)
for var, val in op.prevail
if var in self.new_var]
new_ops.append(op)
print("%s of %s operators necessary." % (len(new_ops),
len(operators)))
operators[:] = new_ops
def _apply_to_axioms(self, axioms):
new_axioms = []
for ax in axioms:
eff_var, eff_val = ax.effect
if eff_var in self.new_var:
ax.condition = [(self.new_var[var], val)
for var, val in ax.condition
if var in self.new_var]
ax.effect = (self.new_var[eff_var], eff_val)
new_axioms.append(ax)
print("%s of %s axiom rules necessary." % (len(new_axioms),
len(axioms)))
axioms[:] = new_axioms
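def _variable_order_example():
    # Added illustration: the ordering [2, 0] keeps variables 2 and 0
    # (renumbered to 0 and 1) and drops variable 1 from the task.
    order = VariableOrder([2, 0])
    assert order.new_var == {2: 0, 0: 1}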
def find_and_apply_variable_order(sas_task, reorder_vars=True,
filter_unimportant_vars=True):
if reorder_vars or filter_unimportant_vars:
cg = CausalGraph(sas_task)
if reorder_vars:
order = cg.get_ordering()
else:
order = list(range(len(sas_task.variables.ranges)))
if filter_unimportant_vars:
necessary = cg.calculate_important_vars(sas_task.goal)
print("%s of %s variables necessary." % (len(necessary),
len(order)))
order = [var for var in order if necessary[var]]
VariableOrder(order).apply_to_task(sas_task)
| 11,601 | 39.708772 | 81 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/axiom_rules.py
|
import options
import pddl
import sccs
import timers
from collections import defaultdict
from itertools import chain
DEBUG = False
class AxiomDependencies(object):
def __init__(self, axioms):
if DEBUG:
assert all(isinstance(axiom.effect, pddl.Atom) for axiom in axioms)
self.derived_variables = {axiom.effect for axiom in axioms}
self.positive_dependencies = defaultdict(set)
self.negative_dependencies = defaultdict(set)
for axiom in axioms:
head = axiom.effect
for body_literal in axiom.condition:
body_atom = body_literal.positive()
if body_atom in self.derived_variables:
if body_literal.negated:
self.negative_dependencies[head].add(body_atom)
else:
self.positive_dependencies[head].add(body_atom)
# Remove all information for variables whose literals are not necessary.
# We do not need to remove single entries from the dicts because if the key
# (= head of an axiom) is relevant, then all its values (= body of axiom)
# must be relevant by definition.
def remove_unnecessary_variables(self, necessary_literals):
for var in self.derived_variables.copy():
if var not in necessary_literals and var.negate() not in necessary_literals:
self.derived_variables.remove(var)
self.positive_dependencies.pop(var, None)
self.negative_dependencies.pop(var, None)
class AxiomCluster(object):
def __init__(self, derived_variables):
self.variables = derived_variables
self.axioms = dict((v, []) for v in derived_variables)
# Positive children will be populated with clusters that contain an
# atom that occurs in the body of an axiom whose head is from this
# cluster. Negative children analogous for atoms that occur negated
# in the body.
self.positive_children = set()
self.negative_children = set()
self.needed_negatively = False
self.layer = 0
def handle_axioms(operators, axioms, goals, layer_strategy):
clusters = compute_clusters(axioms, goals, operators)
axiom_layers = compute_axiom_layers(clusters, layer_strategy)
# TODO: It would be cleaner if these negated rules were an implementation
# detail of the heuristics in the search component that make use of them
# rather than part of the translation process. They should be removed in
# the future. Similarly, it would be a good idea to remove the notion of
# axiom layers and derived variable default values from the output.
# (All derived variables should be binary and default to false.)
with timers.timing("Computing negative axioms"):
compute_negative_axioms(clusters)
axioms = get_axioms(clusters)
if DEBUG:
verify_layering_condition(axioms, axiom_layers)
return axioms, axiom_layers
def compute_necessary_literals(dependencies, goals, operators):
necessary_literals = set()
for g in goals:
if g.positive() in dependencies.derived_variables:
necessary_literals.add(g)
for op in operators:
derived_preconditions = (l for l in op.precondition if l.positive()
in dependencies.derived_variables)
necessary_literals.update(derived_preconditions)
for condition, effect in chain(op.add_effects, op.del_effects):
for c in condition:
if c.positive() in dependencies.derived_variables:
necessary_literals.add(c)
necessary_literals.add(c.negate())
literals_to_process = list(necessary_literals)
while literals_to_process:
l = literals_to_process.pop()
atom = l.positive()
for body_atom in dependencies.positive_dependencies[atom]:
l2 = body_atom.negate() if l.negated else body_atom
if l2 not in necessary_literals:
literals_to_process.append(l2)
necessary_literals.add(l2)
for body_atom in dependencies.negative_dependencies[atom]:
l2 = body_atom if l.negated else body_atom.negate()
if l2 not in necessary_literals:
literals_to_process.append(l2)
necessary_literals.add(l2)
return necessary_literals
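# Example of the fixed point above (added illustration): if the goal contains
# a derived atom g defined only by the rule g <- not d, where d is derived as
# well, then g is necessary, and the negative dependency of g on d makes the
# literal "not d" necessary too.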
# Compute strongly connected components of the dependency graph.
# In order to receive a deterministic result, we first sort the variables.
# We then build adjacency lists over the variable indices based on dependencies.
def get_strongly_connected_components(dependencies):
sorted_vars = sorted(dependencies.derived_variables)
variable_to_index = {var: index for index, var in enumerate(sorted_vars)}
adjacency_list = []
for derived_var in sorted_vars:
pos = dependencies.positive_dependencies[derived_var]
neg = dependencies.negative_dependencies[derived_var]
indices = [variable_to_index[atom] for atom in sorted(pos.union(neg))]
adjacency_list.append(indices)
index_groups = sccs.get_sccs_adjacency_list(adjacency_list)
groups = [[sorted_vars[i] for i in g] for g in index_groups]
return groups
# Expects a list of axioms *with the same head* and returns a subset consisting
# of all non-dominated axioms whose conditions have been cleaned up
# (duplicate elimination).
def compute_simplified_axioms(axioms):
"""Remove duplicate axioms, duplicates within axioms, and dominated axioms."""
if DEBUG:
assert len(set(axiom.effect for axiom in axioms)) == 1
# Remove duplicates from axiom conditions.
for axiom in axioms:
axiom.condition = sorted(set(axiom.condition))
# Remove dominated axioms.
axioms_to_skip = set()
axioms_by_literal = defaultdict(set)
for axiom in axioms:
if axiom.effect in axiom.condition:
axioms_to_skip.add(id(axiom))
else:
for literal in axiom.condition:
axioms_by_literal[literal].add(id(axiom))
for axiom in axioms:
if id(axiom) in axioms_to_skip:
continue # Required to keep one of multiple identical axioms.
if not axiom.condition: # empty condition: dominates everything
return [axiom]
literals = iter(axiom.condition)
dominated_axioms = axioms_by_literal[next(literals)].copy()
for literal in literals:
dominated_axioms &= axioms_by_literal[literal]
for dominated_axiom in dominated_axioms:
if dominated_axiom != id(axiom):
axioms_to_skip.add(dominated_axiom)
return [axiom for axiom in axioms if id(axiom) not in axioms_to_skip]
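def _simplification_example():
    # Added illustration with assumed atom names: the axiom h <- a dominates
    # h <- a & b, so only the former survives.
    h, a, b = pddl.Atom("h", []), pddl.Atom("a", []), pddl.Atom("b", [])
    ax_short = pddl.PropositionalAxiom("ax", [a], h)
    ax_long = pddl.PropositionalAxiom("ax", [a, b], h)
    assert compute_simplified_axioms([ax_short, ax_long]) == [ax_short]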
def compute_clusters(axioms, goals, operators):
dependencies = AxiomDependencies(axioms)
# Compute necessary literals and prune unnecessary vars from dependencies.
necessary_literals = compute_necessary_literals(dependencies, goals, operators)
dependencies.remove_unnecessary_variables(necessary_literals)
groups = get_strongly_connected_components(dependencies)
clusters = [AxiomCluster(group) for group in groups]
# Compute mapping from variables to their clusters and set needed_negatively.
variable_to_cluster = {}
for cluster in clusters:
for variable in cluster.variables:
variable_to_cluster[variable] = cluster
if variable.negate() in necessary_literals:
cluster.needed_negatively = True
# Assign axioms to their clusters.
for axiom in axioms:
# axiom.effect is derived but might have been pruned
if axiom.effect in dependencies.derived_variables:
variable_to_cluster[axiom.effect].axioms[axiom.effect].append(axiom)
removed = 0
with timers.timing("Simplifying axioms"):
for cluster in clusters:
for variable in cluster.variables:
old_size = len(cluster.axioms[variable])
cluster.axioms[variable] = compute_simplified_axioms(cluster.axioms[variable])
removed += old_size - len(cluster.axioms[variable])
print("Translator axioms removed by simplifying: %d" % removed)
# Create links between clusters (positive dependencies).
for from_variable, depends_on in dependencies.positive_dependencies.items():
from_cluster = variable_to_cluster[from_variable]
for to_variable in depends_on:
to_cluster = variable_to_cluster[to_variable]
if from_cluster is not to_cluster:
from_cluster.positive_children.add(to_cluster)
# Create links between clusters (negative dependencies).
for from_variable, depends_on in dependencies.negative_dependencies.items():
from_cluster = variable_to_cluster[from_variable]
for to_variable in depends_on:
to_cluster = variable_to_cluster[to_variable]
if from_cluster is to_cluster:
raise ValueError("axioms are not stratifiable")
from_cluster.negative_children.add(to_cluster)
return clusters
# Assign every cluster the smallest possible layer.
def compute_single_cluster_layer(cluster):
layer = 0
for pos_child in cluster.positive_children:
layer = max(pos_child.layer, layer)
for neg_child in cluster.negative_children:
layer = max(neg_child.layer + 1, layer)
return layer
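def _layer_strategy_example():
    # Added illustration: a cluster that depends negatively on another
    # cluster must be placed at least one layer above it.
    base, dependent = AxiomCluster(set()), AxiomCluster(set())
    dependent.negative_children.add(base)
    assert compute_single_cluster_layer(dependent) == base.layer + 1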
# Clusters must be ordered topologically based on AxiomDependencies.
# Since we need to visit clusters containing variables that occur in the body
# of an axiom before we visit the cluster containing its head, we need to
# traverse the clusters in reverse order.
def compute_axiom_layers(clusters, strategy):
if strategy == "max":
layer = 0
for cluster in reversed(clusters):
cluster.layer = layer
layer += 1
elif strategy == "min":
for cluster in reversed(clusters):
cluster.layer = compute_single_cluster_layer(cluster)
layers = dict()
for cluster in clusters:
for variable in cluster.variables:
layers[variable] = cluster.layer
return layers
def compute_negative_axioms(clusters):
for cluster in clusters:
if cluster.needed_negatively:
if len(cluster.variables) > 1:
# If the cluster contains multiple variables, they have a cyclic
# positive dependency. In this case, the "obvious" way of
# negating the formula defining the derived variable is
# semantically wrong. For details, see issue453.
#
# Therefore, in this case we perform a naive overapproximation
# instead, which assumes that derived variables occurring in
# such clusters can be false unconditionally. This is good
# enough for correctness of the code that uses these negated
# axioms (within heuristics of the search component), but loses
# accuracy. Negating the rules in an exact
# (non-overapproximating) way is possible but more expensive.
# Again, see issue453 for details.
for variable in cluster.variables:
name = cluster.axioms[variable][0].name
negated_axiom = pddl.PropositionalAxiom(name, [], variable.negate())
cluster.axioms[variable].append(negated_axiom)
else:
variable = next(iter(cluster.variables))
negated_axioms = negate(cluster.axioms[variable])
cluster.axioms[variable] += negated_axioms
def negate(axioms):
assert axioms
result = [pddl.PropositionalAxiom(axioms[0].name, [], axioms[0].effect.negate())]
for axiom in axioms:
condition = axiom.condition
if len(condition) == 0:
# The derived fact we want to negate is triggered with an
# empty condition, so it is always true and its negation
# is always false.
return []
elif len(condition) == 1: # Handle easy special case quickly.
new_literal = condition[0].negate()
for result_axiom in result:
result_axiom.condition.append(new_literal)
else:
new_result = []
for literal in condition:
literal = literal.negate()
for result_axiom in result:
new_axiom = result_axiom.clone()
new_axiom.condition.append(literal)
new_result.append(new_axiom)
result = new_result
result = compute_simplified_axioms(result)
return result
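def _negation_example():
    # Added illustration: negating the single rule h <- a & b multiplies out
    # to the two rules (not h) <- (not a) and (not h) <- (not b).
    h, a, b = pddl.Atom("h", []), pddl.Atom("a", []), pddl.Atom("b", [])
    negated = negate([pddl.PropositionalAxiom("ax", [a, b], h)])
    assert len(negated) == 2
    assert all(ax.effect == h.negate() for ax in negated)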
def get_axioms(clusters):
axioms = []
for cluster in clusters:
for v in cluster.variables:
axioms += cluster.axioms[v]
return axioms
def verify_layering_condition(axioms, axiom_layers):
# This function is only used for debugging.
variables_in_heads = set()
literals_in_heads = set()
variables_with_layers = set()
for axiom in axioms:
head = axiom.effect
variables_in_heads.add(head.positive())
literals_in_heads.add(head)
variables_with_layers = set(axiom_layers.keys())
# 1. A variable has a defined layer iff it appears in a head.
# (This is stricter than it needs to be; we could allow
# derived variables that are never generated by a rule.
# But this test follows the axiom simplification step, and
# after simplification this should not be too strict.)
# All layers are integers and at least 0.
# (Note: the "-1" layer for non-derived variables is
# set elsewhere.)
print("Verifying 1...")
assert variables_in_heads == variables_with_layers
for atom, layer in axiom_layers.items():
assert isinstance(layer, int)
assert layer >= 0
# 2. For every rule head <- ... cond ... where cond is a literal
# of a derived variable where the layer of head is equal to
# the layer of cond, cond occurs with the same polarity in heads.
#
# Note regarding issue454 and issue453: Because of the negated axioms
# mentioned in these issues, a derived variable may appear with *both*
# polarities in heads. This makes this test less strong than it would
# be otherwise. When these issues are addressed and axioms only occur
# with one polarity in heads, this test will remain correct in its
# current form, but it will be able to detect more violations of the
# layering property.
print("Verifying 2...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if (cond_positive in variables_in_heads and
axiom_layers[cond_positive] == axiom_layers[head_positive]):
assert cond in literals_in_heads
# 3. For every rule head <- ... cond ... where cond is a literal
# of a derived variable, the layer of head is greater or equal
# to the layer of cond.
print("Verifying 3...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if cond_positive in variables_in_heads:
# We need the assertion to be on a single line for
# our error handler to be able to print the line.
assert (axiom_layers[cond_positive] <= axiom_layers[head_positive]), (axiom_layers[cond_positive], axiom_layers[head_positive])
| 15,777 | 41.187166 | 143 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/translate.py
|
#! /usr/bin/env python3
import os
import sys
import traceback
def python_version_supported():
return sys.version_info >= (3, 6)
if not python_version_supported():
sys.exit("Error: Translator only supports Python >= 3.6.")
from collections import defaultdict
from copy import deepcopy
from itertools import product
import axiom_rules
import fact_groups
import instantiate
import normalize
import options
import pddl
import pddl_parser
import sas_tasks
import signal
import simplify
import timers
import tools
import variable_order
# TODO: The translator may generate trivial derived variables which are always
# true, for example if there is a derived predicate in the input that only
# depends on (non-derived) variables which are detected as always true.
# Such a situation was encountered in the PSR-STRIPS-DerivedPredicates domain.
# Such "always-true" variables should best be compiled away, but it is
# not clear what the best place to do this should be. Similar
# simplifications might be possible elsewhere, for example if a
# derived variable is synonymous with another variable (derived or
# non-derived).
DEBUG = False
## For a full list of exit codes, please see driver/returncodes.py. Here,
## we only list codes that are used by the translator component of the planner.
TRANSLATE_OUT_OF_MEMORY = 20
TRANSLATE_OUT_OF_TIME = 21
simplified_effect_condition_counter = 0
added_implied_precondition_counter = 0
def strips_to_sas_dictionary(groups, assert_partial):
dictionary = {}
for var_no, group in enumerate(groups):
for val_no, atom in enumerate(group):
dictionary.setdefault(atom, []).append((var_no, val_no))
if assert_partial:
assert all(len(sas_pairs) == 1
for sas_pairs in dictionary.values())
return [len(group) + 1 for group in groups], dictionary
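def _strips_to_sas_example():
    # Added illustration with assumed atoms: groups of sizes 2 and 1 yield
    # ranges [3, 2] (one extra "none of those" value per variable).
    a, b, c = pddl.Atom("a", []), pddl.Atom("b", []), pddl.Atom("c", [])
    ranges, dictionary = strips_to_sas_dictionary([[a, b], [c]],
                                                  assert_partial=True)
    assert ranges == [3, 2]
    assert dictionary[a] == [(0, 0)]
    assert dictionary[b] == [(0, 1)]
    assert dictionary[c] == [(1, 0)]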
def translate_strips_conditions_aux(conditions, dictionary, ranges):
condition = {}
for fact in conditions:
if fact.negated:
# we handle negative conditions later, because then we
# can recognize when the negative condition is already
# ensured by a positive condition
continue
for var, val in dictionary.get(fact, ()):
# The default () here is a bit of a hack. For goals (but
# only for goals!), we can get static facts here. They
# cannot be statically false (that would have been
# detected earlier), and hence they are statically true
# and don't need to be translated.
# TODO: This would not be necessary if we dealt with goals
# in the same way we deal with operator preconditions etc.,
# where static facts disappear during grounding. So change
# this when the goal code is refactored (also below). (**)
if (condition.get(var) is not None and
val not in condition.get(var)):
# Conflicting conditions on this variable: Operator invalid.
return None
condition[var] = {val}
def number_of_values(var_vals_pair):
var, vals = var_vals_pair
return len(vals)
for fact in conditions:
if fact.negated:
## Note: here we use a different solution than in Sec. 10.6.4
## of the thesis. Compare the last sentences of the third
## paragraph of the section.
## We could do what is written there. As a test case,
## consider Airport ADL tasks with only one airport, where
## (occupied ?x) variables are encoded in a single variable,
## and conditions like (not (occupied ?x)) do occur in
## preconditions.
## However, here we avoid introducing new derived predicates
            ## by treating the negative precondition as a disjunctive
## precondition and expanding it by "multiplying out" the
## possibilities. This can lead to an exponential blow-up so
## it would be nice to choose the behaviour as an option.
done = False
new_condition = {}
atom = pddl.Atom(fact.predicate, fact.args) # force positive
for var, val in dictionary.get(atom, ()):
# see comment (**) above
poss_vals = set(range(ranges[var]))
poss_vals.remove(val)
if condition.get(var) is None:
assert new_condition.get(var) is None
new_condition[var] = poss_vals
else:
# constrain existing condition on var
prev_possible_vals = condition.get(var)
done = True
prev_possible_vals.intersection_update(poss_vals)
if len(prev_possible_vals) == 0:
# Conflicting conditions on this variable:
# Operator invalid.
return None
if not done and len(new_condition) != 0:
# we did not enforce the negative condition by constraining
# an existing condition on one of the variables representing
# this atom. So we need to introduce a new condition:
# We can select any from new_condition and currently prefer the
# smallest one.
candidates = sorted(new_condition.items(), key=number_of_values)
var, vals = candidates[0]
condition[var] = vals
def multiply_out(condition): # destroys the input
sorted_conds = sorted(condition.items(), key=number_of_values)
flat_conds = [{}]
for var, vals in sorted_conds:
if len(vals) == 1:
for cond in flat_conds:
cond[var] = vals.pop() # destroys the input here
else:
new_conds = []
for cond in flat_conds:
for val in vals:
new_cond = deepcopy(cond)
new_cond[var] = val
new_conds.append(new_cond)
flat_conds = new_conds
return flat_conds
return multiply_out(condition)
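# Illustration of the negative-condition handling above (added note): for a
# variable var with range 3, a negative condition "not (var = 1)" that cannot
# be folded into an existing condition leaves poss_vals = {0, 2}; multiply_out
# then produces the two flat conditions {var: 0} and {var: 2}.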
def translate_strips_conditions(conditions, dictionary, ranges,
mutex_dict, mutex_ranges):
if not conditions:
return [{}] # Quick exit for common case.
# Check if the condition violates any mutexes.
if translate_strips_conditions_aux(conditions, mutex_dict,
mutex_ranges) is None:
return None
return translate_strips_conditions_aux(conditions, dictionary, ranges)
def translate_strips_operator(operator, dictionary, ranges, mutex_dict,
mutex_ranges, implied_facts):
conditions = translate_strips_conditions(operator.precondition, dictionary,
ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
sas_operators = []
for condition in conditions:
op = translate_strips_operator_aux(operator, dictionary, ranges,
mutex_dict, mutex_ranges,
implied_facts, condition)
if op is not None:
sas_operators.append(op)
return sas_operators
def negate_and_translate_condition(condition, dictionary, ranges, mutex_dict,
mutex_ranges):
# condition is a list of lists of literals (DNF)
# the result is the negation of the condition in DNF in
# finite-domain representation (a list of dictionaries that map
# variables to values)
negation = []
if [] in condition: # condition always satisfied
return None # negation unsatisfiable
for combination in product(*condition):
cond = [l.negate() for l in combination]
cond = translate_strips_conditions(cond, dictionary, ranges,
mutex_dict, mutex_ranges)
if cond is not None:
negation.extend(cond)
return negation if negation else None
def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict,
mutex_ranges, implied_facts, condition):
# collect all add effects
effects_by_variable = defaultdict(lambda: defaultdict(list))
# effects_by_variables: var -> val -> list(FDR conditions)
add_conds_by_variable = defaultdict(list)
for conditions, fact in operator.add_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary,
ranges, mutex_dict,
mutex_ranges)
if eff_condition_list is None: # Impossible condition for this effect.
continue
for var, val in dictionary[fact]:
effects_by_variable[var][val].extend(eff_condition_list)
add_conds_by_variable[var].append(conditions)
# collect all del effects
del_effects_by_variable = defaultdict(lambda: defaultdict(list))
for conditions, fact in operator.del_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary,
ranges, mutex_dict,
mutex_ranges)
if eff_condition_list is None: # Impossible condition for this effect.
continue
for var, val in dictionary[fact]:
del_effects_by_variable[var][val].extend(eff_condition_list)
# add effect var=none_of_those for all del effects with the additional
# condition that the deleted value has been true and no add effect triggers
for var in del_effects_by_variable:
no_add_effect_condition = negate_and_translate_condition(
add_conds_by_variable[var], dictionary, ranges, mutex_dict,
mutex_ranges)
if no_add_effect_condition is None: # there is always an add effect
continue
none_of_those = ranges[var] - 1
for val, conds in del_effects_by_variable[var].items():
for cond in conds:
# add guard
if var in cond and cond[var] != val:
continue # condition inconsistent with deleted atom
cond[var] = val
# add condition that no add effect triggers
for no_add_cond in no_add_effect_condition:
new_cond = dict(cond)
# This is a rather expensive step. We try every no_add_cond
# with every condition of the delete effect and discard the
                    # overall combination if it is unsatisfiable. Since
# no_add_effect_condition is precomputed it can contain many
# no_add_conds in which a certain literal occurs. So if cond
# plus the literal is already unsatisfiable, we still try
# all these combinations. A possible optimization would be
# to re-compute no_add_effect_condition for every delete
# effect and to unfold the product(*condition) in
# negate_and_translate_condition to allow an early break.
for cvar, cval in no_add_cond.items():
if cvar in new_cond and new_cond[cvar] != cval:
# the del effect condition plus the deleted atom
# imply that some add effect on the variable
# triggers
break
new_cond[cvar] = cval
else:
effects_by_variable[var][none_of_those].append(new_cond)
return build_sas_operator(operator.name, condition, effects_by_variable,
operator.cost, ranges, implied_facts)
def build_sas_operator(name, condition, effects_by_variable, cost, ranges,
implied_facts):
if options.add_implied_preconditions:
implied_precondition = set()
for fact in condition.items():
implied_precondition.update(implied_facts[fact])
prevail_and_pre = dict(condition)
pre_post = []
for var, effects_on_var in effects_by_variable.items():
orig_pre = condition.get(var, -1)
added_effect = False
for post, eff_conditions in effects_on_var.items():
pre = orig_pre
# if the effect does not change the variable value, we ignore it
if pre == post:
continue
eff_condition_lists = [sorted(eff_cond.items())
for eff_cond in eff_conditions]
if ranges[var] == 2:
# Apply simplifications for binary variables.
if prune_stupid_effect_conditions(var, post,
eff_condition_lists,
effects_on_var):
global simplified_effect_condition_counter
simplified_effect_condition_counter += 1
if (options.add_implied_preconditions and pre == -1 and
(var, 1 - post) in implied_precondition):
global added_implied_precondition_counter
added_implied_precondition_counter += 1
pre = 1 - post
for eff_condition in eff_condition_lists:
# we do not need to represent a precondition as effect condition
# and we do not want to keep an effect whose condition contradicts
# a pre- or prevail condition
filtered_eff_condition = []
eff_condition_contradicts_precondition = False
for variable, value in eff_condition:
if variable in prevail_and_pre:
if prevail_and_pre[variable] != value:
eff_condition_contradicts_precondition = True
break
else:
filtered_eff_condition.append((variable, value))
if eff_condition_contradicts_precondition:
continue
pre_post.append((var, pre, post, filtered_eff_condition))
added_effect = True
if added_effect:
# the condition on var is not a prevail condition but a
# precondition, so we remove it from the prevail condition
condition.pop(var, -1)
if not pre_post: # operator is noop
return None
prevail = list(condition.items())
return sas_tasks.SASOperator(name, prevail, pre_post, cost)
def prune_stupid_effect_conditions(var, val, conditions, effects_on_var):
## (IF <conditions> THEN <var> := <val>) is a conditional effect.
## <var> is guaranteed to be a binary variable.
## <conditions> is in DNF representation (list of lists).
##
## We simplify <conditions> by applying two rules:
## 1. Conditions of the form "var = dualval" where var is the
## effect variable and dualval != val can be omitted.
## (If var != dualval, then var == val because it is binary,
## which means that in such situations the effect is a no-op.)
## The condition can only be omitted if there is no effect
## producing dualval (see issue736).
## 2. If conditions contains any empty list, it is equivalent
## to True and we can remove all other disjuncts.
##
## returns True when anything was changed
if conditions == [[]]:
return False # Quick exit for common case.
assert val in [0, 1]
dual_val = 1 - val
dual_fact = (var, dual_val)
if dual_val in effects_on_var:
return False
simplified = False
for condition in conditions:
# Apply rule 1.
while dual_fact in condition:
# print "*** Removing dual condition"
simplified = True
condition.remove(dual_fact)
# Apply rule 2.
if not condition:
conditions[:] = [[]]
simplified = True
break
return simplified
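def _prune_conditions_example():
    # Added illustration: for binary variable 0 with effect value 1, the dual
    # condition (0, 0) is dropped by rule 1. The dict passed here stands in
    # for effects_by_variable[var]; only key membership matters.
    conditions = [[(0, 0), (1, 1)]]
    assert prune_stupid_effect_conditions(0, 1, conditions, {1: []})
    assert conditions == [[(1, 1)]]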
def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges):
conditions = translate_strips_conditions(axiom.condition, dictionary,
ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
if axiom.effect.negated:
[(var, _)] = dictionary[axiom.effect.positive()]
effect = (var, ranges[var] - 1)
else:
[effect] = dictionary[axiom.effect]
axioms = []
for condition in conditions:
axioms.append(sas_tasks.SASAxiom(condition.items(), effect))
return axioms
def translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict,
mutex_ranges, implied_facts):
result = []
for action in actions:
sas_ops = translate_strips_operator(action, strips_to_sas, ranges,
mutex_dict, mutex_ranges,
implied_facts)
result.extend(sas_ops)
return result
def translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
mutex_ranges):
result = []
for axiom in axioms:
sas_axioms = translate_strips_axiom(axiom, strips_to_sas, ranges,
mutex_dict, mutex_ranges)
result.extend(sas_axioms)
return result
def dump_task(init, goals, actions, axioms, axiom_layer_dict):
old_stdout = sys.stdout
with open("output.dump", "w") as dump_file:
sys.stdout = dump_file
print("Initial state")
for atom in init:
print(atom)
print()
print("Goals")
for goal in goals:
print(goal)
for action in actions:
print()
print("Action")
action.dump()
for axiom in axioms:
print()
print("Axiom")
axiom.dump()
print()
print("Axiom layers")
for atom, layer in axiom_layer_dict.items():
print("%s: layer %d" % (atom, layer))
sys.stdout = old_stdout
def translate_task(strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
init, goals,
actions, axioms, metric, implied_facts):
with timers.timing("Processing axioms", block=True):
axioms, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goals,
options.layer_strategy)
if options.dump_task:
# Remove init facts that don't occur in strips_to_sas: they're constant.
nonconstant_init = filter(strips_to_sas.get, init)
dump_task(nonconstant_init, goals, actions, axioms, axiom_layer_dict)
init_values = [rang - 1 for rang in ranges]
# Closed World Assumption: Initialize to "range - 1" == Nothing.
for fact in init:
pairs = strips_to_sas.get(fact, []) # empty for static init facts
for var, val in pairs:
curr_val = init_values[var]
if curr_val != ranges[var] - 1 and curr_val != val:
assert False, "Inconsistent init facts! [fact = %s]" % fact
init_values[var] = val
init = sas_tasks.SASInit(init_values)
goal_dict_list = translate_strips_conditions(goals, strips_to_sas, ranges,
mutex_dict, mutex_ranges)
if goal_dict_list is None:
# "None" is a signal that the goal is unreachable because it
# violates a mutex.
return unsolvable_sas_task("Goal violates a mutex")
assert len(goal_dict_list) == 1, "Negative goal not supported"
## we could substitute the negative goal literal in
## normalize.substitute_complicated_goal, using an axiom. We currently
## don't do this, because we don't run into this assertion, if the
## negative goal is part of finite domain variable with only two
## values, which is most of the time the case, and hence refrain from
## introducing axioms (that are not supported by all heuristics)
goal_pairs = list(goal_dict_list[0].items())
if not goal_pairs:
return solvable_sas_task("Empty goal")
goal = sas_tasks.SASGoal(goal_pairs)
operators = translate_strips_operators(actions, strips_to_sas, ranges,
mutex_dict, mutex_ranges,
implied_facts)
axioms = translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
mutex_ranges)
axiom_layers = [-1] * len(ranges)
for atom, layer in axiom_layer_dict.items():
assert layer >= 0
[(var, val)] = strips_to_sas[atom]
axiom_layers[var] = layer
variables = sas_tasks.SASVariables(ranges, axiom_layers, translation_key)
mutexes = [sas_tasks.SASMutexGroup(group) for group in mutex_key]
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def trivial_task(solvable):
variables = sas_tasks.SASVariables(
[2], [-1], [["Atom dummy(val1)", "Atom dummy(val2)"]])
# We create no mutexes: the only possible mutex is between
# dummy(val1) and dummy(val2), but the preprocessor would filter
# it out anyway since it is trivial (only involves one
# finite-domain variable).
mutexes = []
init = sas_tasks.SASInit([0])
if solvable:
goal_fact = (0, 0)
else:
goal_fact = (0, 1)
goal = sas_tasks.SASGoal([goal_fact])
operators = []
axioms = []
metric = True
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def solvable_sas_task(msg):
print("%s! Generating solvable task..." % msg)
return trivial_task(solvable=True)
def unsolvable_sas_task(msg):
print("%s! Generating unsolvable task..." % msg)
return trivial_task(solvable=False)
def pddl_to_sas(task):
with timers.timing("Instantiating", block=True):
(relaxed_reachable, atoms, actions, axioms,
reachable_action_params) = instantiate.explore(task)
if not relaxed_reachable:
return unsolvable_sas_task("No relaxed solution")
# HACK! Goals should be treated differently.
if isinstance(task.goal, pddl.Conjunction):
goal_list = task.goal.parts
else:
goal_list = [task.goal]
for item in goal_list:
assert isinstance(item, pddl.Literal)
with timers.timing("Computing fact groups", block=True):
groups, mutex_groups, translation_key = fact_groups.compute_groups(
task, atoms, reachable_action_params)
with timers.timing("Building STRIPS to SAS dictionary"):
ranges, strips_to_sas = strips_to_sas_dictionary(
groups, assert_partial=options.use_partial_encoding)
with timers.timing("Building dictionary for full mutex groups"):
mutex_ranges, mutex_dict = strips_to_sas_dictionary(
mutex_groups, assert_partial=False)
if options.add_implied_preconditions:
with timers.timing("Building implied facts dictionary..."):
implied_facts = build_implied_facts(strips_to_sas, groups,
mutex_groups)
else:
implied_facts = {}
with timers.timing("Building mutex information", block=True):
if options.use_partial_encoding:
mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
else:
# With our current representation, emitting complete mutex
# information for the full encoding can incur an
# unacceptable (quadratic) blowup in the task representation
# size. See issue771 for details.
print("using full encoding: between-variable mutex information skipped.")
mutex_key = []
with timers.timing("Translating task", block=True):
sas_task = translate_task(
strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
task.init, goal_list, actions, axioms, task.use_min_cost_metric,
implied_facts)
print("%d effect conditions simplified" %
simplified_effect_condition_counter)
print("%d implied preconditions added" %
added_implied_precondition_counter)
if options.filter_unreachable_facts:
with timers.timing("Detecting unreachable propositions", block=True):
try:
simplify.filter_unreachable_propositions(sas_task)
except simplify.Impossible:
return unsolvable_sas_task("Simplified to trivially false goal")
except simplify.TriviallySolvable:
return solvable_sas_task("Simplified to empty goal")
if options.reorder_variables or options.filter_unimportant_vars:
with timers.timing("Reordering and filtering variables", block=True):
variable_order.find_and_apply_variable_order(
sas_task, options.reorder_variables,
options.filter_unimportant_vars)
return sas_task
def build_mutex_key(strips_to_sas, groups):
assert options.use_partial_encoding
group_keys = []
for group in groups:
group_key = []
for fact in group:
represented_by = strips_to_sas.get(fact)
if represented_by:
assert len(represented_by) == 1
group_key.append(represented_by[0])
else:
print("not in strips_to_sas, left out:", fact)
group_keys.append(group_key)
return group_keys
def build_implied_facts(strips_to_sas, groups, mutex_groups):
## Compute a dictionary mapping facts (FDR pairs) to lists of FDR
## pairs implied by that fact. In other words, in all states
## containing p, all pairs in implied_facts[p] must also be true.
##
## There are two simple cases where a pair p implies a pair q != p
## in our FDR encodings:
## 1. p and q encode the same fact
## 2. p encodes a STRIPS proposition X, q encodes a STRIPS literal
## "not Y", and X and Y are mutex.
##
## The first case cannot arise when we use partial encodings, and
## when we use full encodings, I don't think it would give us any
## additional information to exploit in the operator translation,
## so we only use the second case.
##
## Note that for a pair q to encode a fact "not Y", Y must form a
## fact group of size 1. We call such propositions Y "lonely".
## In the first step, we compute a dictionary mapping each lonely
## proposition to its variable number.
lonely_propositions = {}
for var_no, group in enumerate(groups):
if len(group) == 1:
lonely_prop = group[0]
assert strips_to_sas[lonely_prop] == [(var_no, 0)]
lonely_propositions[lonely_prop] = var_no
## Then we compute implied facts as follows: for each mutex group,
## check if prop is lonely (then and only then "not prop" has a
## representation as an FDR pair). In that case, all other facts
## in this mutex group imply "not prop".
implied_facts = defaultdict(list)
for mutex_group in mutex_groups:
for prop in mutex_group:
prop_var = lonely_propositions.get(prop)
if prop_var is not None:
prop_is_false = (prop_var, 1)
for other_prop in mutex_group:
if other_prop is not prop:
for other_fact in strips_to_sas[other_prop]:
implied_facts[other_fact].append(prop_is_false)
return implied_facts
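# Illustration (added note): if Y forms a fact group of size 1, encoded as
# variable v with value 0 for Y and value 1 for "not Y", and X is mutex with
# Y, then every FDR pair encoding X gets (v, 1) added to its implied facts.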
def dump_statistics(sas_task):
print("Translator variables: %d" % len(sas_task.variables.ranges))
print("Translator derived variables: %d" %
len([layer for layer in sas_task.variables.axiom_layers
if layer >= 0]))
print("Translator facts: %d" % sum(sas_task.variables.ranges))
print("Translator goal facts: %d" % len(sas_task.goal.pairs))
print("Translator mutex groups: %d" % len(sas_task.mutexes))
print("Translator total mutex groups size: %d" %
sum(mutex.get_encoding_size() for mutex in sas_task.mutexes))
print("Translator operators: %d" % len(sas_task.operators))
print("Translator axioms: %d" % len(sas_task.axioms))
print("Translator task size: %d" % sas_task.get_encoding_size())
try:
peak_memory = tools.get_peak_memory_in_kb()
except Warning as warning:
print(warning)
else:
print("Translator peak memory: %d KB" % peak_memory)
def main():
timer = timers.Timer()
with timers.timing("Parsing", True):
task = pddl_parser.open(
domain_filename=options.domain, task_filename=options.task)
with timers.timing("Normalizing task"):
normalize.normalize(task)
if options.generate_relaxed_task:
# Remove delete effects.
for action in task.actions:
for index, effect in reversed(list(enumerate(action.effects))):
if effect.literal.negated:
del action.effects[index]
sas_task = pddl_to_sas(task)
dump_statistics(sas_task)
with timers.timing("Writing output"):
with open(options.sas_file, "w") as output_file:
sas_task.output(output_file)
print("Done! %s" % timer)
def handle_sigxcpu(signum, stackframe):
print()
print("Translator hit the time limit")
# sys.exit() is not safe to be called from within signal handlers, but
# os._exit() is.
os._exit(TRANSLATE_OUT_OF_TIME)
if __name__ == "__main__":
try:
signal.signal(signal.SIGXCPU, handle_sigxcpu)
except AttributeError:
print("Warning! SIGXCPU is not available on your platform. "
"This means that the planner cannot be gracefully terminated "
"when using a time limit, which, however, is probably "
"supported on your platform anyway.")
try:
# Reserve about 10 MB of emergency memory.
# https://stackoverflow.com/questions/19469608/
emergency_memory = b"x" * 10**7
main()
except MemoryError:
del emergency_memory
print()
print("Translator ran out of memory, traceback:")
print("=" * 79)
traceback.print_exc(file=sys.stdout)
print("=" * 79)
sys.exit(TRANSLATE_OUT_OF_MEMORY)
| 30,991 | 41.108696 | 85 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/graph.py
|
#! /usr/bin/env python3
class Graph:
def __init__(self, nodes):
self.nodes = nodes
self.neighbours = {u: set() for u in nodes}
def connect(self, u, v):
self.neighbours[u].add(v)
self.neighbours[v].add(u)
def connected_components(self):
remaining_nodes = set(self.nodes)
result = []
def dfs(node):
result[-1].append(node)
remaining_nodes.remove(node)
for neighbour in self.neighbours[node]:
if neighbour in remaining_nodes:
dfs(neighbour)
while remaining_nodes:
node = next(iter(remaining_nodes))
result.append([])
dfs(node)
result[-1].sort()
return sorted(result)
def transitive_closure(pairs):
# Warshall's algorithm.
result = set(pairs)
nodes = {u for (u, v) in pairs} | {v for (u, v) in pairs}
for k in nodes:
for i in nodes:
for j in nodes:
if (i, j) not in result and (i, k) in result and (k, j) in result:
result.add((i, j))
return sorted(result)
if __name__ == "__main__":
g = Graph([1, 2, 3, 4, 5, 6])
g.connect(1, 2)
g.connect(1, 3)
g.connect(4, 5)
print(g.connected_components())
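    # Added illustration: the closure of {(1, 2), (2, 3)} adds the pair (1, 3).
    print(transitive_closure([(1, 2), (2, 3)]))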
| 1,296 | 27.195652 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/fact_groups.py
|
import invariant_finder
import options
import pddl
import timers
DEBUG = False
def expand_group(group, task, reachable_facts):
result = []
for fact in group:
try:
pos = list(fact.args).index("?X")
except ValueError:
result.append(fact)
else:
# NOTE: This could be optimized by only trying objects of the correct
# type, or by using a unifier which directly generates the
# applicable objects. It is not worth optimizing this at this stage,
# though.
for obj in task.objects:
newargs = list(fact.args)
newargs[pos] = obj.name
atom = pddl.Atom(fact.predicate, newargs)
if atom in reachable_facts:
result.append(atom)
return result
def instantiate_groups(groups, task, reachable_facts):
return [expand_group(group, task, reachable_facts) for group in groups]
class GroupCoverQueue:
def __init__(self, groups):
if groups:
self.max_size = max([len(group) for group in groups])
self.groups_by_size = [[] for i in range(self.max_size + 1)]
self.groups_by_fact = {}
for group in groups:
group = set(group) # Copy group, as it will be modified.
self.groups_by_size[len(group)].append(group)
for fact in group:
self.groups_by_fact.setdefault(fact, []).append(group)
self._update_top()
else:
self.max_size = 0
def __bool__(self):
return self.max_size > 1
__nonzero__ = __bool__
def pop(self):
result = list(self.top) # Copy; this group will shrink further.
if options.use_partial_encoding:
for fact in result:
for group in self.groups_by_fact[fact]:
group.remove(fact)
self._update_top()
return result
def _update_top(self):
while self.max_size > 1:
max_list = self.groups_by_size[self.max_size]
while max_list:
candidate = max_list.pop()
if len(candidate) == self.max_size:
self.top = candidate
return
self.groups_by_size[len(candidate)].append(candidate)
self.max_size -= 1
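# Illustration (added note): for overlapping groups {a, b, c} and {b, c}, the
# queue first yields {a, b, c}; with partial encoding its facts are then
# removed from the remaining group, which shrinks below size 2 and is never
# yielded.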
def choose_groups(groups, reachable_facts):
queue = GroupCoverQueue(groups)
uncovered_facts = reachable_facts.copy()
result = []
while queue:
group = queue.pop()
uncovered_facts.difference_update(group)
result.append(group)
print(len(uncovered_facts), "uncovered facts")
result += [[fact] for fact in uncovered_facts]
return result
def build_translation_key(groups):
group_keys = []
for group in groups:
group_key = [str(fact) for fact in group]
if len(group) == 1:
group_key.append(str(group[0].negate()))
else:
group_key.append("<none of those>")
group_keys.append(group_key)
return group_keys
def collect_all_mutex_groups(groups, atoms):
# NOTE: This should be functionally identical to choose_groups
# when partial_encoding is set to False. Maybe a future
# refactoring could take that into account.
all_groups = []
uncovered_facts = atoms.copy()
for group in groups:
uncovered_facts.difference_update(group)
all_groups.append(group)
all_groups += [[fact] for fact in uncovered_facts]
return all_groups
def sort_groups(groups):
return sorted(sorted(group) for group in groups)
def compute_groups(task, atoms, reachable_action_params):
groups = invariant_finder.get_groups(task, reachable_action_params)
with timers.timing("Instantiating groups"):
groups = instantiate_groups(groups, task, atoms)
# Sort here already to get deterministic mutex groups.
groups = sort_groups(groups)
# TODO: I think that collect_all_mutex_groups should do the same thing
# as choose_groups with partial_encoding=False, so these two should
# be unified.
with timers.timing("Collecting mutex groups"):
mutex_groups = collect_all_mutex_groups(groups, atoms)
with timers.timing("Choosing groups", block=True):
groups = choose_groups(groups, atoms)
groups = sort_groups(groups)
with timers.timing("Building translation key"):
translation_key = build_translation_key(groups)
if DEBUG:
for group in groups:
if len(group) >= 2:
print("{%s}" % ", ".join(map(str, group)))
return groups, mutex_groups, translation_key
| 4,698 | 34.598485 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/timers.py
|
import contextlib
import os
import sys
import time
class Timer:
def __init__(self):
self.start_time = time.time()
self.start_clock = self._clock()
def _clock(self):
times = os.times()
return times[0] + times[1]
def __str__(self):
return "[%.3fs CPU, %.3fs wall-clock]" % (
self._clock() - self.start_clock,
time.time() - self.start_time)
@contextlib.contextmanager
def timing(text, block=False):
timer = Timer()
if block:
print("%s..." % text)
else:
print("%s..." % text, end=' ')
sys.stdout.flush()
yield
if block:
print("%s: %s" % (text, timer))
else:
print(timer)
sys.stdout.flush()
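def _timing_usage_example():
    # Added usage sketch: prints "Sleeping..." and, once the block finishes,
    # the elapsed CPU and wall-clock times.
    with timing("Sleeping", block=True):
        time.sleep(0.01)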
DAAISy-main/dependencies/FD/src/translate/tools.py
def cartesian_product(sequences):
# TODO: Rename this. It's not good that we have two functions
# called "product" and "cartesian_product", of which "product"
# computes cartesian products, while "cartesian_product" does not.
# This isn't actually a proper cartesian product because we
# concatenate lists, rather than forming sequences of atomic elements.
# We could probably also use something like
# map(itertools.chain, product(*sequences))
# but that does not produce the same results
if not sequences:
yield []
else:
temp = list(cartesian_product(sequences[1:]))
for item in sequences[0]:
for sequence in temp:
yield item + sequence
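def _cartesian_product_example():
    # Added illustration of the concatenating behaviour described above:
    assert list(cartesian_product([[[1], [2]], [[3]]])) == [[1, 3], [2, 3]]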
def get_peak_memory_in_kb():
try:
# This will only work on Linux systems.
with open("/proc/self/status") as status_file:
for line in status_file:
parts = line.split()
if parts[0] == "VmPeak:":
return int(parts[1])
except OSError:
pass
raise Warning("warning: could not determine peak memory")
| 1,127 | 35.387097 | 74 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/options.py
|
import argparse
import sys
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"domain", help="path to domain pddl file")
argparser.add_argument(
"task", help="path to task pddl file")
argparser.add_argument(
"--relaxed", dest="generate_relaxed_task", action="store_true",
help="output relaxed task (no delete effects)")
argparser.add_argument(
"--full-encoding",
dest="use_partial_encoding", action="store_false",
help="By default we represent facts that occur in multiple "
"mutex groups only in one variable. Using this parameter adds "
"these facts to multiple variables. This can make the meaning "
"of the variables clearer, but increases the number of facts.")
argparser.add_argument(
"--invariant-generation-max-candidates", default=100000, type=int,
help="max number of candidates for invariant generation "
"(default: %(default)d). Set to 0 to disable invariant "
"generation and obtain only binary variables. The limit is "
"needed for grounded input files that would otherwise produce "
"too many candidates.")
argparser.add_argument(
"--sas-file", default="output.sas",
help="path to the SAS output file (default: %(default)s)")
argparser.add_argument(
"--invariant-generation-max-time", default=300, type=int,
help="max time for invariant generation (default: %(default)ds)")
argparser.add_argument(
"--add-implied-preconditions", action="store_true",
help="infer additional preconditions. This setting can cause a "
"severe performance penalty due to weaker relevance analysis "
"(see issue7).")
argparser.add_argument(
"--keep-unreachable-facts",
dest="filter_unreachable_facts", action="store_false",
help="keep facts that can't be reached from the initial state")
argparser.add_argument(
"--skip-variable-reordering",
dest="reorder_variables", action="store_false",
help="do not reorder variables based on the causal graph. Do not use "
"this option with the causal graph heuristic!")
argparser.add_argument(
"--keep-unimportant-variables",
dest="filter_unimportant_vars", action="store_false",
help="keep variables that do not influence the goal in the causal graph")
argparser.add_argument(
"--dump-task", action="store_true",
help="dump human-readable SAS+ representation of the task")
argparser.add_argument(
"--layer-strategy", default="min", choices=["min", "max"],
help="How to assign layers to derived variables. 'min' attempts to put as "
"many variables into the same layer as possible, while 'max' puts each variable "
"into its own layer unless it is part of a cycle.")
return argparser.parse_args()
def copy_args_to_module(args):
module_dict = sys.modules[__name__].__dict__
for key, value in vars(args).items():
module_dict[key] = value
def setup():
args = parse_args()
copy_args_to_module(args)
setup()
| 3,176 | 41.36 | 89 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/simplify.py
|
"""This module contains a function for simplifying tasks in
finite-domain representation (SASTask). Usage:
simplify.filter_unreachable_propositions(sas_task)
simplifies `sas_task` in-place. If simplification detects that the
task is unsolvable, the function raises `simplify.Impossible`. If it
detects that it has an empty goal, the function raises
`simplify.TriviallySolvable`.
The simplification procedure generates DTGs for the task and then
removes facts that are unreachable from the initial state in a DTG.
Note that such unreachable facts can exist even though we perform a
relaxed reachability analysis before grounding (and DTG reachability
is weaker than relaxed reachability) because the previous relaxed
reachability does not take into account any mutex information, while
PDDL-to-SAS conversion gets rid of certain operators that cannot be
applicable given the mutex information.
Despite the name, the method touches more than the set of facts. For
example, operators that have preconditions on pruned facts are
removed, too. (See also the docstring of
filter_unreachable_propositions.)
"""
from collections import defaultdict
from itertools import count
import sas_tasks
DEBUG = False
# TODO:
# This is all quite hackish and would be easier if the translator were
# restructured so that more information is immediately available for
# the propositions, and if propositions had more structure. Directly
# working with int pairs is awkward.
class DomainTransitionGraph:
"""Domain transition graphs.
Attributes:
- init (int): the initial state value of the DTG variable
- size (int): the number of values in the domain
- arcs (defaultdict: int -> set(int)): the DTG arcs (unlabeled)
There are no transition labels or goal values.
The intention is that nodes are represented as ints in {1, ...,
domain_size}, but this is not enforced.
For derived variables, the "fallback value" that is produced by
negation by failure should be used for `init`, so that it is
always considered reachable.
"""
def __init__(self, init, size):
"""Create a DTG with no arcs."""
self.init = init
self.size = size
self.arcs = defaultdict(set)
def add_arc(self, u, v):
"""Add an arc from u to v."""
self.arcs[u].add(v)
def reachable(self):
"""Return the values reachable from the initial value.
Represented as a set(int)."""
queue = [self.init]
reachable = set(queue)
while queue:
node = queue.pop()
new_neighbors = self.arcs.get(node, set()) - reachable
reachable |= new_neighbors
queue.extend(new_neighbors)
return reachable
def dump(self):
"""Dump the DTG."""
print("DTG size:", self.size)
print("DTG init value:", self.init)
print("DTG arcs:")
for source, destinations in sorted(self.arcs.items()):
for destination in sorted(destinations):
print(" %d => %d" % (source, destination))
def build_dtgs(task):
"""Build DTGs for all variables of the SASTask `task`.
Return a list(DomainTransitionGraph), one for each variable.
For derived variables, we do not consider the axiom bodies, i.e.,
we treat each axiom as if it were an operator with no
preconditions. In the case where the only derived variables used
are binary and all rules change the value from the default value
to the non-default value, this results in the correct DTG.
Otherwise, at worst it results in an overapproximation, which
would not threaten correctness."""
init_vals = task.init.values
sizes = task.variables.ranges
dtgs = [DomainTransitionGraph(init, size)
for (init, size) in zip(init_vals, sizes)]
def add_arc(var_no, pre_spec, post):
"""Add a DTG arc for var_no induced by transition pre_spec -> post.
pre_spec may be -1, in which case arcs from every value
other than post are added."""
if pre_spec == -1:
pre_values = set(range(sizes[var_no])).difference([post])
else:
pre_values = [pre_spec]
for pre in pre_values:
dtgs[var_no].add_arc(pre, post)
def get_effective_pre(var_no, conditions, effect_conditions):
"""Return combined information on the conditions on `var_no`
from operator conditions and effect conditions.
- conditions: dict(int -> int) containing the combined
operator prevail and preconditions
- effect_conditions: list(pair(int, int)) containing the
effect conditions
Result:
- -1 if there is no condition on var_no
- val if there is a unique condition var_no=val
- None if there are contradictory conditions on var_no"""
result = conditions.get(var_no, -1)
for cond_var_no, cond_val in effect_conditions:
if cond_var_no == var_no:
if result == -1:
# This is the first condition on var_no.
result = cond_val
elif cond_val != result:
# We have contradictory conditions on var_no.
return None
return result
for op in task.operators:
conditions = dict(op.get_applicability_conditions())
for var_no, _, post, cond in op.pre_post:
effective_pre = get_effective_pre(var_no, conditions, cond)
if effective_pre is not None:
add_arc(var_no, effective_pre, post)
for axiom in task.axioms:
var_no, val = axiom.effect
add_arc(var_no, -1, val)
return dtgs
always_false = object()
always_true = object()
class Impossible(Exception):
pass
class TriviallySolvable(Exception):
pass
class DoesNothing(Exception):
pass
class VarValueRenaming:
def __init__(self):
self.new_var_nos = [] # indexed by old var_no
self.new_values = [] # indexed by old var_no and old value
self.new_sizes = [] # indexed by new var_no
self.new_var_count = 0
self.num_removed_values = 0
def dump(self):
old_var_count = len(self.new_var_nos)
print("variable count: %d => %d" % (
old_var_count, self.new_var_count))
print("number of removed values: %d" % self.num_removed_values)
print("variable conversions:")
for old_var_no, (new_var_no, new_values) in enumerate(
zip(self.new_var_nos, self.new_values)):
old_size = len(new_values)
if new_var_no is None:
print("variable %d [size %d] => removed" % (
old_var_no, old_size))
else:
new_size = self.new_sizes[new_var_no]
print("variable %d [size %d] => %d [size %d]" % (
old_var_no, old_size, new_var_no, new_size))
for old_value, new_value in enumerate(new_values):
if new_value is always_false:
new_value = "always false"
elif new_value is always_true:
new_value = "always true"
print(" value %d => %s" % (old_value, new_value))
def register_variable(self, old_domain_size, init_value, new_domain):
assert 1 <= len(new_domain) <= old_domain_size
assert init_value in new_domain
if len(new_domain) == 1:
# Remove this variable completely.
new_values_for_var = [always_false] * old_domain_size
new_values_for_var[init_value] = always_true
self.new_var_nos.append(None)
self.new_values.append(new_values_for_var)
self.num_removed_values += old_domain_size
else:
new_value_counter = count()
new_values_for_var = []
for value in range(old_domain_size):
if value in new_domain:
new_values_for_var.append(next(new_value_counter))
else:
self.num_removed_values += 1
new_values_for_var.append(always_false)
new_size = next(new_value_counter)
assert new_size == len(new_domain)
self.new_var_nos.append(self.new_var_count)
self.new_values.append(new_values_for_var)
self.new_sizes.append(new_size)
self.new_var_count += 1
def apply_to_task(self, task):
if DEBUG:
self.dump()
self.apply_to_variables(task.variables)
self.apply_to_mutexes(task.mutexes)
self.apply_to_init(task.init)
self.apply_to_goals(task.goal.pairs)
self.apply_to_operators(task.operators)
self.apply_to_axioms(task.axioms)
def apply_to_variables(self, variables):
variables.ranges = self.new_sizes
new_axiom_layers = [None] * self.new_var_count
for old_no, new_no in enumerate(self.new_var_nos):
if new_no is not None:
new_axiom_layers[new_no] = variables.axiom_layers[old_no]
assert None not in new_axiom_layers
variables.axiom_layers = new_axiom_layers
self.apply_to_value_names(variables.value_names)
def apply_to_value_names(self, value_names):
new_value_names = [[None] * size for size in self.new_sizes]
for var_no, values in enumerate(value_names):
for value, value_name in enumerate(values):
new_var_no, new_value = self.translate_pair((var_no, value))
if new_value is always_true:
if DEBUG:
print("Removed true proposition: %s" % value_name)
elif new_value is always_false:
if DEBUG:
print("Removed false proposition: %s" % value_name)
else:
new_value_names[new_var_no][new_value] = value_name
assert all((None not in value_names) for value_names in new_value_names)
value_names[:] = new_value_names
def apply_to_mutexes(self, mutexes):
new_mutexes = []
for mutex in mutexes:
new_facts = []
for var, val in mutex.facts:
new_var_no, new_value = self.translate_pair((var, val))
if (new_value is not always_true and
new_value is not always_false):
new_facts.append((new_var_no, new_value))
if len(new_facts) >= 2:
mutex.facts = new_facts
new_mutexes.append(mutex)
mutexes[:] = new_mutexes
def apply_to_init(self, init):
init_pairs = list(enumerate(init.values))
try:
self.convert_pairs(init_pairs)
except Impossible:
assert False, "Initial state impossible? Inconceivable!"
new_values = [None] * self.new_var_count
for new_var_no, new_value in init_pairs:
new_values[new_var_no] = new_value
assert None not in new_values
init.values = new_values
def apply_to_goals(self, goals):
# This may propagate Impossible up.
self.convert_pairs(goals)
if not goals:
# We raise an exception because we do not consider a SAS+
# task without goals well-formed. Our callers are supposed
# to catch this and replace the task with a well-formed
# trivially solvable task.
raise TriviallySolvable
def apply_to_operators(self, operators):
new_operators = []
num_removed = 0
for op in operators:
new_op = self.translate_operator(op)
if new_op is None:
num_removed += 1
if DEBUG:
print("Removed operator: %s" % op.name)
else:
new_operators.append(new_op)
print("%d operators removed" % num_removed)
operators[:] = new_operators
def apply_to_axioms(self, axioms):
new_axioms = []
num_removed = 0
for axiom in axioms:
try:
self.apply_to_axiom(axiom)
except (Impossible, DoesNothing):
num_removed += 1
if DEBUG:
print("Removed axiom:")
axiom.dump()
else:
new_axioms.append(axiom)
print("%d axioms removed" % num_removed)
axioms[:] = new_axioms
def translate_operator(self, op):
"""Compute a new operator from op where the var/value renaming has
been applied. Return None if op should be pruned (because it
is always inapplicable or has no effect.)"""
# We do not call this apply_to_operator, breaking the analogy
# with the other methods, because it creates a new operator
# rather than transforming in-place. The reason for this is
# that it would be quite difficult to generate the operator
# in-place.
# This method is trickier than it may at first appear. For
# example, pre_post values should be fully sorted (see
# documentation in the sas_tasks module), and pruning effect
# conditions from a conditional effects can break this sort
# order. Recreating the operator from scratch solves this
# because the pre_post entries are sorted by
# SASOperator.__init__.
# Also, when we detect a pre_post pair where the effect part
# can never trigger, the precondition part is still important,
# but may be demoted to a prevail condition. Whether or not
# this happens depends on the presence of other pre_post
# entries for the same variable. We solve this by computing
# the sorting into prevail vs. preconditions from scratch, too.
applicability_conditions = op.get_applicability_conditions()
try:
self.convert_pairs(applicability_conditions)
except Impossible:
# The operator is never applicable.
return None
conditions_dict = dict(applicability_conditions)
new_prevail_vars = set(conditions_dict)
new_pre_post = []
for entry in op.pre_post:
new_entry = self.translate_pre_post(entry, conditions_dict)
if new_entry is not None:
new_pre_post.append(new_entry)
# Mark the variable in the entry as not prevailed.
new_var = new_entry[0]
new_prevail_vars.discard(new_var)
if not new_pre_post:
# The operator has no effect.
return None
new_prevail = sorted(
(var, value)
for (var, value) in conditions_dict.items()
if var in new_prevail_vars)
return sas_tasks.SASOperator(
name=op.name, prevail=new_prevail, pre_post=new_pre_post,
cost=op.cost)
def apply_to_axiom(self, axiom):
# The following line may generate an Impossible exception,
# which is propagated up.
self.convert_pairs(axiom.condition)
new_var, new_value = self.translate_pair(axiom.effect)
# If the new_value is always false, then the condition must
# have been impossible.
assert new_value is not always_false
if new_value is always_true:
raise DoesNothing
axiom.effect = new_var, new_value
def translate_pre_post(self, pre_post_entry, conditions_dict):
"""Return a translated version of a pre_post entry.
If the entry never causes a value change, return None.
(It might seem that a possible precondition part of pre_post
gets lost in this case, but pre_post entries that become
prevail conditions are handled elsewhere.)
conditions_dict contains all applicability conditions
(prevail/pre) of the operator, already converted. This is
used to detect effect conditions that can never fire.
The method may assume that the operator remains reachable,
i.e., that it does not have impossible preconditions, as these
are already checked elsewhere.
Possible cases:
- effect is always_true => return None
- effect equals prevailed value => return None
- effect condition is impossible given operator applicability
condition => return None
- otherwise => return converted pre_post tuple
"""
var_no, pre, post, cond = pre_post_entry
new_var_no, new_post = self.translate_pair((var_no, post))
if new_post is always_true:
return None
if pre == -1:
new_pre = -1
else:
_, new_pre = self.translate_pair((var_no, pre))
assert new_pre is not always_false, (
"This function should only be called for operators "
"whose applicability conditions are deemed possible.")
if new_post == new_pre:
return None
new_cond = list(cond)
try:
self.convert_pairs(new_cond)
except Impossible:
# The effect conditions can never be satisfied.
return None
for cond_var, cond_value in new_cond:
if (cond_var in conditions_dict and
conditions_dict[cond_var] != cond_value):
# This effect condition is not compatible with
# the applicability conditions.
return None
assert new_post is not always_false, (
"if we survived so far, this effect can trigger "
"(as far as our analysis can determine this), "
"and then new_post cannot be always_false")
assert new_pre is not always_true, (
"if this pre_post changes the value and can fire, "
"new_pre cannot be always_true")
return new_var_no, new_pre, new_post, new_cond
def translate_pair(self, fact_pair):
(var_no, value) = fact_pair
new_var_no = self.new_var_nos[var_no]
new_value = self.new_values[var_no][value]
return new_var_no, new_value
def convert_pairs(self, pairs):
# We call this convert_... because it is an in-place method.
new_pairs = []
for pair in pairs:
new_var_no, new_value = self.translate_pair(pair)
if new_value is always_false:
raise Impossible
elif new_value is not always_true:
assert new_var_no is not None
new_pairs.append((new_var_no, new_value))
pairs[:] = new_pairs
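# Illustrative sketch (hypothetical helper, not in the original module):
# a variable with domain {0, 1, 2} and init value 0 where value 2 is
# unreachable, plus a single-valued variable that is removed entirely.
def _demo_renaming():
    renaming = VarValueRenaming()
    renaming.register_variable(3, 0, {0, 1})  # value 2 is pruned
    renaming.register_variable(2, 1, {1})     # whole variable is removed
    assert renaming.translate_pair((0, 0)) == (0, 0)
    assert renaming.translate_pair((0, 2)) == (0, always_false)
    new_var, new_val = renaming.translate_pair((1, 1))
    assert new_var is None and new_val is always_true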
def build_renaming(dtgs):
renaming = VarValueRenaming()
for dtg in dtgs:
renaming.register_variable(dtg.size, dtg.init, dtg.reachable())
return renaming
def filter_unreachable_propositions(sas_task):
"""We remove unreachable propositions and then prune variables
with only one value.
Examples of things that are pruned:
- Constant propositions that are not detected in instantiate.py
because instantiate.py only reasons at the predicate level, and some
predicates such as "at" in Depot are constant for some objects
(hoists), but not others (trucks).
Example: "at(hoist1, distributor0)" and the associated variable
in depots-01.
- "none of those" values that are unreachable.
Example: at(truck1, ?x) = <none of those> in depots-01.
- Certain values that are relaxed reachable but detected as
unreachable after SAS instantiation because the only operators
that set them have inconsistent preconditions.
Example: on(crate0, crate0) in depots-01.
"""
if DEBUG:
sas_task.validate()
dtgs = build_dtgs(sas_task)
renaming = build_renaming(dtgs)
# apply_to_task may raise Impossible if the goal is detected as
# unreachable or TriviallySolvable if it has no goal. We let the
# exceptions propagate to the caller.
renaming.apply_to_task(sas_task)
print("%d propositions removed" % renaming.num_removed_values)
if DEBUG:
sas_task.validate()
| 20,146 | 37.669866 | 80 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/constraints.py
|
import itertools
class NegativeClause:
# disjunction of inequalities
def __init__(self, parts):
self.parts = parts
assert len(parts)
def __str__(self):
disj = " or ".join(["(%s != %s)" % (v1, v2)
for (v1, v2) in self.parts])
return "(%s)" % disj
def is_satisfiable(self):
for part in self.parts:
if part[0] != part[1]:
return True
return False
def apply_mapping(self, m):
new_parts = [(m.get(v1, v1), m.get(v2, v2)) for (v1, v2) in self.parts]
return NegativeClause(new_parts)
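# Tiny illustrative check (not part of the module): a clause becomes
# unsatisfiable once a mapping identifies both sides of every inequality.
def _demo_negative_clause():
    clause = NegativeClause([("?x", "?y")])
    assert clause.is_satisfiable()
    assert not clause.apply_mapping({"?y": "?x"}).is_satisfiable()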
class Assignment:
def __init__(self, equalities):
self.equalities = tuple(equalities)
# represents a conjunction of expressions ?x = ?y or ?x = d
# with ?x, ?y being variables and d being a domain value
self.consistent = None
self.mapping = None
self.eq_classes = None
def __str__(self):
conj = " and ".join(["(%s = %s)" % (v1, v2)
for (v1, v2) in self.equalities])
return "(%s)" % conj
def _compute_equivalence_classes(self):
eq_classes = {}
for (v1, v2) in self.equalities:
c1 = eq_classes.setdefault(v1, {v1})
c2 = eq_classes.setdefault(v2, {v2})
if c1 is not c2:
if len(c2) > len(c1):
v1, c1, v2, c2 = v2, c2, v1, c1
c1.update(c2)
for elem in c2:
eq_classes[elem] = c1
self.eq_classes = eq_classes
def _compute_mapping(self):
if not self.eq_classes:
self._compute_equivalence_classes()
# create mapping: each key is mapped to the smallest
# element in its equivalence class (with objects being
# smaller than variables)
mapping = {}
for eq_class in self.eq_classes.values():
variables = [item for item in eq_class if item.startswith("?")]
constants = [item for item in eq_class if not item.startswith("?")]
if len(constants) >= 2:
self.consistent = False
self.mapping = None
return
if constants:
set_val = constants[0]
else:
set_val = min(variables)
for entry in eq_class:
mapping[entry] = set_val
self.consistent = True
self.mapping = mapping
def is_consistent(self):
if self.consistent is None:
self._compute_mapping()
return self.consistent
def get_mapping(self):
if self.consistent is None:
self._compute_mapping()
return self.mapping
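# Illustrative sketch of the mapping computed above: every member of an
# equivalence class maps to its smallest element, with constants (names
# not starting with "?") preferred over variables.
def _demo_assignment():
    a = Assignment([("?x", "?y"), ("?y", "obj1")])
    assert a.is_consistent()
    assert a.get_mapping() == {"?x": "obj1", "?y": "obj1", "obj1": "obj1"}
    b = Assignment([("?x", "obj1"), ("?x", "obj2")])  # two distinct constants
    assert not b.is_consistent()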
class ConstraintSystem:
def __init__(self):
self.combinatorial_assignments = []
self.neg_clauses = []
def __str__(self):
combinatorial_assignments = []
for comb_assignment in self.combinatorial_assignments:
disj = " or ".join([str(assig) for assig in comb_assignment])
disj = "(%s)" % disj
combinatorial_assignments.append(disj)
assigs = " and\n".join(combinatorial_assignments)
neg_clauses = [str(clause) for clause in self.neg_clauses]
neg_clauses = " and ".join(neg_clauses)
return assigs + "(" + neg_clauses + ")"
def _all_clauses_satisfiable(self, assignment):
mapping = assignment.get_mapping()
for neg_clause in self.neg_clauses:
clause = neg_clause.apply_mapping(mapping)
if not clause.is_satisfiable():
return False
return True
def _combine_assignments(self, assignments):
new_equalities = []
for a in assignments:
new_equalities.extend(a.equalities)
return Assignment(new_equalities)
def add_assignment(self, assignment):
self.add_assignment_disjunction([assignment])
def add_assignment_disjunction(self, assignments):
self.combinatorial_assignments.append(assignments)
def add_negative_clause(self, negative_clause):
self.neg_clauses.append(negative_clause)
def combine(self, other):
"""Combines two constraint systems to a new system"""
combined = ConstraintSystem()
combined.combinatorial_assignments = (self.combinatorial_assignments +
other.combinatorial_assignments)
combined.neg_clauses = self.neg_clauses + other.neg_clauses
return combined
def copy(self):
other = ConstraintSystem()
other.combinatorial_assignments = list(self.combinatorial_assignments)
other.neg_clauses = list(self.neg_clauses)
return other
def dump(self):
print("AssignmentSystem:")
for comb_assignment in self.combinatorial_assignments:
disj = " or ".join([str(assig) for assig in comb_assignment])
print(" ASS: ", disj)
for neg_clause in self.neg_clauses:
print(" NEG: ", str(neg_clause))
def is_solvable(self):
"""Check whether the combinatorial assignments include at least
one consistent assignment under which the negative clauses
are satisfiable"""
for assignments in itertools.product(*self.combinatorial_assignments):
combined = self._combine_assignments(assignments)
if not combined.is_consistent():
continue
if self._all_clauses_satisfiable(combined):
return True
return False
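# Minimal end-to-end sketch (not part of the module): an equality that
# contradicts a negative clause makes the whole system unsolvable.
def _demo_constraint_system():
    system = ConstraintSystem()
    system.add_assignment(Assignment([("?x", "?y")]))
    system.add_negative_clause(NegativeClause([("?x", "?y")]))
    assert not system.is_solvable()  # ?x = ?y contradicts ?x != ?y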
| 5,589 | 33.720497 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/invariants.py
|
from collections import defaultdict
import itertools
import constraints
import pddl
import tools
# Notes:
# All parts of an invariant always use all non-counted variables
# -> the arity of all predicates covered by an invariant is either the
# number of the invariant variables or this value + 1
#
# We currently keep the assumption that each predicate occurs at most
# once in every invariant.
def invert_list(alist):
result = defaultdict(list)
for pos, arg in enumerate(alist):
result[arg].append(pos)
return result
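# Illustrative check (not part of the module): duplicate arguments map
# to all of their positions.
def _demo_invert_list():
    assert invert_list(["?x", "?y", "?x"]) == {"?x": [0, 2], "?y": [1]}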
def instantiate_factored_mapping(pairs):
part_mappings = [[list(zip(preimg, perm_img)) for perm_img in itertools.permutations(img)]
for (preimg, img) in pairs]
return tools.cartesian_product(part_mappings)
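# Illustrative check (not part of the module), assuming that
# tools.cartesian_product concatenates one choice per (preimg, img) pair:
# a single preimage position [0] can map to either image position.
def _demo_factored_mapping():
    mappings = list(instantiate_factored_mapping([([0], [1, 2])]))
    assert sorted(mappings) == [[(0, 1)], [(0, 2)]]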
def find_unique_variables(action, invariant):
# find unique names for invariant variables
params = {p.name for p in action.parameters}
for eff in action.effects:
params.update([p.name for p in eff.parameters])
inv_vars = []
counter = itertools.count()
for _ in range(invariant.arity()):
while True:
new_name = "?v%i" % next(counter)
if new_name not in params:
inv_vars.append(new_name)
break
return inv_vars
def get_literals(condition):
if isinstance(condition, pddl.Literal):
yield condition
elif isinstance(condition, pddl.Conjunction):
yield from condition.parts
def ensure_conjunction_sat(system, *parts):
"""Modifies the constraint system such that it is only solvable if the
conjunction of all parts is satisfiable.
Each part must be an iterator, generator, or an iterable over
literals."""
pos = defaultdict(set)
neg = defaultdict(set)
for literal in itertools.chain(*parts):
if literal.predicate == "=": # use (in)equalities in conditions
if literal.negated:
n = constraints.NegativeClause([literal.args])
system.add_negative_clause(n)
else:
a = constraints.Assignment([literal.args])
system.add_assignment_disjunction([a])
else:
if literal.negated:
neg[literal.predicate].add(literal)
else:
pos[literal.predicate].add(literal)
for pred, posatoms in pos.items():
if pred in neg:
for posatom in posatoms:
for negatom in neg[pred]:
parts = list(zip(negatom.args, posatom.args))
if parts:
negative_clause = constraints.NegativeClause(parts)
system.add_negative_clause(negative_clause)
def ensure_cover(system, literal, invariant, inv_vars):
"""Modifies the constraint system such that it is only solvable if the
invariant covers the literal"""
a = invariant.get_covering_assignments(inv_vars, literal)
    assert len(a) == 1
    # If invariants could contain several parts of one predicate, this
    # would not be true, but the code below relies on this assumption.
system.add_assignment_disjunction(a)
def ensure_inequality(system, literal1, literal2):
"""Modifies the constraint system such that it is only solvable if the
literal instantiations are not equal (ignoring whether one is negated and
the other is not)"""
if (literal1.predicate == literal2.predicate and
literal1.args):
parts = list(zip(literal1.args, literal2.args))
system.add_negative_clause(constraints.NegativeClause(parts))
class InvariantPart:
def __init__(self, predicate, order, omitted_pos=-1):
self.predicate = predicate
self.order = order
self.omitted_pos = omitted_pos
def __eq__(self, other):
# This implies equality of the omitted_pos component.
return self.predicate == other.predicate and self.order == other.order
def __ne__(self, other):
return self.predicate != other.predicate or self.order != other.order
    def __le__(self, other):
        return (self.predicate, self.order) <= (other.predicate, other.order)
    def __lt__(self, other):
        return (self.predicate, self.order) < (other.predicate, other.order)
def __hash__(self):
return hash((self.predicate, tuple(self.order)))
def __str__(self):
var_string = " ".join(map(str, self.order))
omitted_string = ""
if self.omitted_pos != -1:
omitted_string = " [%d]" % self.omitted_pos
return "%s %s%s" % (self.predicate, var_string, omitted_string)
def arity(self):
return len(self.order)
def get_assignment(self, parameters, literal):
equalities = [(arg, literal.args[argpos])
for arg, argpos in zip(parameters, self.order)]
return constraints.Assignment(equalities)
def get_parameters(self, literal):
return [literal.args[pos] for pos in self.order]
def instantiate(self, parameters):
args = ["?X"] * (len(self.order) + (self.omitted_pos != -1))
for arg, argpos in zip(parameters, self.order):
args[argpos] = arg
return pddl.Atom(self.predicate, args)
def possible_mappings(self, own_literal, other_literal):
allowed_omissions = len(other_literal.args) - len(self.order)
if allowed_omissions not in (0, 1):
return []
own_parameters = self.get_parameters(own_literal)
arg_to_ordered_pos = invert_list(own_parameters)
other_arg_to_pos = invert_list(other_literal.args)
factored_mapping = []
for key, other_positions in other_arg_to_pos.items():
own_positions = arg_to_ordered_pos.get(key, [])
len_diff = len(own_positions) - len(other_positions)
            if len_diff >= 1 or len_diff <= -2 or (len_diff == -1 and not allowed_omissions):
return []
if len_diff:
own_positions.append(-1)
allowed_omissions = 0
factored_mapping.append((other_positions, own_positions))
return instantiate_factored_mapping(factored_mapping)
def possible_matches(self, own_literal, other_literal):
assert self.predicate == own_literal.predicate
result = []
for mapping in self.possible_mappings(own_literal, other_literal):
new_order = [None] * len(self.order)
omitted = -1
for (key, value) in mapping:
if value == -1:
omitted = key
else:
new_order[value] = key
result.append(InvariantPart(other_literal.predicate, new_order, omitted))
return result
def matches(self, other, own_literal, other_literal):
return self.get_parameters(own_literal) == other.get_parameters(other_literal)
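# Illustrative sketch with a hypothetical stand-in literal: anything
# exposing an .args sequence works for get_parameters.
class _FakeLiteral:
    def __init__(self, args):
        self.args = args
def _demo_invariant_part():
    part = InvariantPart("at", [1, 0])  # parameters come from args[1], args[0]
    literal = _FakeLiteral(("truck1", "depot2"))
    assert part.get_parameters(literal) == ["depot2", "truck1"]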
class Invariant:
# An invariant is a logical expression of the type
# forall V1...Vk: sum_(part in parts) weight(part, V1, ..., Vk) <= 1.
# k is called the arity of the invariant.
    # A "part" is a symbolic fact using only variable symbols in {V1, ..., Vk, X};
# the symbol X may occur at most once.
def __init__(self, parts):
self.parts = frozenset(parts)
self.predicates = {part.predicate for part in parts}
self.predicate_to_part = {part.predicate: part for part in parts}
assert len(self.parts) == len(self.predicates)
def __eq__(self, other):
return self.parts == other.parts
def __ne__(self, other):
return self.parts != other.parts
def __lt__(self, other):
return self.parts < other.parts
def __le__(self, other):
return self.parts <= other.parts
def __hash__(self):
return hash(self.parts)
def __str__(self):
return "{%s}" % ", ".join(str(part) for part in self.parts)
def __repr__(self):
return '<Invariant %s>' % self
def arity(self):
return next(iter(self.parts)).arity()
def get_parameters(self, atom):
return self.predicate_to_part[atom.predicate].get_parameters(atom)
def instantiate(self, parameters):
return [part.instantiate(parameters) for part in self.parts]
def get_covering_assignments(self, parameters, atom):
part = self.predicate_to_part[atom.predicate]
return [part.get_assignment(parameters, atom)]
        # If there were several parts for the same predicate, the list
        # would contain more than one element.
def check_balance(self, balance_checker, enqueue_func):
# Check balance for this hypothesis.
actions_to_check = set()
for part in self.parts:
actions_to_check |= balance_checker.get_threats(part.predicate)
for action in actions_to_check:
heavy_action = balance_checker.get_heavy_action(action)
if self.operator_too_heavy(heavy_action):
return False
if self.operator_unbalanced(action, enqueue_func):
return False
return True
def operator_too_heavy(self, h_action):
add_effects = [eff for eff in h_action.effects
if not eff.literal.negated and
self.predicate_to_part.get(eff.literal.predicate)]
inv_vars = find_unique_variables(h_action, self)
if len(add_effects) <= 1:
return False
for eff1, eff2 in itertools.combinations(add_effects, 2):
system = constraints.ConstraintSystem()
ensure_inequality(system, eff1.literal, eff2.literal)
ensure_cover(system, eff1.literal, self, inv_vars)
ensure_cover(system, eff2.literal, self, inv_vars)
ensure_conjunction_sat(system, get_literals(h_action.precondition),
get_literals(eff1.condition),
get_literals(eff2.condition),
[eff1.literal.negate()],
[eff2.literal.negate()])
if system.is_solvable():
return True
return False
def operator_unbalanced(self, action, enqueue_func):
inv_vars = find_unique_variables(action, self)
relevant_effs = [eff for eff in action.effects
if self.predicate_to_part.get(eff.literal.predicate)]
add_effects = [eff for eff in relevant_effs
if not eff.literal.negated]
del_effects = [eff for eff in relevant_effs
if eff.literal.negated]
for eff in add_effects:
if self.add_effect_unbalanced(action, eff, del_effects, inv_vars,
enqueue_func):
return True
return False
def minimal_covering_renamings(self, action, add_effect, inv_vars):
"""computes the minimal renamings of the action parameters such
that the add effect is covered by the action.
        Each renaming is a constraint system."""
# add_effect must be covered
assigs = self.get_covering_assignments(inv_vars, add_effect.literal)
# renaming of operator parameters must be minimal
minimal_renamings = []
params = [p.name for p in action.parameters]
for assignment in assigs:
system = constraints.ConstraintSystem()
system.add_assignment(assignment)
mapping = assignment.get_mapping()
if len(params) > 1:
for (n1, n2) in itertools.combinations(params, 2):
if mapping.get(n1, n1) != mapping.get(n2, n2):
negative_clause = constraints.NegativeClause([(n1, n2)])
system.add_negative_clause(negative_clause)
minimal_renamings.append(system)
return minimal_renamings
def add_effect_unbalanced(self, action, add_effect, del_effects,
inv_vars, enqueue_func):
minimal_renamings = self.minimal_covering_renamings(action, add_effect,
inv_vars)
lhs_by_pred = defaultdict(list)
for lit in itertools.chain(get_literals(action.precondition),
get_literals(add_effect.condition),
get_literals(add_effect.literal.negate())):
lhs_by_pred[lit.predicate].append(lit)
for del_effect in del_effects:
minimal_renamings = self.unbalanced_renamings(
del_effect, add_effect, inv_vars, lhs_by_pred, minimal_renamings)
if not minimal_renamings:
return False
# Otherwise, the balance check fails => Generate new candidates.
self.refine_candidate(add_effect, action, enqueue_func)
return True
def refine_candidate(self, add_effect, action, enqueue_func):
"""refines the candidate for an add effect that is unbalanced in the
action and adds the refined one to the queue"""
part = self.predicate_to_part[add_effect.literal.predicate]
for del_eff in [eff for eff in action.effects if eff.literal.negated]:
if del_eff.literal.predicate not in self.predicate_to_part:
for match in part.possible_matches(add_effect.literal,
del_eff.literal):
enqueue_func(Invariant(self.parts.union((match,))))
def unbalanced_renamings(self, del_effect, add_effect, inv_vars,
lhs_by_pred, unbalanced_renamings):
"""returns the renamings from unbalanced renamings for which
the del_effect does not balance the add_effect."""
system = constraints.ConstraintSystem()
ensure_cover(system, del_effect.literal, self, inv_vars)
# Since we may only rename the quantified variables of the delete effect
# we need to check that "renamings" of constants are already implied by
        # the unbalanced_renaming (of the operator parameters). The
# following system is used as a helper for this. It builds a conjunction
# that formulates that the constants are NOT renamed accordingly. We
# below check that this is impossible with each unbalanced renaming.
check_constants = False
constant_test_system = constraints.ConstraintSystem()
for a, b in system.combinatorial_assignments[0][0].equalities:
# first 0 because the system was empty before we called ensure_cover
# second 0 because ensure_cover only adds assignments with one entry
if b[0] != "?":
check_constants = True
neg_clause = constraints.NegativeClause([(a, b)])
constant_test_system.add_negative_clause(neg_clause)
ensure_inequality(system, add_effect.literal, del_effect.literal)
still_unbalanced = []
for renaming in unbalanced_renamings:
if check_constants:
new_sys = constant_test_system.combine(renaming)
if new_sys.is_solvable():
# it is possible that the operator arguments are not
# mapped to constants as required for covering the delete
# effect
still_unbalanced.append(renaming)
continue
new_sys = system.combine(renaming)
if self.lhs_satisfiable(renaming, lhs_by_pred):
implies_system = self.imply_del_effect(del_effect, lhs_by_pred)
if not implies_system:
still_unbalanced.append(renaming)
continue
new_sys = new_sys.combine(implies_system)
if not new_sys.is_solvable():
still_unbalanced.append(renaming)
return still_unbalanced
def lhs_satisfiable(self, renaming, lhs_by_pred):
system = renaming.copy()
ensure_conjunction_sat(system, *itertools.chain(lhs_by_pred.values()))
return system.is_solvable()
def imply_del_effect(self, del_effect, lhs_by_pred):
"""returns a constraint system that is solvable if lhs implies
the del effect (only if lhs is satisfiable). If a solvable
lhs never implies the del effect, return None."""
# del_effect.cond and del_effect.atom must be implied by lhs
implies_system = constraints.ConstraintSystem()
for literal in itertools.chain(get_literals(del_effect.condition),
[del_effect.literal.negate()]):
poss_assignments = []
for match in lhs_by_pred[literal.predicate]:
if match.negated != literal.negated:
continue
else:
a = constraints.Assignment(list(zip(literal.args, match.args)))
poss_assignments.append(a)
if not poss_assignments:
return None
implies_system.add_assignment_disjunction(poss_assignments)
return implies_system
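# Illustrative sketch (not part of the module): a binary invariant built
# from two parts over different predicates.
def _demo_invariant():
    inv = Invariant([InvariantPart("at", [0, 1]), InvariantPart("in", [1, 0])])
    assert inv.arity() == 2
    assert inv.predicates == {"at", "in"}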
| 17,202 | 40.55314 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/greedy_join.py
|
import sys
import pddl
import pddl_to_prolog
class OccurrencesTracker:
"""Keeps track of the number of times each variable appears
in a list of symbolic atoms."""
def __init__(self, rule):
self.occurrences = {}
self.update(rule.effect, +1)
for cond in rule.conditions:
self.update(cond, +1)
def update(self, symatom, delta):
for var in symatom.args:
if var[0] == "?":
if var not in self.occurrences:
self.occurrences[var] = 0
self.occurrences[var] += delta
assert self.occurrences[var] >= 0
if not self.occurrences[var]:
del self.occurrences[var]
def variables(self):
return set(self.occurrences)
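# Illustrative sketch with hypothetical stand-ins for rule atoms: the
# tracker only reads an .args attribute, so simple stubs suffice.
class _FakeAtom:
    def __init__(self, args):
        self.args = args
def _demo_occurrences():
    class _FakeRule:
        effect = _FakeAtom(("?x",))
        conditions = [_FakeAtom(("?x", "?y")), _FakeAtom(("a",))]
    tracker = OccurrencesTracker(_FakeRule)
    assert tracker.occurrences == {"?x": 2, "?y": 1}  # constants are ignored
    tracker.update(_FakeAtom(("?y",)), -1)
    assert tracker.variables() == {"?x"}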
class CostMatrix:
def __init__(self, joinees):
self.joinees = []
self.cost_matrix = []
for joinee in joinees:
self.add_entry(joinee)
def add_entry(self, joinee):
new_row = [self.compute_join_cost(joinee, other) for other in self.joinees]
self.cost_matrix.append(new_row)
self.joinees.append(joinee)
def delete_entry(self, index):
for row in self.cost_matrix[index + 1:]:
del row[index]
del self.cost_matrix[index]
del self.joinees[index]
def find_min_pair(self):
assert len(self.joinees) >= 2
min_cost = (sys.maxsize, sys.maxsize)
for i, row in enumerate(self.cost_matrix):
for j, entry in enumerate(row):
if entry < min_cost:
min_cost = entry
left_index, right_index = i, j
return left_index, right_index
def remove_min_pair(self):
left_index, right_index = self.find_min_pair()
left, right = self.joinees[left_index], self.joinees[right_index]
assert left_index > right_index
self.delete_entry(left_index)
self.delete_entry(right_index)
return (left, right)
def compute_join_cost(self, left_joinee, right_joinee):
left_vars = pddl_to_prolog.get_variables([left_joinee])
right_vars = pddl_to_prolog.get_variables([right_joinee])
if len(left_vars) > len(right_vars):
left_vars, right_vars = right_vars, left_vars
common_vars = left_vars & right_vars
return (len(left_vars) - len(common_vars),
len(right_vars) - len(common_vars),
-len(common_vars))
def can_join(self):
return len(self.joinees) >= 2
class ResultList:
def __init__(self, rule, name_generator):
self.final_effect = rule.effect
self.result = []
self.name_generator = name_generator
def get_result(self):
self.result[-1].effect = self.final_effect
return self.result
def add_rule(self, type, conditions, effect_vars):
effect = pddl.Atom(next(self.name_generator), effect_vars)
rule = pddl_to_prolog.Rule(conditions, effect)
rule.type = type
self.result.append(rule)
return rule.effect
def greedy_join(rule, name_generator):
assert len(rule.conditions) >= 2
cost_matrix = CostMatrix(rule.conditions)
occurrences = OccurrencesTracker(rule)
result = ResultList(rule, name_generator)
while cost_matrix.can_join():
joinees = list(cost_matrix.remove_min_pair())
for joinee in joinees:
occurrences.update(joinee, -1)
common_vars = set(joinees[0].args) & set(joinees[1].args)
condition_vars = set(joinees[0].args) | set(joinees[1].args)
effect_vars = occurrences.variables() & condition_vars
for i, joinee in enumerate(joinees):
joinee_vars = set(joinee.args)
retained_vars = joinee_vars & (effect_vars | common_vars)
if retained_vars != joinee_vars:
joinees[i] = result.add_rule("project", [joinee], sorted(retained_vars))
joint_condition = result.add_rule("join", joinees, sorted(effect_vars))
cost_matrix.add_entry(joint_condition)
occurrences.update(joint_condition, +1)
# assert occurrences.variables() == set(rule.effect.args)
# for var in set(rule.effect.args):
# assert occurrences.occurrences[var] == 2 * rule.effect.args.count(var)
return result.get_result()
| 4,347 | 38.171171 | 88 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/instantiate.py
|
#! /usr/bin/env python3
from collections import defaultdict
import build_model
import pddl_to_prolog
import pddl
import timers
def get_fluent_facts(task, model):
fluent_predicates = set()
for action in task.actions:
for effect in action.effects:
fluent_predicates.add(effect.literal.predicate)
for axiom in task.axioms:
fluent_predicates.add(axiom.name)
return {fact for fact in model
if fact.predicate in fluent_predicates}
def get_objects_by_type(typed_objects, types):
result = defaultdict(list)
supertypes = {}
for type in types:
supertypes[type.name] = type.supertype_names
for obj in typed_objects:
result[obj.type_name].append(obj.name)
for type in supertypes[obj.type_name]:
result[type].append(obj.name)
return result
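# Illustrative sketch with hypothetical stand-ins for pddl.TypedObject
# and pddl.Type: only the attributes read above are provided.
def _demo_objects_by_type():
    class _Obj:
        def __init__(self, name, type_name):
            self.name, self.type_name = name, type_name
    class _Type:
        def __init__(self, name, supertype_names):
            self.name, self.supertype_names = name, supertype_names
    types = [_Type("object", []), _Type("truck", ["object"])]
    result = get_objects_by_type([_Obj("t1", "truck")], types)
    assert result["truck"] == ["t1"] and result["object"] == ["t1"]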
def instantiate(task, model):
relaxed_reachable = False
fluent_facts = get_fluent_facts(task, model)
init_facts = set()
init_assignments = {}
for element in task.init:
if isinstance(element, pddl.Assign):
init_assignments[element.fluent] = element.expression
else:
init_facts.add(element)
type_to_objects = get_objects_by_type(task.objects, task.types)
instantiated_actions = []
instantiated_axioms = []
reachable_action_parameters = defaultdict(list)
for atom in model:
if isinstance(atom.predicate, pddl.Action):
action = atom.predicate
parameters = action.parameters
inst_parameters = atom.args[:len(parameters)]
# Note: It's important that we use the action object
# itself as the key in reachable_action_parameters (rather
# than action.name) since we can have multiple different
# actions with the same name after normalization, and we
# want to distinguish their instantiations.
reachable_action_parameters[action].append(inst_parameters)
variable_mapping = {par.name: arg
for par, arg in zip(parameters, atom.args)}
inst_action = action.instantiate(
variable_mapping, init_facts, init_assignments,
fluent_facts, type_to_objects,
task.use_min_cost_metric)
if inst_action:
instantiated_actions.append(inst_action)
elif isinstance(atom.predicate, pddl.Axiom):
axiom = atom.predicate
variable_mapping = {par.name: arg
for par, arg in zip(axiom.parameters, atom.args)}
inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
elif atom.predicate == "@goal-reachable":
relaxed_reachable = True
return (relaxed_reachable, fluent_facts, instantiated_actions,
sorted(instantiated_axioms), reachable_action_parameters)
def explore(task):
prog = pddl_to_prolog.translate(task)
model = build_model.compute_model(prog)
with timers.timing("Completing instantiation"):
return instantiate(task, model)
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
relaxed_reachable, atoms, actions, axioms, _ = explore(task)
print("goal relaxed reachable: %s" % relaxed_reachable)
print("%d atoms:" % len(atoms))
for atom in atoms:
print(" ", atom)
print()
print("%d actions:" % len(actions))
for action in actions:
action.dump()
print()
print()
print("%d axioms:" % len(axioms))
for axiom in axioms:
axiom.dump()
print()
| 3,722 | 34.798077 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/src/translate/normalize.py
|
#! /usr/bin/env python3
import copy
import pddl
class ConditionProxy:
def clone_owner(self):
clone = copy.copy(self)
clone.owner = copy.copy(clone.owner)
return clone
class PreconditionProxy(ConditionProxy):
def __init__(self, action):
self.owner = action
self.condition = action.precondition
def set(self, new_condition):
self.owner.precondition = self.condition = new_condition
def register_owner(self, task):
task.actions.append(self.owner)
def delete_owner(self, task):
task.actions.remove(self.owner)
def build_rules(self, rules):
action = self.owner
rule_head = get_action_predicate(action)
rule_body = condition_to_rule_body(action.parameters, self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
return self.owner.type_map
class EffectConditionProxy(ConditionProxy):
def __init__(self, action, effect):
self.action = action
self.owner = effect
self.condition = effect.condition
def set(self, new_condition):
self.owner.condition = self.condition = new_condition
def register_owner(self, task):
self.action.effects.append(self.owner)
def delete_owner(self, task):
self.action.effects.remove(self.owner)
def build_rules(self, rules):
effect = self.owner
rule_head = effect.literal
if not rule_head.negated:
rule_body = [get_action_predicate(self.action)]
rule_body += condition_to_rule_body([], self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
return self.action.type_map
class AxiomConditionProxy(ConditionProxy):
def __init__(self, axiom):
self.owner = axiom
self.condition = axiom.condition
def set(self, new_condition):
self.owner.condition = self.condition = new_condition
def register_owner(self, task):
task.axioms.append(self.owner)
def delete_owner(self, task):
task.axioms.remove(self.owner)
def build_rules(self, rules):
axiom = self.owner
app_rule_head = get_axiom_predicate(axiom)
app_rule_body = condition_to_rule_body(axiom.parameters, self.condition)
rules.append((app_rule_body, app_rule_head))
params = axiom.parameters[:axiom.num_external_parameters]
eff_rule_head = pddl.Atom(axiom.name, [par.name for par in params])
eff_rule_body = [app_rule_head]
rules.append((eff_rule_body, eff_rule_head))
def get_type_map(self):
return self.owner.type_map
class GoalConditionProxy(ConditionProxy):
def __init__(self, task):
self.owner = task
self.condition = task.goal
def set(self, new_condition):
self.owner.goal = self.condition = new_condition
def register_owner(self, task):
# this assertion should never trigger, because disjunctive
# goals are now implemented with axioms
# (see substitute_complicated_goal)
assert False, "Disjunctive goals not (yet) implemented."
def delete_owner(self, task):
# this assertion should never trigger, because disjunctive
# goals are now implemented with axioms
# (see substitute_complicated_goal)
assert False, "Disjunctive goals not (yet) implemented."
def build_rules(self, rules):
rule_head = pddl.Atom("@goal-reachable", [])
rule_body = condition_to_rule_body([], self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
# HACK!
# Method uniquify_variables HAS already been called (which is good).
# We call it here again for its SIDE EFFECT of collecting the type_map
# (which is bad). Having "top-level conditions" (currently, only goal
# conditions, but might also include safety conditions and similar)
# contained in a separate wrapper class that stores a type map might
# be a better design.
type_map = {}
self.condition.uniquify_variables(type_map)
return type_map
def get_action_predicate(action):
name = action
variables = [par.name for par in action.parameters]
if isinstance(action.precondition, pddl.ExistentialCondition):
variables += [par.name for par in action.precondition.parameters]
return pddl.Atom(name, variables)
def get_axiom_predicate(axiom):
name = axiom
variables = [par.name for par in axiom.parameters]
if isinstance(axiom.condition, pddl.ExistentialCondition):
variables += [par.name for par in axiom.condition.parameters]
return pddl.Atom(name, variables)
def all_conditions(task):
for action in task.actions:
yield PreconditionProxy(action)
for effect in action.effects:
yield EffectConditionProxy(action, effect)
for axiom in task.axioms:
yield AxiomConditionProxy(axiom)
yield GoalConditionProxy(task)
# [1] Remove universal quantifications from conditions.
#
# Replace, in a top-down fashion, <forall(vars, phi)> by <not(not-all-phi)>,
# where <not-all-phi> is a new axiom.
#
# <not-all-phi> is defined as <not(forall(vars,phi))>, which is of course
# translated to NNF. The parameters of the new axioms are exactly the free
# variables of <forall(vars, phi)>.
def remove_universal_quantifiers(task):
def recurse(condition):
# Uses new_axioms_by_condition and type_map from surrounding scope.
if isinstance(condition, pddl.UniversalCondition):
axiom_condition = condition.negate()
parameters = sorted(axiom_condition.free_variables())
typed_parameters = tuple(pddl.TypedObject(v, type_map[v]) for v in parameters)
axiom = new_axioms_by_condition.get((axiom_condition, typed_parameters))
if not axiom:
condition = recurse(axiom_condition)
axiom = task.add_axiom(list(typed_parameters), condition)
new_axioms_by_condition[(condition, typed_parameters)] = axiom
return pddl.NegatedAtom(axiom.name, parameters)
else:
new_parts = [recurse(part) for part in condition.parts]
return condition.change_parts(new_parts)
new_axioms_by_condition = {}
for proxy in tuple(all_conditions(task)):
# Cannot use generator because we add new axioms on the fly.
if proxy.condition.has_universal_part():
type_map = proxy.get_type_map()
proxy.set(recurse(proxy.condition))
# [2] Pull disjunctions to the root of the condition.
#
# After removing universal quantifiers, the (k-ary generalization of the)
# following rules suffice for doing that:
# (1) or(phi, or(psi, psi')) == or(phi, psi, psi')
# (2) exists(vars, or(phi, psi)) == or(exists(vars, phi), exists(vars, psi))
# (3) and(phi, or(psi, psi')) == or(and(phi, psi), and(phi, psi'))
def build_DNF(task):
def recurse(condition):
disjunctive_parts = []
other_parts = []
for part in condition.parts:
part = recurse(part)
if isinstance(part, pddl.Disjunction):
disjunctive_parts.append(part)
else:
other_parts.append(part)
if not disjunctive_parts:
return condition
# Rule (1): Associativity of disjunction.
if isinstance(condition, pddl.Disjunction):
result_parts = other_parts
for part in disjunctive_parts:
result_parts.extend(part.parts)
return pddl.Disjunction(result_parts)
# Rule (2): Distributivity disjunction/existential quantification.
if isinstance(condition, pddl.ExistentialCondition):
parameters = condition.parameters
result_parts = [pddl.ExistentialCondition(parameters, (part,))
for part in disjunctive_parts[0].parts]
return pddl.Disjunction(result_parts)
# Rule (3): Distributivity disjunction/conjunction.
assert isinstance(condition, pddl.Conjunction)
result_parts = [pddl.Conjunction(other_parts)]
while disjunctive_parts:
previous_result_parts = result_parts
result_parts = []
parts_to_distribute = disjunctive_parts.pop().parts
for part1 in previous_result_parts:
for part2 in parts_to_distribute:
result_parts.append(pddl.Conjunction((part1, part2)))
return pddl.Disjunction(result_parts)
for proxy in all_conditions(task):
if proxy.condition.has_disjunction():
proxy.set(recurse(proxy.condition).simplified())
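# Minimal standalone sketch (independent of the pddl classes) of rule (3)
# above: distributing a conjunction over one disjunction, represented
# here with plain tuples instead of condition objects.
def _demo_distribute(phi, psi_parts):
    # and(phi, or(*psi_parts)) == or(and(phi, psi) for each psi)
    return [("and", phi, psi) for psi in psi_parts]
# _demo_distribute("phi", ["psi1", "psi2"])
#   == [("and", "phi", "psi1"), ("and", "phi", "psi2")]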
# [3] Split conditions at the outermost disjunction.
def split_disjunctions(task):
for proxy in tuple(all_conditions(task)):
# Cannot use generator directly because we add/delete entries.
if isinstance(proxy.condition, pddl.Disjunction):
for part in proxy.condition.parts:
new_proxy = proxy.clone_owner()
new_proxy.set(part)
new_proxy.register_owner(task)
proxy.delete_owner(task)
# [4] Pull existential quantifiers out of conjunctions and group them.
#
# After removing universal quantifiers and creating the disjunctive form,
# only the following (representatives of) rules are needed:
# (1) exists(vars, exists(vars', phi)) == exists(vars + vars', phi)
# (2) and(phi, exists(vars, psi)) == exists(vars, and(phi, psi)),
# if var does not occur in phi as a free variable.
def move_existential_quantifiers(task):
def recurse(condition):
existential_parts = []
other_parts = []
for part in condition.parts:
part = recurse(part)
if isinstance(part, pddl.ExistentialCondition):
existential_parts.append(part)
else:
other_parts.append(part)
if not existential_parts:
return condition
# Rule (1): Combine nested quantifiers.
if isinstance(condition, pddl.ExistentialCondition):
new_parameters = condition.parameters + existential_parts[0].parameters
new_parts = existential_parts[0].parts
return pddl.ExistentialCondition(new_parameters, new_parts)
# Rule (2): Pull quantifiers out of conjunctions.
assert isinstance(condition, pddl.Conjunction)
new_parameters = []
new_conjunction_parts = other_parts
for part in existential_parts:
new_parameters += part.parameters
new_conjunction_parts += part.parts
new_conjunction = pddl.Conjunction(new_conjunction_parts)
return pddl.ExistentialCondition(new_parameters, (new_conjunction,))
for proxy in all_conditions(task):
if proxy.condition.has_existential_part():
proxy.set(recurse(proxy.condition).simplified())
# [5a] Drop existential quantifiers from axioms, turning them
# into parameters.
def eliminate_existential_quantifiers_from_axioms(task):
# Note: This is very redundant with the corresponding method for
# actions and could easily be merged if axioms and actions were
# unified.
for axiom in task.axioms:
precond = axiom.condition
if isinstance(precond, pddl.ExistentialCondition):
# Copy parameter list, since it can be shared with
# parameter lists of other versions of this axiom (e.g.
# created when splitting up disjunctive preconditions).
axiom.parameters = list(axiom.parameters)
axiom.parameters.extend(precond.parameters)
axiom.condition = precond.parts[0]
# [5b] Drop existential quantifiers from action preconditions,
# turning them into action parameters (that don't form part of the
# name of the action).
def eliminate_existential_quantifiers_from_preconditions(task):
for action in task.actions:
precond = action.precondition
if isinstance(precond, pddl.ExistentialCondition):
# Copy parameter list, since it can be shared with
# parameter lists of other versions of this action (e.g.
# created when splitting up disjunctive preconditions).
action.parameters = list(action.parameters)
action.parameters.extend(precond.parameters)
action.precondition = precond.parts[0]
# [5c] Eliminate existential quantifiers from effect conditions
#
# For effect conditions, we replace "when exists(x, phi) then e" with
# "forall(x): when phi then e".
def eliminate_existential_quantifiers_from_conditional_effects(task):
for action in task.actions:
for effect in action.effects:
condition = effect.condition
if isinstance(condition, pddl.ExistentialCondition):
effect.parameters = list(effect.parameters)
effect.parameters.extend(condition.parameters)
effect.condition = condition.parts[0]
def substitute_complicated_goal(task):
goal = task.goal
if isinstance(goal, pddl.Literal):
return
elif isinstance(goal, pddl.Conjunction):
for item in goal.parts:
if not isinstance(item, pddl.Literal):
break
else:
return
new_axiom = task.add_axiom([], goal)
task.goal = pddl.Atom(new_axiom.name, new_axiom.parameters)
# Combine Steps [1], [2], [3], [4], [5] and do some additional verification
# that the task makes sense.
def normalize(task):
remove_universal_quantifiers(task)
substitute_complicated_goal(task)
build_DNF(task)
split_disjunctions(task)
move_existential_quantifiers(task)
eliminate_existential_quantifiers_from_axioms(task)
eliminate_existential_quantifiers_from_preconditions(task)
eliminate_existential_quantifiers_from_conditional_effects(task)
verify_axiom_predicates(task)
def verify_axiom_predicates(task):
# Verify that derived predicates are not used in :init or
# action effects.
axiom_names = set()
for axiom in task.axioms:
axiom_names.add(axiom.name)
for fact in task.init:
# Note that task.init can contain the assignment to (total-cost)
# in addition to regular atoms.
if getattr(fact, "predicate", None) in axiom_names:
raise SystemExit(
"error: derived predicate %r appears in :init fact '%s'" %
(fact.predicate, fact))
for action in task.actions:
for effect in action.effects:
if effect.literal.predicate in axiom_names:
raise SystemExit(
"error: derived predicate %r appears in effect of action %r" %
(effect.literal.predicate, action.name))
# [6] Build rules for exploration component.
def build_exploration_rules(task):
result = []
for proxy in all_conditions(task):
proxy.build_rules(result)
return result
def condition_to_rule_body(parameters, condition):
result = []
for par in parameters:
result.append(par.get_atom())
if not isinstance(condition, pddl.Truth):
if isinstance(condition, pddl.ExistentialCondition):
for par in condition.parameters:
result.append(par.get_atom())
condition = condition.parts[0]
if isinstance(condition, pddl.Conjunction):
parts = condition.parts
else:
parts = (condition,)
for part in parts:
if isinstance(part, pddl.Falsity):
# Use an atom in the body that is always false because
# it is not initially true and doesn't occur in the
# head of any rule.
return [pddl.Atom("@always-false", [])]
assert isinstance(part, pddl.Literal), "Condition not normalized: %r" % part
if not part.negated:
result.append(part)
return result
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
normalize(task)
task.dump()
| 16,121 | 39.507538 | 90 |
py
|