| file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|
| stringlengths 20-207 | stringlengths 5-3.85M | int64 5-3.85M | stringclasses 9 values | float64 1.33-100 | int64 4-993 | float64 0.26-0.93 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-lama-5min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
CONFIGS = [
IssueConfig(
"lama-" + build,
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama", "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
]
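# Note added for clarity (not in the original script): IssueExperiment
# prefixes every config nick with its revision, so the comprehension above
# yields algorithm names such as "issue213-v8-lama-release32" and
# "issue213-v8-lama-release64". The report section below reconstructs
# exactly these names via "{rev}-{config_nick}-{build}".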
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick in ["lama"]]
outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
outfile=outfile)
exp.add_step(
'publish-report', subprocess.call, ['publish', outfile])
exp.run_steps()
| 2,282 | Python | 29.039473 | 102 | 0.683611 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-lama-5min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIGS = [
IssueConfig(
"lama-" + build,
[],
build_options=[build],
driver_options=["--build", build, "--alias", "seq-sat-lama-2011", "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick in ["lama"]]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 2,100 | Python | 28.591549 | 104 | 0.698571 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-lama-30min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIGS = [
IssueConfig(
"lama-" + build,
[],
build_options=[build],
driver_options=["--build", build, "--alias", "seq-sat-lama-2011"])
for rev in REVISIONS
for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick in ["lama"]]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 2,070 | Python | 28.169014 | 78 | 0.7 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-opt-5min-debug.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["debug64"]
SEARCHES = [
("bjolp", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
("blind", ["--search", "astar(blind())"]),
("cegar", ["--search", "astar(cegar())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("lmcut", ["--search", "astar(lmcut())"]),
("mas", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"]),
("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
("h2", ["--search", "astar(hm(m=2))"]),
("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
search,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 2,648 | Python | 32.1125 | 112 | 0.67145 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-sat-extra-configs.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_DICT = {
"lama-first-typed": [
"--heuristic", "hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))",
"--heuristic", "hff=ff_synergy(hlm)",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
" single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
" preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=True,"
" preferred_successors_first=False)"],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in CONFIG_DICT.items()]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
exp.run_steps()
| 2,718 | Python | 31.759036 | 119 | 0.672921 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/common_setup.py

# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
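# Illustrative invocation (assumption, not part of the original module):
#   ./v8-lama-5min.py --test=yes
# forces the small local test suite; with the default --test=auto, the test
# suite is chosen automatically whenever the script does not run on a
# cluster node (see is_test_run() below).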
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"quality",
"run_dir",
]
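    # Added note: portfolio runs interleave several searches, so single-search
    # statistics such as expansions or search_time are not meaningful for
    # them; get_supported_attributes() below therefore falls back to this
    # reduced list for configs recognized as portfolios (nick containing
    # "fdss").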
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,782 | Python | 36.425316 | 82 | 0.61832 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-opt-5min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb())"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"),
("occ", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"),
("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"),
("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"),
("h2", "astar(hm(m=2))"),
("hmax", "astar(hmax())"),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
["--search", search],
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
algorithm_pairs = [
("{rev}-{search_nick}-{build1}".format(**locals()),
"{rev}-{search_nick}-{build2}".format(**locals()),
"Diff ({search_nick}-{rev})".format(**locals()))
for search_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 3,167 | Python | 33.434782 | 103 | 0.675087 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-lama-first-30min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_NICKS = [
("lama-first-syn", [
"--heuristic",
"""hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one),
transform=adapt_costs(one))""",
"--heuristic", "hff=ff_synergy(hlm)",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]),
("lama-first-no-syn", [
"--heuristic",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one))",
"--heuristic", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]),
]
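# Added note (not in the original script): the two configs are meant to
# isolate the effect of the synergy wrapper. "lama-first-syn" uses the
# lama_synergy/ff_synergy evaluators, while "lama-first-no-syn" builds the
# same lazy_greedy search from plain lmcount() and ff() heuristics.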
CONFIGS = [
IssueConfig(
config_nick + "-" + build,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in CONFIG_NICKS]
print algorithm_pairs
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 2,914 | Python | 31.752809 | 115 | 0.650995 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-lama-30min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
CONFIGS = [
IssueConfig(
"lama-" + build,
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama"])
for rev in REVISIONS
for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick in ["lama"]]
outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
outfile=outfile)
exp.add_step(
'publish-report', subprocess.call, ['publish', outfile])
exp.run_steps()
| 2,252 | Python | 28.644736 | 102 | 0.684725 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v2-blind-m32.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v2"]
BUILDS = ["release32"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time", "memory"])
exp.run_steps()
| 1,206 | Python | 23.14 | 62 | 0.705638 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v3-opt.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-base", "issue213-v1", "issue213-v3"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb(max_time=900))"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), "
"merge_strategy=merge_dfp(), "
"label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"),
("seq", "astar(operatorcounting([state_equation_constraints()]))"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time"]
# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
# Compare across revisions and builds.
# lmcut-base-32 vs. lmcut-v3-64
build1, build2 = BUILDS
rev1, rev2 = "issue213-base", "issue213-v3"
algorithm_pairs = [
("{rev1}-{config_nick}-{build1}".format(**locals()),
"{rev2}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-before-vs-after")
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release32", "issue213-v3-blind-release32"]),
name="issue213-relative-scatter-blind-m32-v1-vs-v3-{}".format(attribute))
exp.run_steps()
| 3,946 | Python | 34.881818 | 125 | 0.648505 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-opt.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb())"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"),
("seq", "astar(operatorcounting([state_equation_constraints()]))"),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
["--search", search],
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{search_nick}-{build1}".format(**locals()),
"{rev}-{search_nick}-{build2}".format(**locals()),
"Diff ({search_nick}-{rev})".format(**locals()))
for search_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
exp.run_steps()
| 2,930 | Python | 32.306818 | 103 | 0.674061 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v5-blind.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v4", "issue213-v5"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = ["pegsol-opt11-strips"] # common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')])
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time",
"hash_set_load_factor", "hash_set_resizings"]
# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]),
name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute))
exp.run_steps()
| 3,084 | Python | 33.277777 | 93 | 0.651427 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v6-blind.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v5", "issue213-v6"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')])
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time",
"hash_set_load_factor", "hash_set_resizings"]
# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]),
name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute))
exp.run_steps()
| 3,057 | Python | 32.977777 | 93 | 0.651619 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/custom-parser.py

#! /usr/bin/env python
from lab.parser import Parser
def main():
parser = Parser()
parser.add_pattern(
"hash_set_load_factor",
"Hash set load factor: \d+/\d+ = (.+)",
required=False,
type=float)
parser.add_pattern(
"hash_set_resizings",
"Hash set resizings: (\d+)",
required=False,
type=int)
print "Running custom parser"
parser.parse()
main()
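# Hypothetical planner output these patterns would match (sketch only, the
# numbers are made up):
#   Hash set load factor: 96/128 = 0.75
#   Hash set resizings: 7
# yielding hash_set_load_factor=0.75 and hash_set_resizings=7 for the run.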
| 432 | Python | 18.681817 | 47 | 0.55787 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-opt-extra-configs.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"),
("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"),
("h2", "astar(hm(m=2))"),
("hmax", "astar(hmax())"),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
["--search", search],
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{search_nick}-{build1}".format(**locals()),
"{rev}-{search_nick}-{build2}".format(**locals()),
"Diff ({search_nick}-{rev})".format(**locals()))
for search_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
exp.run_steps()
| 2,390 | Python | 29.265822 | 75 | 0.679498 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-opt-5min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
("blind", ["--search", "astar(blind())"]),
("cegar", ["--search", "astar(cegar())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("lmcut", ["--search", "astar(lmcut())"]),
("mas", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"]),
("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
("h2", ["--search", "astar(hm(m=2))"]),
("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
search,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in SEARCHES]
outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
outfile=outfile)
exp.add_step(
'publish-report', subprocess.call, ['publish', outfile])
exp.run_steps()
| 3,401 | Python | 34.072165 | 112 | 0.655984 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v2-blind-m64.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v2"]
BUILDS = ["release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time", "memory"])
exp.run_steps()
| 1,206 | Python | 23.14 | 62 | 0.705638 |

makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-opt-30min.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
("blind", ["--search", "astar(blind())"]),
("cegar", ["--search", "astar(cegar())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("lmcut", ["--search", "astar(lmcut())"]),
("mas", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"]),
("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
("h2", ["--search", "astar(hm(m=2))"]),
("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
search,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "30m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in SEARCHES]
outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
outfile=outfile)
exp.add_step(
'publish-report', subprocess.call, ['publish', outfile])
exp.run_steps()
| 3,402 |
Python
| 34.082474 | 112 | 0.656085 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-lama-first-pref-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_NICKS = [
("lama-first-syn", [
"--heuristic",
"""hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one),
transform=adapt_costs(one))""",
"--heuristic", "hff=ff_synergy(hlm)",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]),
("lama-first-no-syn-pref-false", [
"--heuristic",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=false)",
"--heuristic", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]),
("lama-first-no-syn-pref-true", [
"--heuristic",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=true)",
"--heuristic", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]),
]
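# The three configurations contrast the lama_synergy landmark/FF pairing with
# the plain lmcount landmark heuristic, once with preferred operators disabled
# (pref=false) and once enabled (pref=true).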
CONFIGS = [
IssueConfig(
config_nick + "-" + build,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{rev}-{config_nick}-{build1}".format(**locals()),
             "{rev}-{config_nick}-{build2}".format(**locals()),
             "Diff ({config_nick}-{rev})".format(**locals()))
            for config_nick, _ in CONFIG_NICKS]
        print(algorithm_pairs)
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 3,344 |
Python
| 34.210526 | 127 | 0.638158 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v4-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v3", "issue213-v4"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time",
"hash_set_load_factor", "hash_set_resizings"]
# Compare revisions.
# blind-v1-32 vs. blind-v3-32 vs. blind-v4-32
# blind-v1-64 vs. blind-v3-64 vs. blind-v4-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# blind-v1-32 vs. blind-v1-64
# blind-v3-32 vs. blind-v3-64
# blind-v4-32 vs. blind-v4-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]),
name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute))
exp.run_steps()
| 2,930 |
Python
| 32.306818 | 93 | 0.649829 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--heuristic",
"""hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one),
transform=adapt_costs(one))""",
"--heuristic", "hff=ff_synergy(hlm)",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in CONFIG_DICT.items()]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
exp.run_steps()
| 3,093 |
Python
| 29.333333 | 76 | 0.617847 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
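            # e.g. val1=10, val2=5 yields the point (10, 0.5): algorithm 2's
            # value is half of algorithm 1's on this task (hypothetical numbers).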
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 |
Python
| 35.566037 | 78 | 0.59871 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-sat-5min-debug.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["debug64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"lama-first-typed": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false)"],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 3,057 |
Python
| 30.204081 | 103 | 0.630357 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-opt-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb())"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"),
("seq", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"),
("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"),
("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"),
("h2", "astar(hm(m=2))"),
("hmax", "astar(hmax())"),
]
CONFIGS = [
IssueConfig(
"-".join([search_nick, build]),
["--search", search],
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "30m"])
for rev in REVISIONS
for build in BUILDS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{rev}-{search_nick}-{build1}".format(**locals()),
             "{rev}-{search_nick}-{build2}".format(**locals()),
             "Diff ({search_nick}-{rev})".format(**locals()))
            for search_nick, search in SEARCHES]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 3,168 |
Python
| 33.445652 | 103 | 0.675189 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-sat-5min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"lama-first-typed": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false)"],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, _ in CONFIG_DICT.items()]
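        # For example, the lama-first pair is
        # ("issue213-v8-lama-first-release32",
        #  "issue213-v8-lama-first-release64",
        #  "Diff (lama-first-issue213-v8)").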
outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
outfile=outfile)
exp.add_step(
'publish-report', subprocess.call, ['publish', outfile])
exp.run_steps()
| 3,821 |
Python
| 32.234782 | 103 | 0.624706 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-sat-5min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--heuristic",
"""hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one),
transform=adapt_costs(one))""",
"--heuristic", "hff=ff_synergy(hlm)",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"ff-typed": [
"--heuristic", "hff=ff()",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
" type_based([hff, g()])], boost=1000),"
" preferred=[hff], cost_type=one)"],
}
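# "ff-typed" alternates three open lists (plain FF, FF restricted to preferred
# operators, and a type-based list over [hff, g()]) with boost=1000, as
# spelled out in the configuration string above.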
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "5m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{rev}-{config_nick}-{build1}".format(**locals()),
             "{rev}-{config_nick}-{build2}".format(**locals()),
             "Diff ({config_nick}-{rev})".format(**locals()))
            for config_nick, _ in CONFIG_DICT.items()]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 3,346 |
Python
| 29.99074 | 78 | 0.613568 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v3-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v2", "issue213-v3"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
algorithm_pairs = []
revision1, revision2 = REVISIONS
for build in BUILDS:
for config_nick, search in SEARCHES:
algorithm_pairs.append(
("{revision1}-{config_nick}-{build}".format(**locals()),
"{revision2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals())))
exp.add_report(
ComparativeReport(
algorithm_pairs,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name="issue213-v2-vs-v3-blind")
exp.run_steps()
| 1,654 |
Python
| 25.693548 | 72 | 0.671705 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v7-sat-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--heuristic",
"""hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one),
transform=adapt_costs(one))""",
"--heuristic", "hff=ff_synergy(hlm)",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"ff-typed": [
"--heuristic", "hff=ff()",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
" type_based([hff, g()])], boost=1000),"
" preferred=[hff], cost_type=one)"],
}
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "30m"])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{rev}-{config_nick}-{build1}".format(**locals()),
             "{rev}-{config_nick}-{build2}".format(**locals()),
             "Diff ({config_nick}-{rev})".format(**locals()))
            for config_nick, _ in CONFIG_DICT.items()]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 3,347 |
Python
| 30 | 78 | 0.613684 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v1-base", "issue688-v1"]
BUILDS = ["release32"]
SEARCHES = [
("blind", ["--search", "astar(blind())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,229 |
Python
| 24.624999 | 62 | 0.676159 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v2-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v2-base", "issue688-v2"]
BUILDS = ["release32"]
SEARCHES = [
("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
] + [
IssueConfig(
"lama-first-{build}".format(**locals()),
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,525 |
Python
| 27.259259 | 88 | 0.648525 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
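    # e.g. get_algo_nick("issue688-v1", "blind-release32") returns
    # "issue688-v1-blind-release32".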
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
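# Minimal usage sketch (illustrative, not part of the original module): a
# hypothetical script built on the helpers above. It assumes the
# DOWNWARD_BENCHMARKS environment variable is set and that the repository
# contains the listed revisions; run it as its own experiment script.
#
#     import os
#     from common_setup import IssueConfig, IssueExperiment
#
#     exp = IssueExperiment(
#         revisions=["issue688-v1-base", "issue688-v1"],
#         configs=[IssueConfig("blind", ["--search", "astar(blind())"])],
#     )
#     exp.add_suite(os.environ["DOWNWARD_BENCHMARKS"],
#                   IssueExperiment.DEFAULT_TEST_SUITE)
#     exp.add_absolute_report_step()
#     exp.add_comparison_table_step()
#     exp.run_steps()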
| 14,171 |
Python
| 35.715026 | 79 | 0.613859 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v1-base", "issue688-v1"]
BUILDS = ["release32"]
SEARCHES = [
("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
] + [
IssueConfig(
"lama-first-{build}".format(**locals()),
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,519 |
Python
| 27.148148 | 88 | 0.647136 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v3-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v3-base", "issue688-v3"]
BUILDS = ["release32"]
SEARCHES = [
("blind", ["--search", "astar(blind())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(RelativeScatterPlotReport(
attributes=["search_time"],
filter_algorithm=["issue688-v3-base-blind-release32", "issue688-v3-blind-release32"],
get_category=lambda run1, run2: run1.get("domain"),
), outfile="{}-blind-search_time.png".format(exp.name))
exp.run_steps()
| 1,566 |
Python
| 27.490909 | 89 | 0.694125 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
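            # e.g. val1=10, val2=5 yields the point (10, 0.5): algorithm 2's
            # value is half of algorithm 1's on this task (hypothetical numbers).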
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 |
Python
| 35.566037 | 78 | 0.59871 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v3-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v3-base", "issue688-v3"]
BUILDS = ["release32"]
SEARCHES = [
("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
] + [
IssueConfig(
"lama-first-{build}".format(**locals()),
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,525 |
Python
| 27.259259 | 88 | 0.648525 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v2-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v2-base", "issue688-v2"]
BUILDS = ["release32"]
SEARCHES = [
("blind", ["--search", "astar(blind())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(RelativeScatterPlotReport(
attributes=["search_time"],
filter_algorithm=["issue688-v2-base-blind-release32", "issue688-v2-blind-release32"],
get_category=lambda run1, run2: run1.get("domain"),
), outfile="{}-blind-search_time.png".format(exp.name))
exp.run_steps()
| 1,566 |
Python
| 27.490909 | 89 | 0.694125 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v3", "issue561-v4")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v5.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v4", "issue561-v5")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/main.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import common_setup
def main(rev1, rev2):
REVS = [rev1, rev2]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
B_CONFIGS = {
'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
G_CONFIGS = {
'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
F_CONFIGS = {
'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
}
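    # Naming scheme (derived from the configuration strings above): the prefix
    # selects the merge strategy (rl = reverse level, cggl = causal graph goal
    # level, dfp = merge_dfp); the suffix selects the shrink strategy (b50k =
    # bisimulation with max_states=50000, ginf = greedy bisimulation with
    # unbounded states, f50k = shrink_fh with max_states=50000).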
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_search_parser('ms-parser.py')
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_comparison_table_step(attributes=attributes)
exp()
| 4,410 |
Python
| 54.835442 | 273 | 0.721088 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, email=None, processes=1, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
email=email)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in self.get_supported_attributes(
config_nick, attributes):
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,965 |
Python
| 35.116992 | 79 | 0.611415 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/ms-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('initial_h_value', r'initial h value: (\d+)', required=False, type=int)
parser.add_pattern('ms_final_size', r'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', r'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
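# Example of what these patterns capture: a log line such as
# "Final transition system size: 12" (value hypothetical) would store
# props['ms_final_size'] = 12; the other two patterns work analogously on
# their respective log lines.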
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_proved_unsolvability(content, props):
proved_unsolvability = False
if props['coverage'] == 0:
for line in content.splitlines():
if line == 'Completely explored state space -- no solution!':
proved_unsolvability = True
break
props['proved_unsolvability'] = proved_unsolvability
parser.add_function(check_proved_unsolvability)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
    if error not in ('none', 'timeout', 'out-of-memory'):
        print('error: %s' % error)
        return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
    if ms_abstraction_constructed is False:
        if error == 'timeout':
            ms_out_of_time = True
        elif error == 'out-of-memory':
            ms_out_of_memory = True
    elif ms_abstraction_constructed is True:
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
# Compute actual search time
    if ms_abstraction_constructed is True and props.get('search_time') is not None:
difference = props.get('search_time') - props.get('ms_construction_time')
props['actual_search_time'] = difference
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
    if plan_length is not None:
perfect_heuristic = False
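        # An optimal plan of length n visits n + 1 states (including the
        # initial state), so expanding exactly plan_length + 1 states
        # indicates the heuristic guided the search straight to the goal.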
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
parser.parse()
| 2,879 |
Python
| 36.402597 | 128 | 0.663077 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-base", "issue561-v1")
| 108 |
Python
| 14.571426 | 36 | 0.638889 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v2", "issue561-v3")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v1", "issue561-v2")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue870/base-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue870-base"]
BUILDS = ["release64", "release64dynamic"]
CONFIG_NICKS = [
("blind", ["--search", "astar(blind())"]),
("lmcut", ["--search", "astar(lmcut())"]),
#("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]),
]
CONFIGS = [
IssueConfig(
config_nick + ":" + build,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
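# Each IssueConfig nick combines configuration and build, so together with
# the revision prefix the final algorithm names look like
# "issue870-base-blind:release64" (cf. the report pairs below).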
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parse_again_step()
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{nick}:{build1}".format(**locals()),
"{rev}-{nick}:{build2}".format(**locals()),
"Diff ({rev}-{nick})".format(**locals()))
for build1, build2 in itertools.combinations(BUILDS, 2)
for nick, config in CONFIG_NICKS]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue839-opt-static-vs-dynamic")
exp.run_steps()
| 2,330 |
Python
| 27.777777 | 86 | 0.691416 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue870/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
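        # Nicks containing "fdss" (Fast Downward Stone Soup portfolios)
        # only get the PORTFOLIO_ATTRIBUTES reported; see
        # get_supported_attributes below.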
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,786 |
Python
| 36.435443 | 82 | 0.618355 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue870/v1-seq.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILDS_AND_REVISIONS = [("release64", "issue870-base"), ("release64dynamic", "issue870-v1")]
CONFIG_NICKS = [
("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=[],
configs=[],
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
for build, rev in BUILDS_AND_REVISIONS:
for config_nick, config in CONFIG_NICKS:
exp.add_algorithm(
":".join([config_nick, build, rev]),
common_setup.get_repo_base(),
rev,
config,
build_options=[build],
driver_options=["--build", build])
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
algorithm_pairs = [
("seq:release64:issue870-base",
"seq:release64dynamic:issue870-v1",
"Diff (seq)")
]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue870-seq-static-vs-dynamic")
exp.run_steps()
| 2,087 |
Python
| 28 | 92 | 0.705319 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue870/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
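    # Worked example with hypothetical values: if algorithm 1 takes 10s on
    # a task and algorithm 2 takes 5s, the point is plotted at x=10, y=0.5;
    # points below y=1 therefore favor the second algorithm.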
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 |
Python
| 35.566037 | 78 | 0.59871 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue582/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in self.get_supported_attributes(
config_nick, attributes):
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,856 |
Python
| 34.913408 | 79 | 0.612943 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue582/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from relativescatter import RelativeScatterPlotReport
import common_setup
REVS = ["issue582-base", "issue582-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"astar_lmcut": [
"--search",
"astar(lmcut())"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue582_base_v1_total_time.png'
)
exp.add_comparison_table_step()
exp()
| 663 |
Python
| 17.971428 | 59 | 0.6546 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue582/relativescatter.py
|
# -*- coding: utf-8 -*-
#
# downward uses the lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import os
from lab import tools
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute compares
    between two configurations. The attribute value in config 1 is shown on the
x-axis and the relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 4,690 |
Python
| 35.937008 | 84 | 0.624947 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/plan_manager.py
|
import itertools
import os
import os.path
import re
from . import returncodes
_PLAN_INFO_REGEX = re.compile(r"; cost = (\d+) \((unit cost|general cost)\)\n")
def _read_last_line(filename):
line = None
with open(filename) as input_file:
for line in input_file:
pass
return line
def _parse_plan(plan_filename):
"""Parse a plan file and return a pair (cost, problem_type)
summarizing the salient information. Return (None, None) for
incomplete plans."""
last_line = _read_last_line(plan_filename) or ""
match = _PLAN_INFO_REGEX.match(last_line)
if match:
return int(match.group(1)), match.group(2)
else:
return None, None
class PlanManager:
def __init__(self, plan_prefix, portfolio_bound=None, single_plan=False):
self._plan_prefix = plan_prefix
self._plan_costs = []
self._problem_type = None
if portfolio_bound is None:
portfolio_bound = "infinity"
self._portfolio_bound = portfolio_bound
self._single_plan = single_plan
def get_plan_prefix(self):
return self._plan_prefix
def get_plan_counter(self):
return len(self._plan_costs)
def get_next_portfolio_cost_bound(self):
"""Return the next plan cost bound to be used in a portfolio planner.
Initially, this is the user-specified cost bound, or "infinity"
if the user specified no bound. Once a plan has been found, it
is the cost of the best plan found so far. (This is always the
last plan found because plans must decrease in cost.)
"""
if self._plan_costs:
return self._plan_costs[-1]
else:
return self._portfolio_bound
def abort_portfolio_after_first_plan(self):
return self._single_plan
def get_problem_type(self):
if self._problem_type is None:
returncodes.exit_with_driver_critical_error("no plans found yet: cost type not set")
return self._problem_type
def process_new_plans(self):
"""Update information about plans after a planner run.
Read newly generated plans and store the relevant information.
If the last plan file is incomplete, delete it.
"""
had_incomplete_plan = False
for counter in itertools.count(self.get_plan_counter() + 1):
plan_filename = self._get_plan_file(counter)
def bogus_plan(msg):
returncodes.exit_with_driver_critical_error("%s: %s" % (plan_filename, msg))
if not os.path.exists(plan_filename):
break
if had_incomplete_plan:
bogus_plan("plan found after incomplete plan")
cost, problem_type = _parse_plan(plan_filename)
if cost is None:
had_incomplete_plan = True
print("%s is incomplete. Deleted the file." % plan_filename)
os.remove(plan_filename)
else:
print("plan manager: found new plan with cost %d" % cost)
if self._problem_type is None:
# This is the first plan we found.
self._problem_type = problem_type
else:
# Check if info from this plan matches previous info.
if self._problem_type != problem_type:
bogus_plan("problem type has changed")
if cost >= self._plan_costs[-1]:
bogus_plan("plan quality has not improved")
self._plan_costs.append(cost)
def get_existing_plans(self):
"""Yield all plans that match the given plan prefix."""
if os.path.exists(self._plan_prefix):
yield self._plan_prefix
for counter in itertools.count(start=1):
plan_filename = self._get_plan_file(counter)
if os.path.exists(plan_filename):
yield plan_filename
else:
break
def delete_existing_plans(self):
"""Delete all plans that match the given plan prefix."""
for plan in self.get_existing_plans():
os.remove(plan)
def _get_plan_file(self, number):
return "%s.%d" % (self._plan_prefix, number)
| 4,288 |
Python
| 33.869918 | 96 | 0.588386 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/util.py
|
import os
from . import returncodes
DRIVER_DIR = os.path.abspath(os.path.dirname(__file__))
REPO_ROOT_DIR = os.path.dirname(DRIVER_DIR)
BUILDS_DIR = os.path.join(REPO_ROOT_DIR, "builds")
def get_elapsed_time():
"""
Return the CPU time taken by the python process and its child
processes.
"""
if os.name == "nt":
# The child time components of os.times() are 0 on Windows.
raise NotImplementedError("cannot use get_elapsed_time() on Windows")
return sum(os.times()[:4])
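# os.times() returns (user, system, children_user, children_system,
# elapsed), so summing the first four fields yields the total CPU time of
# the driver plus its planner subprocesses.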
def find_domain_filename(task_filename):
"""
Find domain filename for the given task using automatic naming rules.
"""
dirname, basename = os.path.split(task_filename)
domain_basenames = [
"domain.pddl",
basename[:3] + "-domain.pddl",
"domain_" + basename,
"domain-" + basename,
]
for domain_basename in domain_basenames:
domain_filename = os.path.join(dirname, domain_basename)
if os.path.exists(domain_filename):
return domain_filename
returncodes.exit_with_driver_input_error(
"Error: Could not find domain file using automatic naming rules.")
| 1,166 |
Python
| 26.785714 | 77 | 0.647513 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/main.py
|
from __future__ import print_function
import logging
import os
import sys
from . import aliases
from . import arguments
from . import cleanup
from . import run_components
from . import __version__
def main():
args = arguments.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format="%(levelname)-8s %(message)s",
stream=sys.stdout)
logging.debug("processed args: %s" % args)
if args.version:
print(__version__)
sys.exit()
if args.show_aliases:
aliases.show_aliases()
sys.exit()
if args.cleanup:
cleanup.cleanup_temporary_files(args)
sys.exit()
exitcode = None
for component in args.components:
if component == "translate":
(exitcode, continue_execution) = run_components.run_translate(args)
elif component == "search":
(exitcode, continue_execution) = run_components.run_search(args)
if not args.keep_sas_file:
print("Remove intermediate file {}".format(args.sas_file))
os.remove(args.sas_file)
elif component == "validate":
(exitcode, continue_execution) = run_components.run_validate(args)
else:
assert False, "Error: unhandled component: {}".format(component)
print("{component} exit code: {exitcode}".format(**locals()))
print()
if not continue_execution:
print("Driver aborting after {}".format(component))
break
# Exit with the exit code of the last component that ran successfully.
# This means for example that if no plan was found, validate is not run,
# and therefore the return code is that of the search.
sys.exit(exitcode)
if __name__ == "__main__":
main()
| 1,832 |
Python
| 30.067796 | 79 | 0.611354 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/limits.py
|
from . import returncodes
from . import util
try:
import resource
except ImportError:
resource = None
import sys
"""
Notes on limits: On Windows, the resource module does not exist and hence we
cannot enforce any limits there. Furthermore, while the module exists on macOS,
memory limits are not enforced by that OS and hence we do not support imposing
memory limits there.
"""
CANNOT_LIMIT_MEMORY_MSG = "Setting memory limits is not supported on your platform."
CANNOT_LIMIT_TIME_MSG = "Setting time limits is not supported on your platform."
def can_set_time_limit():
return resource is not None
def can_set_memory_limit():
return resource is not None and sys.platform != "darwin"
def set_time_limit(time_limit):
if time_limit is None:
return
if not can_set_time_limit():
raise NotImplementedError(CANNOT_LIMIT_TIME_MSG)
# Reaching the soft time limit leads to a (catchable) SIGXCPU signal,
# which we catch to gracefully exit. Reaching the hard limit leads to
# a SIGKILL, which is unpreventable. We set a hard limit one second
# higher than the soft limit to make sure we abort also in cases where
# the graceful shutdown doesn't work, or doesn't work reasonably
# quickly.
try:
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit + 1))
except ValueError:
# If the previous call failed, we try again without the extra second.
# In particular, this is necessary if there already exists an external
# hard limit equal to time_limit.
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit))
def set_memory_limit(memory):
"""*memory* must be given in bytes or None."""
if memory is None:
return
if not can_set_memory_limit():
raise NotImplementedError(CANNOT_LIMIT_MEMORY_MSG)
resource.setrlimit(resource.RLIMIT_AS, (memory, memory))
def convert_to_mb(num_bytes):
return num_bytes / (1024 * 1024)
def get_memory_limit(component_limit, overall_limit):
"""
Return the minimum of the component and overall limits or None if neither is set.
"""
limits = [limit for limit in [component_limit, overall_limit] if limit is not None]
return min(limits) if limits else None
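# For example, get_memory_limit(2 * 1024 ** 3, 4 * 1024 ** 3) returns the
# tighter component limit (2 GiB in bytes), while get_memory_limit(None,
# None) returns None, i.e. no limit.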
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit
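# Worked example (all numbers invented): a component time limit of 1800 s with
# an overall limit of 2400 s and 1500 s already elapsed leaves
# min(1800, 2400 - 1500) = 900 s. The memory case needs no elapsed time:
def _example_combined_memory_limit():
    assert get_memory_limit(None, None) is None
    assert get_memory_limit(2 * 1024 ** 3, 4 * 1024 ** 3) == 2 * 1024 ** 3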
| 2,854 |
Python
| 32.197674 | 87 | 0.69096 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/tests.py
|
"""
Test module for Fast Downward driver script. Run with
py.test driver/tests.py
"""
import os
import subprocess
import sys
import pytest
from .aliases import ALIASES, PORTFOLIOS
from .arguments import EXAMPLES
from . import limits
from . import returncodes
from .util import REPO_ROOT_DIR, find_domain_filename
def translate():
"""Create translated task."""
cmd = [sys.executable, "fast-downward.py", "--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def cleanup():
subprocess.check_call([sys.executable, "fast-downward.py", "--cleanup"],
cwd=REPO_ROOT_DIR)
def run_driver(parameters):
cleanup()
translate()
cmd = [sys.executable, "fast-downward.py"] + parameters
return subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def test_commandline_args():
for description, cmd in EXAMPLES:
parameters = [x.strip('"') for x in cmd]
run_driver(parameters)
def test_aliases():
for alias, config in ALIASES.items():
parameters = ["--alias", alias, "output.sas"]
run_driver(parameters)
def test_show_aliases():
run_driver(["--show-aliases"])
def test_portfolios():
for name, portfolio in PORTFOLIOS.items():
parameters = ["--portfolio", portfolio,
"--search-time-limit", "30m", "output.sas"]
run_driver(parameters)
@pytest.mark.skipif(not limits.can_set_time_limit(), reason="Cannot set time limits on this system")
def test_hard_time_limit():
def preexec_fn():
limits.set_time_limit(10)
driver = [sys.executable, "fast-downward.py"]
parameters = [
"--translate", "--translate-time-limit",
"10s", "misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
parameters = [
"--translate", "--translate-time-limit",
"20s", "misc/tests/benchmarks/gripper/prob01.pddl"]
with pytest.raises(subprocess.CalledProcessError) as exception_info:
subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
assert exception_info.value.returncode == returncodes.DRIVER_INPUT_ERROR
def test_automatic_domain_file_name_computation():
benchmarks_dir = os.path.join(REPO_ROOT_DIR, "benchmarks")
for dirpath, dirnames, filenames in os.walk(benchmarks_dir):
for filename in filenames:
if "domain" not in filename:
assert find_domain_filename(os.path.join(dirpath, filename))
| 2,596 |
Python
| 28.850574 | 100 | 0.665254 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/arguments.py
|
import argparse
import os.path
import re
import sys
from . import aliases
from . import returncodes
from . import util
DESCRIPTION = """Fast Downward driver script.
Input files can be either a PDDL problem file (with an optional PDDL domain
file), in which case the driver runs both planner components (translate and
search), or a SAS+ translator output file, in which case the driver runs just
the search component. You can override this default behaviour by selecting
components manually with the flags below. The first component to be run
determines the required input files:
--translate: [DOMAIN] PROBLEM
--search: TRANSLATE_OUTPUT
Arguments given before the specified input files are interpreted by the driver
script ("driver options"). Arguments given after the input files are passed on
to the planner components ("component options"). In exceptional cases where no
input files are needed, use "--" to separate driver from component options. In
even more exceptional cases where input files begin with "--", use "--" to
separate driver options from input files and also to separate input files from
component options.
By default, component options are passed to the search component. Use
"--translate-options" or "--search-options" within the component options to
override the default for the following options, until overridden again. (See
below for examples.)"""
LIMITS_HELP = """You can limit the time or memory for individual components
or the whole planner. The effective limit for each component is the minimum
of the component, overall, external soft, and external hard limits.
Limits are given in seconds or MiB. You can change the unit by using the
suffixes s, m, h and K, M, G.
By default, all limits are inactive. Only external limits (e.g. set with
ulimit) are respected.
Portfolios require that a time limit is in effect. Portfolio configurations
that exceed their time or memory limit are aborted, and the next
configuration is run."""
EXAMPLE_PORTFOLIO = os.path.relpath(
aliases.PORTFOLIOS["seq-opt-fdss-1"], start=util.REPO_ROOT_DIR)
EXAMPLES = [
("Translate and find a plan with A* + LM-Cut:",
["misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(lmcut())"']),
("Translate and run no search:",
["--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]),
("Run predefined configuration (LAMA-2011) on translated task:",
["--alias", "seq-sat-lama-2011", "output.sas"]),
("Run a portfolio on a translated task:",
["--portfolio", EXAMPLE_PORTFOLIO,
"--search-time-limit", "30m", "output.sas"]),
("Run the search component in debug mode (with assertions enabled) "
"and validate the resulting plan:",
["--debug", "output.sas", "--search", '"astar(ipdb())"']),
("Pass options to translator and search components:",
["misc/tests/benchmarks/gripper/prob01.pddl",
"--translate-options", "--full-encoding",
"--search-options", "--search", '"astar(lmcut())"']),
("Find a plan and validate it:",
["--validate",
"misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(cegar())"']),
]
EPILOG = """component options:
--translate-options OPTION1 OPTION2 ...
--search-options OPTION1 OPTION2 ...
pass OPTION1 OPTION2 ... to specified planner component
(default: pass component options to search)
Examples:
%s
""" % "\n\n".join("%s\n%s" % (desc, " ".join([os.path.basename(sys.argv[0])] + parameters)) for desc, parameters in EXAMPLES)
COMPONENTS_PLUS_OVERALL = ["translate", "search", "validate", "overall"]
DEFAULT_SAS_FILE = "output.sas"
"""
Function to emulate the behavior of ArgumentParser.error, but with our
custom exit codes instead of 2.
"""
def print_usage_and_exit_with_driver_input_error(parser, msg):
parser.print_usage()
returncodes.exit_with_driver_input_error("{}: error: {}".format(os.path.basename(sys.argv[0]), msg))
class RawHelpFormatter(argparse.HelpFormatter):
"""Preserve newlines and spacing."""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
def _format_args(self, action, default_metavar):
"""Show explicit help for remaining args instead of "..."."""
if action.nargs == argparse.REMAINDER:
return "INPUT_FILE1 [INPUT_FILE2] [COMPONENT_OPTION ...]"
else:
return argparse.HelpFormatter._format_args(self, action, default_metavar)
def _rindex(seq, element):
"""Like list.index, but gives the index of the *last* occurrence."""
seq = list(reversed(seq))
reversed_index = seq.index(element)
return len(seq) - 1 - reversed_index
def _split_off_filenames(planner_args):
"""Given the list of arguments to be passed on to the planner
components, split it into a prefix of filenames and a suffix of
options. Returns a pair (filenames, options).
If a "--" separator is present, the last such separator serves as
the border between filenames and options. The separator itself is
not returned. (This implies that "--" can be a filename, but never
an option to a planner component.)
If no such separator is present, the first argument that begins
with "-" and consists of at least two characters starts the list
of options, and all previous arguments are filenames."""
if "--" in planner_args:
separator_pos = _rindex(planner_args, "--")
num_filenames = separator_pos
del planner_args[separator_pos]
else:
num_filenames = 0
for arg in planner_args:
# We treat "-" by itself as a filename because by common
# convention it denotes stdin or stdout, and we might want
# to support this later.
if arg.startswith("-") and arg != "-":
break
num_filenames += 1
return planner_args[:num_filenames], planner_args[num_filenames:]
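# Illustrative checks (input lists invented) for the splitting rules in the
# docstring above: without "--" the first option-like argument starts the
# options; with "--" the last separator marks the border and is removed.
def _example_split_off_filenames():
    filenames, options = _split_off_filenames(
        ["domain.pddl", "problem.pddl", "--search", "astar(lmcut())"])
    assert filenames == ["domain.pddl", "problem.pddl"]
    assert options == ["--search", "astar(lmcut())"]
    filenames, options = _split_off_filenames(
        ["--", "--odd-filename", "--", "--search-option"])
    assert filenames == ["--", "--odd-filename"]
    assert options == ["--search-option"]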
def _split_planner_args(parser, args):
"""Partition args.planner_args, the list of arguments for the
planner components, into args.filenames, args.translate_options
and args.search_options. Modifies args directly and removes the original
args.planner_args list."""
args.filenames, options = _split_off_filenames(args.planner_args)
args.translate_options = []
args.search_options = []
curr_options = args.search_options
for option in options:
if option == "--translate-options":
curr_options = args.translate_options
elif option == "--search-options":
curr_options = args.search_options
else:
curr_options.append(option)
def _check_mutex_args(parser, args, required=False):
for pos, (name1, is_specified1) in enumerate(args):
for name2, is_specified2 in args[pos + 1:]:
if is_specified1 and is_specified2:
print_usage_and_exit_with_driver_input_error(
parser, "cannot combine %s with %s" % (name1, name2))
if required and not any(is_specified for _, is_specified in args):
print_usage_and_exit_with_driver_input_error(
parser, "exactly one of {%s} has to be specified" %
", ".join(name for name, _ in args))
def _looks_like_search_input(filename):
with open(filename) as input_file:
first_line = next(input_file, "").rstrip()
return first_line == "begin_version"
def _set_components_automatically(parser, args):
"""Guess which planner components to run based on the specified
filenames and set args.components accordingly. Currently
implements some simple heuristics:
1. If there is exactly one input file and it looks like a
Fast-Downward-generated file, run search only.
2. Otherwise, run all components."""
if len(args.filenames) == 1 and _looks_like_search_input(args.filenames[0]):
args.components = ["search"]
else:
args.components = ["translate", "search"]
def _set_components_and_inputs(parser, args):
"""Set args.components to the planner components to be run and set
args.translate_inputs and args.search_input to the correct input
filenames.
Rules:
1. If any --run-xxx option is specified, then the union
of the specified components is run.
2. If nothing is specified, use automatic rules. See
separate function."""
args.components = []
if args.translate or args.run_all:
args.components.append("translate")
if args.search or args.run_all:
args.components.append("search")
if not args.components:
_set_components_automatically(parser, args)
# We implicitly activate validation in debug mode. However, for
# validation we need the PDDL input files and a plan, therefore both
# components must be active.
if args.validate or (args.debug and len(args.components) == 2):
args.components.append("validate")
args.translate_inputs = []
assert args.components
first = args.components[0]
num_files = len(args.filenames)
# When passing --help to any of the components (or -h to the
# translator), we don't require input filenames and silently
# swallow any that are provided. This is undocumented to avoid
# cluttering the driver's --help output.
if first == "translate":
if "--help" in args.translate_options or "-h" in args.translate_options:
args.translate_inputs = []
elif num_files == 1:
task_file, = args.filenames
domain_file = util.find_domain_filename(task_file)
args.translate_inputs = [domain_file, task_file]
elif num_files == 2:
args.translate_inputs = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "translator needs one or two input files")
elif first == "search":
if "--help" in args.search_options:
args.search_input = None
elif num_files == 1:
args.search_input, = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "search needs exactly one input file")
else:
assert False, first
def _set_translator_output_options(parser, args):
if any("--sas-file" in opt for opt in args.translate_options):
print_usage_and_exit_with_driver_input_error(
parser, "Cannot pass the \"--sas-file\" option to translate.py from the "
"fast-downward.py script. Pass it directly to fast-downward.py instead.")
args.search_input = args.sas_file
args.translate_options += ["--sas-file", args.search_input]
def _get_time_limit_in_seconds(limit, parser):
match = re.match(r"^(\d+)(s|m|h)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed time limit parameter: {}".format(limit))
time = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "m":
time *= 60
elif suffix == "h":
time *= 3600
return time
def _get_memory_limit_in_bytes(limit, parser):
match = re.match(r"^(\d+)(k|m|g)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed memory limit parameter: {}".format(limit))
memory = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "k":
memory *= 1024
elif suffix is None or suffix == "m":
memory *= 1024 * 1024
elif suffix == "g":
memory *= 1024 * 1024 * 1024
return memory
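# Example conversions (the parser argument is only used to report malformed
# input, so None suffices for these well-formed strings):
def _example_parse_limits():
    assert _get_time_limit_in_seconds("90", None) == 90
    assert _get_time_limit_in_seconds("30m", None) == 1800
    assert _get_time_limit_in_seconds("2h", None) == 7200
    assert _get_memory_limit_in_bytes("512K", None) == 512 * 1024
    assert _get_memory_limit_in_bytes("4g", None) == 4 * 1024 ** 3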
def set_time_limit_in_seconds(parser, args, component):
param = component + "_time_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_time_limit_in_seconds(limit, parser))
def set_memory_limit_in_bytes(parser, args, component):
param = component + "_memory_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_memory_limit_in_bytes(limit, parser))
def _convert_limits_to_ints(parser, args):
for component in COMPONENTS_PLUS_OVERALL:
set_time_limit_in_seconds(parser, args, component)
set_memory_limit_in_bytes(parser, args, component)
def parse_args():
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=EPILOG,
formatter_class=RawHelpFormatter,
add_help=False)
help_options = parser.add_argument_group(
title=("driver options that show information and exit "
"(don't run planner)"))
# We manually add the help option because we want to control
# how it is grouped in the output.
help_options.add_argument(
"-h", "--help",
action="help", default=argparse.SUPPRESS,
help="show this help message and exit")
help_options.add_argument(
"-v", "--version", action="store_true",
help="print version number and exit")
help_options.add_argument(
"--show-aliases", action="store_true",
help="show the known aliases (see --alias) and exit")
components = parser.add_argument_group(
title=("driver options selecting the planner components to be run\n"
"(may select several; default: auto-select based on input file(s))"))
components.add_argument(
"--run-all", action="store_true",
help="run all components of the planner")
components.add_argument(
"--translate", action="store_true",
help="run translator component")
components.add_argument(
"--search", action="store_true",
help="run search component")
limits = parser.add_argument_group(
title="time and memory limits", description=LIMITS_HELP)
for component in COMPONENTS_PLUS_OVERALL:
limits.add_argument("--{}-time-limit".format(component))
limits.add_argument("--{}-memory-limit".format(component))
driver_other = parser.add_argument_group(
title="other driver options")
driver_other.add_argument(
"--alias",
help="run a config with an alias (e.g. seq-sat-lama-2011)")
driver_other.add_argument(
"--build",
help="BUILD can be a predefined build name like release "
"(default) and debug, a custom build name, or the path to "
"a directory holding the planner binaries. The driver "
"first looks for the planner binaries under 'BUILD'. If "
"this path does not exist, it tries the directory "
"'<repo>/builds/BUILD/bin', where the build script creates "
"them by default.")
driver_other.add_argument(
"--debug", action="store_true",
help="alias for --build=debug --validate")
driver_other.add_argument(
"--validate", action="store_true",
help='validate plans (implied by --debug); needs "validate" (VAL) on PATH')
driver_other.add_argument(
"--log-level", choices=["debug", "info", "warning"],
default="info",
help="set log level (most verbose: debug; least verbose: warning; default: %(default)s)")
driver_other.add_argument(
"--plan-file", metavar="FILE", default="sas_plan",
help="write plan(s) to FILE (default: %(default)s; anytime configurations append .1, .2, ...)")
driver_other.add_argument(
"--sas-file", metavar="FILE",
help="intermediate file for storing the translator output "
"(implies --keep-sas-file, default: {})".format(DEFAULT_SAS_FILE))
driver_other.add_argument(
"--keep-sas-file", action="store_true",
help="keep translator output file (implied by --sas-file, default: "
"delete file if translator and search component are active)")
driver_other.add_argument(
"--portfolio", metavar="FILE",
help="run a portfolio specified in FILE")
driver_other.add_argument(
"--portfolio-bound", metavar="VALUE", default=None, type=int,
help="exclusive bound on plan costs (only supported for satisficing portfolios)")
driver_other.add_argument(
"--portfolio-single-plan", action="store_true",
help="abort satisficing portfolio after finding the first plan")
driver_other.add_argument(
"--cleanup", action="store_true",
help="clean up temporary files (translator output and plan files) and exit")
parser.add_argument(
"planner_args", nargs=argparse.REMAINDER,
help="file names and options passed on to planner components")
# Using argparse.REMAINDER relies on the fact that the first
# argument that doesn't belong to the driver doesn't look like an
# option, i.e., doesn't start with "-". This is usually satisfied
# because the argument is a filename; in exceptional cases, "--"
# can be used as an explicit separator. For example, "./fast-downward.py --
# --help" passes "--help" to the search code.
args = parser.parse_args()
if args.sas_file:
args.keep_sas_file = True
else:
args.sas_file = DEFAULT_SAS_FILE
if args.build and args.debug:
print_usage_and_exit_with_driver_input_error(
parser, "The option --debug is an alias for --build=debug "
"--validate. Do no specify both --debug and --build.")
if not args.build:
if args.debug:
args.build = "debug"
else:
args.build = "release"
_split_planner_args(parser, args)
_check_mutex_args(parser, [
("--alias", args.alias is not None),
("--portfolio", args.portfolio is not None),
("options for search component", bool(args.search_options))])
_set_translator_output_options(parser, args)
_convert_limits_to_ints(parser, args)
if args.alias:
try:
aliases.set_options_for_alias(args.alias, args)
except KeyError:
print_usage_and_exit_with_driver_input_error(
parser, "unknown alias: %r" % args.alias)
if args.portfolio_bound is not None and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound may only be used for portfolios.")
if args.portfolio_bound is not None and args.portfolio_bound < 0:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound must not be negative.")
if args.portfolio_single_plan and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-single-plan may only be used for portfolios.")
if not args.version and not args.show_aliases and not args.cleanup:
_set_components_and_inputs(parser, args)
if "translate" not in args.components or "search" not in args.components:
args.keep_sas_file = True
return args
| 19,013 |
Python
| 38.448133 | 125 | 0.651659 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/__init__.py
|
from .version import __version__
| 33 |
Python
| 15.999992 | 32 | 0.727273 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolio_runner.py
|
""" Module for running planner portfolios.
Memory limits: We apply the same memory limit that is given to the
plan script to each planner call. Note that this setup does not work if
the sum of the memory usage of the Python process and the planner calls
is limited. In this case the Python process might get killed although
we would like to kill only the single planner call and continue with
the remaining configurations. If we ever want to support this scenario
we will have to reduce the memory limit of the planner calls by the
amount of memory that the Python process needs. On maia for example
this amounts to 128MB of reserved virtual memory. We can make Python
reserve less space by lowering the soft limit for virtual memory before
the process is started.
"""
__all__ = ["run"]
import subprocess
import sys
from . import call
from . import limits
from . import returncodes
from . import util
DEFAULT_TIMEOUT = 1800
def adapt_heuristic_cost_type(arg, cost_type):
if cost_type == "normal":
transform = "no_transform()"
else:
transform = "adapt_costs({})".format(cost_type)
return arg.replace("H_COST_TRANSFORM", transform)
def adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager):
g_bound = plan_manager.get_next_portfolio_cost_bound()
plan_counter = plan_manager.get_plan_counter()
print("g bound: %s" % g_bound)
print("next plan number: %d" % (plan_counter + 1))
for index, arg in enumerate(args):
if arg == "--evaluator" or arg == "--heuristic":
heuristic = args[index + 1]
heuristic = adapt_heuristic_cost_type(heuristic, heuristic_cost_type)
args[index + 1] = heuristic
elif arg == "--search":
search = args[index + 1]
if "bound=BOUND" not in search:
returncodes.exit_with_driver_critical_error(
"Satisficing portfolios need the string "
"\"bound=BOUND\" in each search configuration. "
"See the FDSS portfolios for examples.")
for name, value in [
("BOUND", g_bound),
("S_COST_TYPE", search_cost_type)]:
search = search.replace(name, str(value))
search = adapt_heuristic_cost_type(search, heuristic_cost_type)
args[index + 1] = search
break
def run_search(executable, args, sas_file, plan_manager, time, memory):
complete_args = [executable] + args + [
"--internal-plan-file", plan_manager.get_plan_prefix()]
print("args: %s" % complete_args)
try:
exitcode = call.check_call(
"search", complete_args, stdin=sas_file,
time_limit=time, memory_limit=memory)
except subprocess.CalledProcessError as err:
exitcode = err.returncode
print("exitcode: %d" % exitcode)
print()
return exitcode
def compute_run_time(timeout, configs, pos):
remaining_time = timeout - util.get_elapsed_time()
print("remaining time: {}".format(remaining_time))
relative_time = configs[pos][0]
remaining_relative_time = sum(config[0] for config in configs[pos:])
print("config {}: relative time {}, remaining {}".format(
pos, relative_time, remaining_relative_time))
# For the last config we have relative_time == remaining_relative_time, so
# we use all of the remaining time at the end.
return remaining_time * relative_time / remaining_relative_time
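# Worked example (numbers invented): with 100 s remaining and relative times
# [30, 50, 20], the config at pos=1 receives 100 * 50 / (50 + 20) ~= 71.4 s;
# the last config always gets all remaining time, as noted above.
def _example_relative_time_split():
    configs = [(30, []), (50, []), (20, [])]
    pos = 1
    relative_time = configs[pos][0]
    remaining_relative_time = sum(config[0] for config in configs[pos:])
    assert abs(100 * relative_time / remaining_relative_time - 500 / 7) < 1e-9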
def run_sat_config(configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory):
run_time = compute_run_time(timeout, configs, pos)
if run_time <= 0:
return None
_, args_template = configs[pos]
args = list(args_template)
adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager)
if not plan_manager.abort_portfolio_after_first_plan():
args.extend([
"--internal-previous-portfolio-plans",
str(plan_manager.get_plan_counter())])
result = run_search(executable, args, sas_file, plan_manager, run_time, memory)
plan_manager.process_new_plans()
return result
def run_sat(configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory):
# If the configuration contains S_COST_TYPE or H_COST_TRANSFORM and the task
# has non-unit costs, we start by treating all costs as one. When we find
# a solution, we rerun the successful config with real costs.
heuristic_cost_type = "one"
search_cost_type = "one"
changed_cost_types = False
while configs:
configs_next_round = []
for pos, (relative_time, args) in enumerate(configs):
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if exitcode == returncodes.SUCCESS:
if plan_manager.abort_portfolio_after_first_plan():
return
configs_next_round.append((relative_time, args))
if (not changed_cost_types and can_change_cost_type(args) and
plan_manager.get_problem_type() == "general cost"):
print("Switch to real costs and repeat last run.")
changed_cost_types = True
search_cost_type = "normal"
heuristic_cost_type = "plusone"
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if final_config_builder:
print("Build final config.")
final_config = final_config_builder(args)
break
if final_config:
break
# Only run the successful configs in the next round.
configs = configs_next_round
if final_config:
print("Abort portfolio and run final config.")
exitcode = run_sat_config(
[(1, final_config)], 0, search_cost_type,
heuristic_cost_type, executable, sas_file, plan_manager,
timeout, memory)
if exitcode is not None:
yield exitcode
def run_opt(configs, executable, sas_file, plan_manager, timeout, memory):
for pos, (relative_time, args) in enumerate(configs):
run_time = compute_run_time(timeout, configs, pos)
exitcode = run_search(executable, args, sas_file, plan_manager,
run_time, memory)
yield exitcode
if exitcode in [returncodes.SUCCESS, returncodes.SEARCH_UNSOLVABLE]:
break
def can_change_cost_type(args):
return any("S_COST_TYPE" in part or "H_COST_TRANSFORM" in part for part in args)
def get_portfolio_attributes(portfolio):
attributes = {}
with open(portfolio, "rb") as portfolio_file:
content = portfolio_file.read()
try:
exec(content, attributes)
except Exception:
returncodes.exit_with_driver_critical_error(
"The portfolio %s could not be loaded. Maybe it still "
"uses the old portfolio syntax? See the FDSS portfolios "
"for examples using the new syntax." % portfolio)
if "CONFIGS" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define CONFIGS")
if "OPTIMAL" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define OPTIMAL")
return attributes
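# Minimal sketch of a portfolio file in the syntax expected above (the single
# configuration is invented; see driver/portfolios/ for real examples).
# Satisficing portfolios must embed "bound=BOUND" in every search string:
#
#   OPTIMAL = False
#   CONFIGS = [
#       (1, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
#            "--search",
#            "lazy_greedy([h],cost_type=S_COST_TYPE,bound=BOUND)"]),
#   ]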
def run(portfolio, executable, sas_file, plan_manager, time, memory):
"""
Run the configs in the given portfolio file.
The portfolio is allowed to run for at most *time* seconds and may
use a maximum of *memory* bytes.
"""
attributes = get_portfolio_attributes(portfolio)
configs = attributes["CONFIGS"]
optimal = attributes["OPTIMAL"]
final_config = attributes.get("FINAL_CONFIG")
final_config_builder = attributes.get("FINAL_CONFIG_BUILDER")
if "TIMEOUT" in attributes:
returncodes.exit_with_driver_input_error(
"The TIMEOUT attribute in portfolios has been removed. "
"Please pass a time limit to fast-downward.py.")
if time is None:
if sys.platform == "win32":
returncodes.exit_with_driver_unsupported_error(limits.CANNOT_LIMIT_TIME_MSG)
else:
returncodes.exit_with_driver_input_error(
"Portfolios need a time limit. Please pass --search-time-limit "
"or --overall-time-limit to fast-downward.py.")
timeout = util.get_elapsed_time() + time
if optimal:
exitcodes = run_opt(
configs, executable, sas_file, plan_manager, timeout, memory)
else:
exitcodes = run_sat(
configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory)
return returncodes.generate_portfolio_exitcode(list(exitcodes))
| 9,492 |
Python
| 38.719665 | 88 | 0.623894 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/version.py
|
# This file is auto-generated by the scripts in misc/release.
# Do not modify it.
__version__ = "20.06+"
| 106 |
Python
| 20.399996 | 61 | 0.679245 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/aliases.py
|
import os
from .util import DRIVER_DIR
PORTFOLIO_DIR = os.path.join(DRIVER_DIR, "portfolios")
ALIASES = {}
ALIASES["seq-sat-fd-autotune-1"] = [
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--evaluator", "hcea=cea()",
"--evaluator", "hcg=cg(transform=adapt_costs(plusone))",
"--evaluator", "hgc=goalcount()",
"--evaluator", "hAdd=add()",
"--search", """iterated([
lazy(alt([single(sum([g(),weight(hff,10)])),
single(sum([g(),weight(hff,10)]),pref_only=true)],
boost=2000),
preferred=[hff],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hAdd,7)])),
single(sum([g(),weight(hAdd,7)]),pref_only=true),
single(sum([g(),weight(hcg,7)])),
single(sum([g(),weight(hcg,7)]),pref_only=true),
single(sum([g(),weight(hcea,7)])),
single(sum([g(),weight(hcea,7)]),pref_only=true),
single(sum([g(),weight(hgc,7)])),
single(sum([g(),weight(hgc,7)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([tiebreaking([sum([g(),weight(hAdd,3)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,3)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,3)]),hcg]),
tiebreaking([sum([g(),weight(hcg,3)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,3)]),hcea]),
tiebreaking([sum([g(),weight(hcea,3)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,3)]),hgc]),
tiebreaking([sum([g(),weight(hgc,3)]),hgc],pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=normal),
eager(alt([tiebreaking([sum([g(),weight(hAdd,10)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,10)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,10)]),hcg]),
tiebreaking([sum([g(),weight(hcg,10)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,10)]),hcea]),
tiebreaking([sum([g(),weight(hcea,10)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,10)]),hgc]),
tiebreaking([sum([g(),weight(hgc,10)]),hgc],pref_only=true)],
boost=500),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal)
],repeat_last=true,continue_on_fail=true)"""]
ALIASES["seq-sat-fd-autotune-2"] = [
"--evaluator", "hcea=cea(transform=adapt_costs(plusone))",
"--evaluator", "hcg=cg(transform=adapt_costs(one))",
"--evaluator", "hgc=goalcount(transform=adapt_costs(plusone))",
"--evaluator", "hff=ff()",
"--search", """iterated([
ehc(hcea,preferred=[hcea],preferred_usage=0,cost_type=normal),
lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),
single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcg,3)])),
single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcea,3)])),
single(sum([weight(g(),2),weight(hcea,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hgc,3)])),
single(sum([weight(g(),2),weight(hgc,3)]),pref_only=true)],
boost=200),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hff,5)])),
single(sum([g(),weight(hff,5)]),pref_only=true),
single(sum([g(),weight(hcg,5)])),
single(sum([g(),weight(hcg,5)]),pref_only=true),
single(sum([g(),weight(hcea,5)])),
single(sum([g(),weight(hcea,5)]),pref_only=true),
single(sum([g(),weight(hgc,5)])),
single(sum([g(),weight(hgc,5)]),pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal),
lazy(alt([single(sum([g(),weight(hff,2)])),
single(sum([g(),weight(hff,2)]),pref_only=true),
single(sum([g(),weight(hcg,2)])),
single(sum([g(),weight(hcg,2)]),pref_only=true),
single(sum([g(),weight(hcea,2)])),
single(sum([g(),weight(hcea,2)]),pref_only=true),
single(sum([g(),weight(hgc,2)])),
single(sum([g(),weight(hgc,2)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=one)
],repeat_last=true,continue_on_fail=true)"""]
def _get_lama(**kwargs):
return [
"--if-unit-cost",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref={pref})".format(**kwargs),
"--evaluator", "hff=ff()",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref={pref})".format(**kwargs),
"--evaluator", "hff1=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref={pref})".format(**kwargs),
"--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
# Append --always to be on the safe side if we want to append
# additional options later.
"--always"]
ALIASES["seq-sat-lama-2011"] = _get_lama(pref="true")
ALIASES["lama"] = _get_lama(pref="false")
ALIASES["lama-first"] = [
"--evaluator",
"hlm=lmcount(lm_factory=lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]
ALIASES["seq-opt-bjolp"] = [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]
ALIASES["seq-opt-lmcut"] = [
"--search", "astar(lmcut())"]
PORTFOLIOS = {}
for portfolio in os.listdir(PORTFOLIO_DIR):
name, ext = os.path.splitext(portfolio)
assert ext == ".py", portfolio
PORTFOLIOS[name.replace("_", "-")] = os.path.join(PORTFOLIO_DIR, portfolio)
def show_aliases():
for alias in sorted(list(ALIASES) + list(PORTFOLIOS)):
print(alias)
def set_options_for_alias(alias_name, args):
"""
If alias_name is an alias for a configuration, set args.search_options
to the corresponding command-line arguments. If it is an alias for a
portfolio, set args.portfolio to the path to the portfolio file.
Otherwise raise KeyError.
"""
assert not args.search_options
assert not args.portfolio
if alias_name in ALIASES:
args.search_options = [x.replace(" ", "").replace("\n", "")
for x in ALIASES[alias_name]]
elif alias_name in PORTFOLIOS:
args.portfolio = PORTFOLIOS[alias_name]
else:
raise KeyError(alias_name)
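# Usage sketch (argparse.Namespace stands in for the driver's parsed args;
# the attribute names mirror those set up in driver/arguments.py).
def _example_set_options_for_alias():
    import argparse
    args = argparse.Namespace(search_options=[], portfolio=None)
    set_options_for_alias("seq-opt-lmcut", args)
    assert args.search_options == ["--search", "astar(lmcut())"]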
| 7,900 |
Python
| 43.892045 | 119 | 0.567975 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/run_components.py
|
import errno
import logging
import os.path
import subprocess
import sys
from . import call
from . import limits
from . import portfolio_runner
from . import returncodes
from . import util
from .plan_manager import PlanManager
# TODO: We might want to turn translate into a module and call it with "python3 -m translate".
REL_TRANSLATE_PATH = os.path.join("translate", "translate.py")
if os.name == "posix":
REL_SEARCH_PATH = "downward"
VALIDATE = "validate"
elif os.name == "nt":
REL_SEARCH_PATH = "downward.exe"
VALIDATE = "validate.exe"
else:
returncodes.exit_with_driver_unsupported_error("Unsupported OS: " + os.name)
def get_executable(build, rel_path):
# First, consider 'build' to be a path directly to the binaries.
# The path can be absolute or relative to the current working
# directory.
build_dir = build
if not os.path.exists(build_dir):
# If build is not a full path to the binaries, it might be the
        # name of a build in our standard directory structure. In that
        # case, the binaries are in
# '<repo-root>/builds/<buildname>/bin'.
build_dir = os.path.join(util.BUILDS_DIR, build, "bin")
if not os.path.exists(build_dir):
returncodes.exit_with_driver_input_error(
"Could not find build '{build}' at {build_dir}. "
"Please run './build.py {build}'.".format(**locals()))
abs_path = os.path.join(build_dir, rel_path)
if not os.path.exists(abs_path):
returncodes.exit_with_driver_input_error(
"Could not find '{rel_path}' in build '{build}'. "
"Please run './build.py {build}'.".format(**locals()))
return abs_path
def run_translate(args):
logging.info("Running translator.")
time_limit = limits.get_time_limit(
args.translate_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.translate_memory_limit, args.overall_memory_limit)
translate = get_executable(args.build, REL_TRANSLATE_PATH)
assert sys.executable, "Path to interpreter could not be found"
cmd = [sys.executable] + [translate] + args.translate_inputs + args.translate_options
stderr, returncode = call.get_error_output_and_returncode(
"translator",
cmd,
time_limit=time_limit,
memory_limit=memory_limit)
# We collect stderr of the translator and print it here, unless
# the translator ran out of memory and all output in stderr is
# related to MemoryError.
do_print_on_stderr = True
if returncode == returncodes.TRANSLATE_OUT_OF_MEMORY:
output_related_to_memory_error = True
if not stderr:
output_related_to_memory_error = False
for line in stderr.splitlines():
if "MemoryError" not in line:
output_related_to_memory_error = False
break
if output_related_to_memory_error:
do_print_on_stderr = False
if do_print_on_stderr and stderr:
returncodes.print_stderr(stderr)
if returncode == 0:
return (0, True)
elif returncode == 1:
# Unlikely case that the translator crashed without raising an
# exception.
return (returncodes.TRANSLATE_CRITICAL_ERROR, False)
else:
# Pass on any other exit code, including in particular signals or
# exit codes such as running out of memory or time.
return (returncode, False)
def run_search(args):
logging.info("Running search (%s)." % args.build)
time_limit = limits.get_time_limit(
args.search_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.search_memory_limit, args.overall_memory_limit)
executable = get_executable(args.build, REL_SEARCH_PATH)
plan_manager = PlanManager(
args.plan_file,
portfolio_bound=args.portfolio_bound,
single_plan=args.portfolio_single_plan)
plan_manager.delete_existing_plans()
if args.portfolio:
assert not args.search_options
logging.info("search portfolio: %s" % args.portfolio)
return portfolio_runner.run(
args.portfolio, executable, args.search_input, plan_manager,
time_limit, memory_limit)
else:
if not args.search_options:
returncodes.exit_with_driver_input_error(
"search needs --alias, --portfolio, or search options")
if "--help" not in args.search_options:
args.search_options.extend(["--internal-plan-file", args.plan_file])
try:
call.check_call(
"search",
[executable] + args.search_options,
stdin=args.search_input,
time_limit=time_limit,
memory_limit=memory_limit)
except subprocess.CalledProcessError as err:
# TODO: if we ever add support for SEARCH_PLAN_FOUND_AND_* directly
# in the planner, this assertion no longer holds. Furthermore, we
# would need to return (err.returncode, True) if the returncode is
# in [0..10].
# Negative exit codes are allowed for passing out signals.
assert err.returncode >= 10 or err.returncode < 0, "got returncode < 10: {}".format(err.returncode)
return (err.returncode, False)
else:
return (0, True)
def run_validate(args):
logging.info("Running validate.")
num_files = len(args.filenames)
if num_files == 1:
task, = args.filenames
domain = util.find_domain_filename(task)
elif num_files == 2:
domain, task = args.filenames
else:
returncodes.exit_with_driver_input_error("validate needs one or two PDDL input files.")
plan_files = list(PlanManager(args.plan_file).get_existing_plans())
if not plan_files:
print("Not running validate since no plans found.")
return (0, True)
validate_inputs = [domain, task] + plan_files
try:
call.check_call(
"validate",
[VALIDATE] + validate_inputs,
time_limit=args.validate_time_limit,
memory_limit=args.validate_memory_limit)
except OSError as err:
if err.errno == errno.ENOENT:
returncodes.exit_with_driver_input_error("Error: {} not found. Is it on the PATH?".format(VALIDATE))
else:
returncodes.exit_with_driver_critical_error(err)
else:
return (0, True)
| 6,502 |
Python
| 36.808139 | 112 | 0.633651 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/call.py
|
"""Make subprocess calls with time and memory limits."""
from . import limits
from . import returncodes
import logging
import os
import subprocess
import sys
try:
from shlex import quote
except ImportError:
from pipes import quote
def print_call_settings(nick, cmd, stdin, time_limit, memory_limit):
if stdin is not None:
stdin = quote(stdin)
logging.info("{} stdin: {}".format(nick, stdin))
if time_limit is not None:
time_limit = str(time_limit) + "s"
logging.info("{} time limit: {}".format(nick, time_limit))
if memory_limit is not None:
memory_limit = int(limits.convert_to_mb(memory_limit))
memory_limit = str(memory_limit) + " MB"
logging.info("{} memory limit: {}".format(nick, memory_limit))
escaped_cmd = [quote(x) for x in cmd]
if stdin is not None:
escaped_cmd.extend(["<", quote(stdin)])
logging.info("{} command line string: {}".format(nick, " ".join(escaped_cmd)))
def _get_preexec_function(time_limit, memory_limit):
def set_limits():
def _try_or_exit(function, description):
def fail(exception, exitcode):
returncodes.print_stderr("{} failed: {}".format(description, exception))
os._exit(exitcode)
try:
function()
except NotImplementedError as err:
fail(err, returncodes.DRIVER_UNSUPPORTED)
except OSError as err:
fail(err, returncodes.DRIVER_CRITICAL_ERROR)
except ValueError as err:
fail(err, returncodes.DRIVER_INPUT_ERROR)
_try_or_exit(lambda: limits.set_time_limit(time_limit), "Setting time limit")
_try_or_exit(lambda: limits.set_memory_limit(memory_limit), "Setting memory limit")
if time_limit is None and memory_limit is None:
return None
else:
return set_limits
def check_call(nick, cmd, stdin=None, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, stdin, time_limit, memory_limit)
kwargs = {"preexec_fn": _get_preexec_function(time_limit, memory_limit)}
sys.stdout.flush()
if stdin:
with open(stdin) as stdin_file:
return subprocess.check_call(cmd, stdin=stdin_file, **kwargs)
else:
return subprocess.check_call(cmd, **kwargs)
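# Usage sketch (command, input file, and limits invented): run a child with a
# 10 s CPU limit and a 1 GiB address-space limit, reading stdin from a file,
# much as the driver does for the search component on POSIX systems.
def _example_check_call():
    check_call(
        "example", ["./downward", "--search", "astar(lmcut())"],
        stdin="output.sas", time_limit=10, memory_limit=1024 ** 3)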
def get_error_output_and_returncode(nick, cmd, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, None, time_limit, memory_limit)
preexec_fn = _get_preexec_function(time_limit, memory_limit)
sys.stdout.flush()
p = subprocess.Popen(cmd, preexec_fn=preexec_fn, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr, p.returncode
| 2,720 |
Python
| 32.592592 | 91 | 0.643382 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/returncodes.py
|
from __future__ import print_function
import sys
"""
We document Fast Downward exit codes at
http://www.fast-downward.org/ExitCodes. Please update this documentation when
making changes below.
"""
SUCCESS = 0
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY = 1
SEARCH_PLAN_FOUND_AND_OUT_OF_TIME = 2
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME = 3
TRANSLATE_UNSOLVABLE = 10
SEARCH_UNSOLVABLE = 11
SEARCH_UNSOLVED_INCOMPLETE = 12
TRANSLATE_OUT_OF_MEMORY = 20
TRANSLATE_OUT_OF_TIME = 21
SEARCH_OUT_OF_MEMORY = 22
SEARCH_OUT_OF_TIME = 23
SEARCH_OUT_OF_MEMORY_AND_TIME = 24
TRANSLATE_CRITICAL_ERROR = 30
TRANSLATE_INPUT_ERROR = 31
SEARCH_CRITICAL_ERROR = 32
SEARCH_INPUT_ERROR = 33
SEARCH_UNSUPPORTED = 34
DRIVER_CRITICAL_ERROR = 35
DRIVER_INPUT_ERROR = 36
DRIVER_UNSUPPORTED = 37
def print_stderr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def is_unrecoverable(exitcode):
# Exit codes in the range from 30 to 39 represent unrecoverable failures.
return 30 <= exitcode < 40
def exit_with_driver_critical_error(msg):
print_stderr(msg)
sys.exit(DRIVER_CRITICAL_ERROR)
def exit_with_driver_input_error(msg):
print_stderr(msg)
sys.exit(DRIVER_INPUT_ERROR)
def exit_with_driver_unsupported_error(msg):
print_stderr(msg)
sys.exit(DRIVER_UNSUPPORTED)
def generate_portfolio_exitcode(exitcodes):
"""A portfolio's exitcode is determined as follows:
There is exactly one type of unexpected exit code -> use it.
There are multiple types of unexpected exit codes -> SEARCH_CRITICAL_ERROR.
[..., SUCCESS, ...] -> SUCCESS
[..., SEARCH_UNSOLVABLE, ...] -> SEARCH_UNSOLVABLE
[..., SEARCH_UNSOLVED_INCOMPLETE, ...] -> SEARCH_UNSOLVED_INCOMPLETE
[..., SEARCH_OUT_OF_MEMORY, ..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_MEMORY_AND_TIME
[..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_TIME
[..., SEARCH_OUT_OF_MEMORY, ...] -> SEARCH_OUT_OF_MEMORY
"""
print("Exit codes: {}".format(exitcodes))
exitcodes = set(exitcodes)
unrecoverable_codes = [code for code in exitcodes if is_unrecoverable(code)]
# There are unrecoverable exit codes.
if unrecoverable_codes:
print("Error: Unexpected exit codes: {}".format(unrecoverable_codes))
if len(unrecoverable_codes) == 1:
return (unrecoverable_codes[0], False)
else:
return (SEARCH_CRITICAL_ERROR, False)
# At least one plan was found.
if SUCCESS in exitcodes:
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME, True)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY, True)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_TIME, True)
else:
return (SUCCESS, True)
# A config proved unsolvability or did not find a plan.
for code in [SEARCH_UNSOLVABLE, SEARCH_UNSOLVED_INCOMPLETE]:
if code in exitcodes:
return (code, False)
# No plan was found due to hitting resource limits.
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_MEMORY_AND_TIME, False)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_OUT_OF_MEMORY, False)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_TIME, False)
assert False, "Error: Unhandled exit codes: {}".format(exitcodes)
| 3,500 |
Python
| 31.119266 | 94 | 0.681143 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/cleanup.py
|
from itertools import count
import os
def _try_remove(f):
try:
os.remove(f)
except OSError:
return False
return True
def cleanup_temporary_files(args):
_try_remove(args.sas_file)
_try_remove(args.plan_file)
for i in count(1):
if not _try_remove("%s.%s" % (args.plan_file, i)):
break
| 346 |
Python
| 18.277777 | 58 | 0.595376 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2.py
|
OPTIMAL = False
CONFIGS = [
# eager_greedy_ff
(330, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff
(411, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea
(213, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea
(57, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_add
(204, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cg
(208, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cg
(109, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_add
(63, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
def FINAL_CONFIG_BUILDER(successful_args):
# This assumes that CONFIGS only contains "simple" configurations.
new_args = list(successful_args)
for pos, arg in enumerate(successful_args):
if arg == "--search":
orig_search = successful_args[pos + 1]
sub_searches = []
for weight in (5, 3, 2, 1):
if orig_search.startswith("lazy"):
sub_search = \
"lazy_wastar([h],preferred=[h],w=%d,cost_type=S_COST_TYPE)" % weight
else:
sub_search = \
"eager(single(sum([g(),weight(h,%d)])),preferred=[h],cost_type=S_COST_TYPE)" % weight
sub_searches.append(sub_search)
sub_search_string = ",".join(sub_searches)
new_search = "iterated([%s],bound=BOUND,repeat_last=true)" % sub_search_string
new_args[pos + 1] = new_search
break
return new_args
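# Worked example (input invented): a successful lazy configuration is
# rewritten into an iterated sequence of weighted searches with weights
# 5, 3, 2, 1, reusing the already-defined evaluator "h".
def _example_final_config_builder():
    args = ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
            "--search",
            "lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]
    new_args = FINAL_CONFIG_BUILDER(args)
    assert new_args[-1].startswith("iterated([lazy_wastar")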
| 2,502 |
Python
| 42.155172 | 109 | 0.551159 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2018.py
|
"""
This is the "Fast Downward Stone Soup 2018" sequential portfolio that participated in the IPC 2018
satisficing and bounded-cost tracks. For more information, see the planner abstract:
Jendrik Seipp and Gabriele Röger.
Fast Downward Stone Soup 2018.
In Ninth International Planning Competition (IPC 2018), Deterministic Part, pp. 80-82. 2018.
https://ai.dmi.unibas.ch/papers/seipp-roeger-ipc2018.pdf
"""
OPTIMAL = False
CONFIGS = [
(26, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true),type_based([hff,g()])],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=false,bound=BOUND)"]),
(25, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=false)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=0),preferred=[hlm],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(135, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=false,preferred_successors_first=true,bound=BOUND)"]),
(59, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(23, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=true,bound=BOUND)"]),
(57, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=false)",
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=1000),preferred=[hlm,hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(17, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(12, [
"--evaluator",
"hadd=add(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hadd),single(hadd,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hadd,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true)],boost=2000),preferred=[hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=true)",
"--evaluator",
"hcea=cea(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true),single(hcea),single(hcea,pref_only=true)],boost=0),preferred=[hlm,hcea],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(88, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(8, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=100),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hgoalcount,10)])),single(sum([g(),weight(hgoalcount,10)]),pref_only=true)],boost=2000),preferred=[hff,hgoalcount],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(24, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=false,use_orders=true)",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hblind=blind()",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hblind,2)]),pref_only=true),single(sum([g(),weight(hlm,2)])),single(sum([g(),weight(hlm,2)]),pref_only=true),single(sum([g(),weight(hff,2)])),single(sum([g(),weight(hff,2)]),pref_only=true)],boost=4419),preferred=[hlm],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(30, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy_wastar([hff],w=3,preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(58, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([single(sum([g(),weight(hblind,10)])),single(sum([g(),weight(hblind,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcea,10)])),single(sum([g(),weight(hcea,10)]),pref_only=true)],boost=536),preferred=[hff],reopen_closed=false,bound=BOUND)"]),
(27, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=one,bound=BOUND)"]),
(50, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([type_based([g()]),single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(29, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=one,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_reasonable_orders_hps(lm_rhw(only_causal_landmarks=true,disjunctive_landmarks=true,use_orders=true))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,pref=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hlm,3)])),single(sum([weight(g(),2),weight(hlm,3)]),pref_only=true),single(sum([weight(g(),2),weight(hadd,3)])),single(sum([weight(g(),2),weight(hadd,3)]),pref_only=true)],boost=2474),preferred=[hadd],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hhmax=hmax()",
"--search",
"eager(alt([tiebreaking([sum([g(),weight(hblind,7)]),hblind]),tiebreaking([sum([g(),weight(hhmax,7)]),hhmax]),tiebreaking([sum([g(),weight(hadd,7)]),hadd]),tiebreaking([sum([g(),weight(hcg,7)]),hcg])],boost=2142),preferred=[],reopen_closed=true,bound=BOUND)"]),
(28, [
"--evaluator",
"hadd=add(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff]),tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff],pref_only=true),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd]),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd],pref_only=true)],boost=2537),preferred=[hff,hadd],reopen_closed=true,bound=BOUND)"]),
(53, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=5000),preferred=[hlm],reopen_closed=false,bound=BOUND)"]),
(29, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true)],boost=5000),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(27, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hff,2)]))],boost=4480),preferred=[],reopen_closed=true,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=1000),preferred=[hlm,hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=true,use_orders=true,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([g(),weight(hlm,10)]),hlm]),tiebreaking([sum([g(),weight(hlm,10)]),hlm],pref_only=true),tiebreaking([sum([g(),weight(hff,10)]),hff]),tiebreaking([sum([g(),weight(hff,10)]),hff],pref_only=true)],boost=200),preferred=[hlm],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(87, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(30, [
"--landmarks",
"lmg=lm_exhaust(only_causal_landmarks=false)",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hhmax=hmax()",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false,transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,3)])),single(sum([g(),weight(hblind,3)]),pref_only=true),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true),single(sum([g(),weight(hhmax,3)])),single(sum([g(),weight(hhmax,3)]),pref_only=true)],boost=3052),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(56, [
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([tiebreaking([sum([g(),hff]),hff]),tiebreaking([sum([g(),hff]),hff],pref_only=true)],boost=432),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(19, [
"--landmarks",
"lmg=lm_merged([lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=false,use_orders=true),lm_hm(m=1,conjunctive_landmarks=true,use_orders=true)])",
"--evaluator",
"hff=ff()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true)],boost=500),preferred=[hff],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(56, [
"--landmarks",
"lmg=lm_exhaust(only_causal_landmarks=false)",
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=false)",
"--evaluator",
"hff=ff()",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([tiebreaking([sum([weight(g(),8),weight(hblind,9)]),hblind]),tiebreaking([sum([weight(g(),8),weight(hlm,9)]),hlm]),tiebreaking([sum([weight(g(),8),weight(hff,9)]),hff]),tiebreaking([sum([weight(g(),8),weight(hgoalcount,9)]),hgoalcount])],boost=2005),preferred=[],reopen_closed=true,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_zg(use_orders=false)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false)",
"--search",
"eager(single(sum([g(),weight(hlm,3)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(81, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=true,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"eager(single(sum([g(),weight(hlm,5)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
]
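# Each entry above is a (relative_time, command_line_args) pair: the driver
# rescales the relative times against the remaining budget and substitutes
# BOUND with the cost of the best plan found so far. A minimal sketch of that
# loop, for illustration only (run_planner is a hypothetical stand-in for the
# actual driver call):
#
#   def run_portfolio(configs, total_time, best_cost='infinity'):
#       scale = float(total_time) / sum(t for t, _ in configs)
#       for t, args in configs:
#           concrete = [a.replace('BOUND', str(best_cost)) for a in args]
#           best_cost = run_planner(concrete, timeout=t * scale) or best_cost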
| 19,090 |
Python
| 55.315634 | 525 | 0.598009 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_opt_fdss_2.py
|
OPTIMAL = True
CONFIGS = [
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
(1, ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
(1, ["--search",
"astar(lmcut())"]),
(1, ["--search",
"astar(blind())"]),
]
| 948 |
Python
| 40.260868 | 116 | 0.60654 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2014.py
|
OPTIMAL = False
CONFIGS = [
# add_lm_lazy_greedy
(114, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_greedy
(187, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_eager_greedy
(33, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_greedy
(35, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_greedy
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_eager_greedy
(120, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_eager_greedy
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hcg,hff],preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_lazy_greedy
(17, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_cg_lazy_greedy
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hcg],preferred=[hadd,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_lazy_wastar
(79, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hadd,hlm],w=3,preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_wastar
(159, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hff,hlm],w=3,preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_greedy
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hcg,hlm],preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcea,hff],w=3,preferred=[hcea,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_eager_wastar
(37, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(), weight(hcea, 3)])),single(sum([g(),weight(hcea,3)]),pref_only=true),single(sum([g(), weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_lazy_wastar
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcg,hff],w=3,preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_eager_wastar
(77, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hadd, 3)])),single(sum([g(), weight(hadd,3)]),pref_only=true)]),preferred=[hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hcea=cea(transform=H_COST_TRANSFORM)", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hcea,3)])),single(sum([g(),weight(hcea,3)]),pref_only=true)]),preferred=[hff,hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_wastar
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_eager_greedy
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_lazy_wastar
(39, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcg,hlm],w=3,preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lazy_wastar
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hcea], w=3, preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_eager_wastar
(72, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hff, 3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_eager_wastar
(38, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hcg, 3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lazy_wastar
(38, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hff], w=3, preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lazy_greedy
(116, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hcg],preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
# ff_lm_eager_wastar
FINAL_CONFIG = [
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))",
"--search",
"iterated([eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 8,552 |
Python
| 64.792307 | 277 | 0.615996 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_opt_fdss_1.py
|
OPTIMAL = True
CONFIGS = [
(175, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(432, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
(455, ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
(569, ["--search",
"astar(lmcut())"]),
]
| 930 |
Python
| 43.333331 | 118 | 0.608602 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_1.py
|
OPTIMAL = False
CONFIGS = [
# alt_lazy_ff_cg
(49, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff_1
(171, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_lazy_cea_cg
(27, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hcea,hcg],preferred=[hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_ff_1
(340, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cg
(76, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_ff_1
(88, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_add
(90, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hadd],preferred=[hff,hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea_1
(56, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cea_cg
(73, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hff,hcea,hcg],preferred=[hff,hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_add_1
(50, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea_1
(84, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_add_1
(166, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_ff_1
(87, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_cg_1
(73, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_cg_1
(89, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
FINAL_CONFIG = [
"--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"iterated([eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 3,544 |
Python
| 48.23611 | 133 | 0.582957 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_opt_merge_and_shrink.py
|
OPTIMAL = True
CONFIGS = [
(800, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1000, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
]
| 706 |
Python
| 46.13333 | 118 | 0.655807 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/utils.py
|
from __future__ import print_function
import math
import os
import pickle
import shutil
import sys
import time
import random
import cProfile
import pstats
import io
from collections import defaultdict, deque, Counter, namedtuple
from itertools import count
from heapq import heappush, heappop
import numpy as np
INF = float('inf')
SEPARATOR = '\n' + 80*'-' + '\n'
try:
user_input = raw_input
except NameError:
user_input = input
inf_generator = count
##################################################
def int_ceil(f):
return int(math.ceil(f))
def get_python_version():
return sys.version_info[0]
def read(filename):
with open(filename, 'r') as f:
return f.read()
def write(filename, string):
with open(filename, 'w') as f:
f.write(string)
def write_pickle(filename, data):
# Cannot pickle lambda or nested functions
with open(filename, 'wb') as f:
pickle.dump(data, f)
def read_pickle(filename):
# Can sometimes read pickle3 from python2 by calling twice
with open(filename, 'rb') as f:
try:
return pickle.load(f)
except UnicodeDecodeError as e:
return pickle.load(f, encoding='latin1')
def safe_remove(p):
if os.path.exists(p):
os.remove(p)
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def safe_rm_dir(d):
if os.path.exists(d):
shutil.rmtree(d)
def clear_dir(d):
safe_rm_dir(d)
ensure_dir(d)
def get_file_path(file, rel_path):
directory = os.path.dirname(os.path.abspath(file))
return os.path.join(directory, rel_path)
def open_pdf(filename):
import subprocess
# import os
# import webbrowser
subprocess.Popen('open {}'.format(filename), shell=True)
# os.system(filename)
# webbrowser.open(filename)
user_input('Display?')
# safe_remove(filename)
# TODO: close output
##################################################
def elapsed_time(start_time):
return time.time() - start_time
def safe_zip(sequence1, sequence2):
assert len(sequence1) == len(sequence2)
return zip(sequence1, sequence2)
def get_mapping(sequence1, sequence2):
return dict(safe_zip(sequence1, sequence2))
def apply_mapping(sequence, mapping):
return tuple(mapping.get(e, e) for e in sequence)
def safe_apply_mapping(sequence, mapping):
# TODO: flip arguments order
return tuple(mapping[e] for e in sequence)
def negate_test(test):
return lambda *args, **kwargs: not test(*args, **kwargs)
def flatten(iterable_of_iterables):
return (item for iterables in iterable_of_iterables for item in iterables)
def find(test, sequence):
for item in sequence:
if test(item):
return item
return None
def find_unique(test, sequence):
found, value = False, None
for item in sequence:
if test(item):
if found:
raise RuntimeError('Both elements {} and {} satisfy the test'.format(value, item))
found, value = True, item
if not found:
raise RuntimeError('Unable to find an element satisfying the test')
return value
def implies(a, b):
return not a or b
def irange(start, end=None, step=1):
# TODO: combine with my other infinite generator
if end is None:
end = start
start = 0
n = start
while n < end:
yield n
n += step
def argmin(fn, iterable):
return min(iterable, key=fn)
def argmax(fn, iterable):
return max(iterable, key=fn)
def invert_dict(d):
return {v: k for k, v in d.items()}
def randomize(iterable):
sequence = list(iterable)
random.shuffle(sequence)
return sequence
##################################################
BYTES_PER_KILOBYTE = math.pow(2, 10)
BYTES_PER_GIGABYTE = math.pow(2, 30)
KILOBYTES_PER_GIGABYTE = BYTES_PER_GIGABYTE / BYTES_PER_KILOBYTE
def get_peak_memory_in_kb():
# TODO: use psutil instead
import psutil
# https://pypi.org/project/psutil/
# https://psutil.readthedocs.io/en/latest/
#rss: aka "Resident Set Size", this is the non-swapped physical memory a process has used. (bytes)
#vms: aka "Virtual Memory Size", this is the total amount of virtual memory used by the process. (bytes)
#shared: (Linux) memory that could be potentially shared with other processes.
#text (Linux, BSD): aka TRS (text resident set) the amount of memory devoted to executable code.
#data (Linux, BSD): aka DRS (data resident set) the amount of physical memory devoted to other than executable code.
#lib (Linux): the memory used by shared libraries.
#dirty (Linux): the number of dirty pages.
#pfaults (macOS): number of page faults.
#pageins (macOS): number of actual pageins.
process = psutil.Process(os.getpid())
#process.pid()
#process.ppid()
pmem = process.memory_info() # this seems to actually get the current memory!
memory_in_kb = pmem.vms / BYTES_PER_KILOBYTE
return memory_in_kb
#print(process.memory_full_info())
#print(process.memory_percent())
# process.rlimit(psutil.RLIMIT_NOFILE) # set resource limits (Linux only)
#print(psutil.virtual_memory())
#print(psutil.swap_memory())
#print(psutil.pids())
#try:
# # This will only work on Linux systems.
# with open("/proc/self/status") as status_file:
# for line in status_file:
# parts = line.split()
# if parts[0] == "VmPeak:":
# return float(parts[1])
#except IOError:
# pass
#return 0.
def check_memory(max_memory):
if max_memory == INF:
return True
peak_memory = get_peak_memory_in_kb()
#print('Peak memory: {} | Max memory: {}'.format(peak_memory, max_memory))
if peak_memory <= max_memory:
return True
print('Peak memory of {} KB exceeds memory limit of {} KB'.format(
int(peak_memory), int(max_memory)))
return False
##################################################
class Saver(object):
# TODO: contextlib
def save(self):
raise NotImplementedError()
def restore(self):
raise NotImplementedError()
def __enter__(self):
# TODO: move the saving to enter?
self.save()
return self
def __exit__(self, type, value, traceback):
self.restore()
class Profiler(Saver):
fields = ['tottime', 'cumtime']
def __init__(self, field='tottime', num=10):
assert field in self.fields
self.field = field
self.num = num
self.pr = cProfile.Profile()
def save(self):
self.pr.enable()
return self.pr
def restore(self):
self.pr.disable()
if self.num is None:
return None
stream = None
#stream = io.StringIO()
stats = pstats.Stats(self.pr, stream=stream).sort_stats(self.field) # TODO: print multiple
stats.print_stats(self.num)
return stats
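# Illustrative usage of Profiler (not part of the original file): on exit, the
# top `num` entries sorted by `field` are printed via pstats.
#
#   with Profiler(field='cumtime', num=5):
#       expensive_call()  # hypothetical workload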
class Verbose(Saver): # TODO: use DisableOutput
def __init__(self, verbose=False):
self.verbose = verbose
def save(self):
if self.verbose:
return
self.stdout = sys.stdout
self.devnull = open(os.devnull, 'w')
sys.stdout = self.devnull
#self.stderr = sys.stderr
#self.devnull = open(os.devnull, 'w')
#sys.stderr = self.stderr
def restore(self):
if self.verbose:
return
sys.stdout = self.stdout
self.devnull.close()
#sys.stderr = self.stderr
#self.devnull.close()
class TmpCWD(Saver):
def __init__(self, temp_cwd):
self.tmp_cwd = temp_cwd
def save(self):
self.old_cwd = os.getcwd()
os.chdir(self.tmp_cwd)
def restore(self):
os.chdir(self.old_cwd)
##################################################
class Comparable(object):
def __lt__(self, other):
raise NotImplementedError()
def __eq__(self, other):
return not (self < other) and not (other < self)
def __ne__(self, other):
return (self < other) or (other < self)
def __gt__(self, other):
return other < self
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not other < self
class MockSet(object):
def __init__(self, test=lambda item: True):
self.test = test
def __contains__(self, item):
return self.test(item)
class Score(Comparable): # tuple
def __init__(self, *args):
# TODO: convert to float
#super(Score, self).__init__(args)
self.values = tuple(args)
def check_other(self, other):
return isinstance(other, Score) and (len(self.values) == len(other.values))
def __lt__(self, other):
assert self.check_other(other)
return self.values < other.values
def __iter__(self):
return iter(self.values)
def __neg__(self):
return self.__class__(*(type(value).__neg__(value) for value in self.values))
def __add__(self, other):
return self.__class__(*(self.values + other.values))
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, self.values)
class HeapElement(Comparable):
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, other):
return self.key < other.key
def __iter__(self):
return iter([self.key, self.value])
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.key, self.value)
##################################################
def sorted_str_from_list(obj, **kwargs):
return '[{}]'.format(', '.join(sorted(str_from_object(item, **kwargs) for item in obj)))
def str_from_object(obj, ndigits=None): # str_object
if type(obj) in [list]: #, np.ndarray):
return '[{}]'.format(', '.join(str_from_object(item, ndigits) for item in obj))
if type(obj) == tuple:
return '({})'.format(', '.join(str_from_object(item, ndigits) for item in obj))
#if isinstance(obj, dict):
if type(obj) in [dict, defaultdict, Counter]:
return '{{{}}}'.format(', '.join('{}: {}'.format(str_from_object(key, ndigits), str_from_object(obj[key], ndigits)) \
for key in sorted(obj.keys(), key=lambda k: str_from_object(k, ndigits))))
if type(obj) in [set, frozenset]:
return '{{{}}}'.format(', '.join(sorted(str_from_object(item, ndigits) for item in obj)))
if (ndigits is not None) and (type(obj) in [float, np.float64]):
obj = round(obj, ndigits=ndigits)
if obj == 0.:
obj = 0. # NOTE - catches -0.0 bug
return '{0:.{1}f}'.format(obj, ndigits)
#if isinstance(obj, types.FunctionType):
# return obj.__name__
return str(obj)
#return repr(obj)
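# Example (illustrative): containers are rendered recursively and floats are
# rounded when ndigits is given, e.g.
#   str_from_object({'b': [2.0], 'a': (1,)}, ndigits=1) == '{a: (1), b: [2.0]}'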
##################################################
def incoming_from_edges(edges):
incoming_vertices = defaultdict(set)
for v1, v2 in edges:
incoming_vertices[v2].add(v1)
return incoming_vertices
def outgoing_from_edges(edges):
outgoing_vertices = defaultdict(set)
for v1, v2 in edges:
outgoing_vertices[v1].add(v2)
return outgoing_vertices
def neighbors_from_orders(orders):
return incoming_from_edges(orders), \
outgoing_from_edges(orders)
def adjacent_from_edges(edges):
undirected_edges = defaultdict(set)
for v1, v2 in edges:
undirected_edges[v1].add(v2)
undirected_edges[v2].add(v1)
return undirected_edges
##################################################
def filter_orders(vertices, orders):
# TODO: rename to filter edges?
return [order for order in orders if all(v in vertices for v in order)]
def is_valid_topological_sort(vertices, orders, solution):
orders = filter_orders(vertices, orders)
if Counter(vertices) != Counter(solution):
return False
index_from_vertex = {v: i for i, v in enumerate(solution)}
for v1, v2 in orders:
if index_from_vertex[v1] >= index_from_vertex[v2]:
return False
return True
def dfs_topological_sort(vertices, orders, priority_fn=lambda v: 0):
# TODO: DFS for all topological sorts
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
def dfs(history, visited):
reverse_ordering = []
v1 = history[-1]
if v1 in visited:
return reverse_ordering
visited.add(v1)
for v2 in sorted(outgoing_edges[v1], key=priority_fn, reverse=True):
if v2 in history:
return None # Contains a cycle
result = dfs(history + [v2], visited)
if result is None:
return None
reverse_ordering.extend(result)
reverse_ordering.append(v1)
return reverse_ordering
visited = set()
reverse_order = []
for v0 in sorted(vertices, key=priority_fn, reverse=True):
if not incoming_edges[v0]:
result = dfs([v0], visited)
if result is None:
return None
reverse_order.extend(result)
ordering = reverse_order[::-1]
assert(is_valid_topological_sort(vertices, orders, ordering))
return ordering
def topological_sort(vertices, orders, priority_fn=lambda v: 0):
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
ordering = []
queue = []
for v in vertices:
if not incoming_edges[v]:
heappush(queue, HeapElement(priority_fn(v), v))
while queue:
priority, v1 = heappop(queue) # Lowest to highest
ordering.append(v1)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1)
if not incoming_edges[v2]:
heappush(queue, HeapElement(priority_fn(v2), v2))
if len(ordering) != len(vertices):
return None
assert is_valid_topological_sort(vertices, orders, ordering)
return ordering
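# Illustrative examples (assuming hashable vertices):
#   topological_sort(['a', 'b', 'c'], [('b', 'c'), ('a', 'b')]) == ['a', 'b', 'c']
#   topological_sort(['a', 'b'], [('a', 'b'), ('b', 'a')]) is None  # cycle detected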
def layer_sort(vertices, orders): # priority_fn=lambda v: 0
# TODO: more efficient hypergraph/layer distance (h_max)
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
visited = {}
queue = []
for v in vertices:
if not incoming_edges[v]:
visited[v] = 0
heappush(queue, HeapElement(visited[v], v))
while queue:
g, v1 = heappop(queue)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1) # TODO: non-uniform cost function for max
if not incoming_edges[v2] and (v2 not in visited):
visited[v2] = g + 1
heappush(queue, HeapElement(visited[v2], v2))
return visited
def is_acyclic(vertices, orders):
return topological_sort(vertices, orders) is not None
def sample_topological_sort(vertices, orders):
# https://stackoverflow.com/questions/38551057/random-topological-sorting-with-uniform-distribution-in-near-linear-time
# https://www.geeksforgeeks.org/all-topological-sorts-of-a-directed-acyclic-graph/
priorities = {v: random.random() for v in vertices}
return topological_sort(vertices, orders, priority_fn=priorities.get)
def transitive_closure(vertices, orders):
# Warshall's algorithm
orders = filter_orders(vertices, orders)
closure = set(orders)
for k in vertices:
for i in vertices:
for j in vertices:
if ((i, j) not in closure) and ((i, k) in closure) and ((k, j) in closure):
closure.add((i, j))
return closure
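# Example (illustrative): transitive_closure(['a', 'b', 'c'], [('a', 'b'), ('b', 'c')])
# returns {('a', 'b'), ('b', 'c'), ('a', 'c')}; Warshall's algorithm runs in O(|V|^3).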
##################################################
def grow_component(sources, edges, disabled=set()):
processed = set(disabled)
cluster = []
queue = deque()
def add_cluster(v):
if v in processed:
return
processed.add(v)
cluster.append(v)
queue.append(v)
for v0 in sources:
add_cluster(v0)
while queue:
# TODO: add clusters here to ensure proper BFS
v1 = queue.popleft()
for v2 in edges[v1]:
add_cluster(v2)
return cluster
def breadth_first_search(source, edges, **kwargs):
return grow_component([source], edges, **kwargs)
def get_ancestors(source, edges):
return set(breadth_first_search(source, incoming_from_edges(edges))) - {source}
def get_descendants(source, edges):
return set(breadth_first_search(source, outgoing_from_edges(edges))) - {source}
def get_connected_components(vertices, edges):
edges = filter_orders(vertices, edges)
undirected_edges = adjacent_from_edges(edges)
clusters = []
processed = set()
for v0 in vertices:
if v0 in processed:
continue
cluster = grow_component({v0}, undirected_edges, processed)
processed.update(cluster)
if cluster:
clusters.append([v for v in vertices if v in cluster])
return clusters
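# Example (illustrative; edges are treated as undirected):
#   get_connected_components(['a', 'b', 'c', 'd'], [('a', 'b'), ('c', 'd')])
#   == [['a', 'b'], ['c', 'd']]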
##################################################
SearchNode = namedtuple('Node', ['g', 'parent'])
def dijkstra(sources, edges, op=sum): # sum | max
if not isinstance(edges, dict):
edges = {edge: 1 for edge in edges}
_, outgoing_edges = neighbors_from_orders(edges)
visited = {}
queue = []
for v0 in sources:
visited[v0] = SearchNode(g=0, parent=None)
queue.append(HeapElement(visited[v0].g, v0))
while queue:
current_g, current_v = heappop(queue)
if visited[current_v].g < current_g:
continue
for next_v in outgoing_edges[current_v]:
next_g = op([current_g, edges[(current_v, next_v)]])
if (next_v not in visited) or (next_g < visited[next_v].g):
visited[next_v] = SearchNode(next_g, current_v)
heappush(queue, HeapElement(next_g, next_v))
return visited
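# Example (illustrative): with weights given as an edge -> cost dict,
#   dijkstra(['a'], {('a', 'b'): 1, ('b', 'c'): 2, ('a', 'c'): 5})
# maps 'c' to Node(g=3, parent='b'), preferring the cheaper path via 'b'.
# Passing op=max instead minimizes the maximum edge weight along a path.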
##################################################
def is_hashable(value):
    #return isinstance(value, Hashable)  # TODO: issues with Hashable and numpy on Python 2.7.6
try:
hash(value)
except TypeError:
return False
return True
# def hash_or_id(value):
# if is_hashable(value):
# return hash(value)
# return id(value)
def value_or_id(value):
if is_hashable(value):
return value
return id(value)
def is_64bits():
#return sys.maxsize > 2**32
import platform
bit, _ = platform.architecture()
return bit == '64bit'
def inclusive_range(start, stop, step=1):
sequence = list(np.arange(start, stop, step))
    if sequence and (sequence[-1] != stop):
sequence.append(stop)
return sequence
def read_pddl(this_file, pddl_filename):
directory = os.path.dirname(os.path.abspath(this_file))
return read(os.path.join(directory, pddl_filename))
def lowercase(*strings):
return [string.lower() for string in strings]
def str_eq(s1, s2, ignore_case=True):
if ignore_case:
s1 = s1.lower()
s2 = s2.lower()
return s1 == s2
def clip(value, lower=-INF, upper=+INF):
return min(max(lower, value), upper)
| 19,236 |
Python
| 28.686728 | 125 | 0.60236 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/optimizer.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args, get_parameter_name, is_parameter, Minimize
from hsr_tamp.pddlstream.language.conversion import substitute_expression, list_from_conjunction
from hsr_tamp.pddlstream.language.external import parse_lisp_list, get_procedure_fn
from hsr_tamp.pddlstream.language.function import PredicateResult, FunctionResult
from hsr_tamp.pddlstream.language.object import Object, SharedOptValue
from hsr_tamp.pddlstream.language.stream import StreamInfo, Stream, StreamInstance, StreamResult, \
PartialInputs, NEGATIVE_SUFFIX, WildOutput
from hsr_tamp.pddlstream.language.generator import get_next
from hsr_tamp.pddlstream.utils import INF, get_mapping, safe_zip, str_from_object
from hsr_tamp.pddlstream.algorithms.reorder import get_stream_plan_components, get_partial_orders
DEFAULT_SIMULTANEOUS = False
DEFAULT_UNIQUE = True # TODO: would it ever even make sense to do shared here?
# TODO: revert to my previous specification where streams can simply be fused
VARIABLES = ':variables'
CONSTRAINT = ':constraint'
UNSATISFIABLE = 'unsatisfiable{}'.format(NEGATIVE_SUFFIX)
##################################################
class OptimizerOutput(object):
def __init__(self, assignments=[], facts=[], infeasible=[]): # infeasible=None
self.assignments = list(assignments)
self.facts = list(facts)
self.infeasible = list(map(frozenset, infeasible))
def to_wild(self):
return WildOutput(self.assignments, self.facts)
def __bool__(self):
return bool(self.assignments)
__nonzero__ = __bool__
def __repr__(self):
#return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
return str_from_object(self.__dict__)
class Optimizer(object):
def __init__(self, name, procedure, info):
self.name = name
self.procedure = procedure
self.info = info
self.variables = []
self.constraints = []
self.objectives = []
self.streams = []
def get_streams(self):
return self.variables + self.constraints
def __repr__(self):
return '{}'.format(self.name) #, self.streams)
class ComponentStream(Stream):
def __init__(self, optimizer, *args):
self.optimizer = optimizer
super(ComponentStream, self).__init__(*args)
##################################################
def get_list_gen_fn(procedure, inputs, outputs, certified, hint={}):
# TODO: prevent outputs of the sampler from being used as inputs (only consider initial values)
def list_gen_fn(*input_values):
mapping = get_mapping(inputs, input_values)
targets = substitute_expression(certified, mapping)
return procedure(outputs, targets, hint=hint)
return list_gen_fn
def get_effort_fn(optimizer_name):
    # TODO: higher effort if the variable cannot be free for the testing process
# This might happen if the variable is certified to have a property after construction
def effort_fn(*input_values):
# parameter_indices = [i for i, value in enumerate(input_values) if is_parameter(value)]
# optimizer_indices = [i for i, value in enumerate(input_values) if isinstance(value, SharedOptValue)
# if input_values[i].stream.startswith(optimizer_name)]
#if not parameter_indices and not optimizer_indices:
# return INF
return 1
return effort_fn
def prune_dominated(collections):
for i, collection1 in enumerate(collections):
if all((i == j) or not (collection2 <= collection1)
for j, collection2 in enumerate(collections)):
yield collection1
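# Example (illustrative): list(prune_dominated([{1}, {1, 2}, {3}])) yields
# [{1}, {3}]: {1, 2} is dropped because it is a superset of (dominated by) {1}.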
##################################################
class OptimizerInfo(StreamInfo):
def __init__(self, planable=False, p_success=None, overhead=None):
super(OptimizerInfo, self).__init__(p_success=p_success, overhead=overhead)
self.planable = planable # TODO: this isn't currently used
# TODO: post-processing
class VariableStream(ComponentStream):
# TODO: allow generation of two variables
def __init__(self, optimizer, variables, inputs, domain, certified, infos):
name = '{}-{}'.format(optimizer.name, '-'.join(map(get_parameter_name, variables)))
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, variables, certified)
# TODO: need to convert OptimizerOutput
#gen_fn = empty_gen()
#info = StreamInfo(effort=get_effort_fn(optimizer_name, inputs, outputs))
#info = StreamInfo(opt_gen_fn=PartialInputs(unique=DEFAULT_UNIQUE, num=DEFAULT_NUM))
info = infos.get(name, None)
if info is None:
info = StreamInfo(opt_gen_fn=PartialInputs(unique=DEFAULT_UNIQUE),
simultaneous=DEFAULT_SIMULTANEOUS)
super(VariableStream, self).__init__(optimizer, name, gen_fn, inputs, domain,
variables, certified, info)
class ConstraintStream(ComponentStream):
def __init__(self, optimizer, constraint, domain, infos):
# TODO: could support fluents and compile them into conditional effects
inputs = get_args(constraint)
outputs = []
certified = [constraint]
name = '{}-{}'.format(optimizer.name, get_prefix(constraint))
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, outputs, certified)
#gen_fn = empty_gen()
info = infos.get(name, None)
if info is None:
info = StreamInfo(effort=get_effort_fn(optimizer.name),
simultaneous=DEFAULT_SIMULTANEOUS)
super(ConstraintStream, self).__init__(optimizer, name, gen_fn, inputs, domain,
outputs, certified, info)
##################################################
def parse_variable(optimizer, lisp_list, infos):
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {VARIABLES, ':inputs', ':domain', ':graph'}
return VariableStream(optimizer,
value_from_attribute[VARIABLES], # TODO: assume unique?
value_from_attribute.get(':inputs', []),
list_from_conjunction(value_from_attribute.get(':domain')),
list_from_conjunction(value_from_attribute.get(':graph')),
infos)
def parse_constraint(optimizer, lisp_list, infos):
# TODO: constraints with the same name
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {CONSTRAINT, ':necessary'} # , ':fluents'}
return ConstraintStream(optimizer,
value_from_attribute[CONSTRAINT],
list_from_conjunction(value_from_attribute[':necessary']),
infos)
# TODO: convert optimizer into a set of streams? Already present within test stream
def parse_optimizer(lisp_list, procedures, infos):
_, optimizer_name = lisp_list[:2]
procedure = get_procedure_fn(procedures, optimizer_name)
optimizer_info = infos.get(optimizer_name, OptimizerInfo())
optimizer = Optimizer(optimizer_name, procedure, optimizer_info)
for sub_list in lisp_list[2:]:
form = sub_list[0]
if form == VARIABLES:
optimizer.variables.append(parse_variable(optimizer, sub_list, infos))
elif form == CONSTRAINT:
optimizer.constraints.append(parse_constraint(optimizer, sub_list, infos))
elif form == ':objective':
optimizer.objectives.append(sub_list[1])
else:
raise ValueError(form)
return optimizer.get_streams()
##################################################
class OptimizerResult(StreamResult):
def get_components(self):
return self.external.stream_plan
def get_objectives(self):
return substitute_expression(self.external.objectives, self.mapping)
def get_unsatisfiable(self):
return self.instance.get_unsatisfiable()
class OptimizerInstance(StreamInstance):
_Result = OptimizerResult
def __init__(self, stream, input_objects, fluent_facts):
super(OptimizerInstance, self).__init__(stream, input_objects, fluent_facts)
all_constraints = frozenset(range(len(self.external.certified)))
self.infeasible = {all_constraints}
# TODO: might need to block separate clusters at once in order to ensure that it captures the true behavior
# TODO: connected components on facts
# TODO: cluster connected components in the infeasible set
# TODO: compute things dependent on a stream and treat like an optimizer
# Also make an option to just treat everything like an optimizer
def _next_wild(self):
output, self.enumerated = get_next(self._generator, default=[])
if not isinstance(output, OptimizerOutput):
output = OptimizerOutput(assignments=output)
self.infeasible.update(output.infeasible)
# TODO: instead replace each time
return output.to_wild()
def get_unsatisfiable(self):
constraints = substitute_expression(self.external.certified, self.external.mapping)
index_from_constraint = {c: i for i, c in enumerate(constraints)}
# TODO: compute connected components
result_from_index = defaultdict(set)
for result in self.external.stream_plan:
for fact in result.get_certified():
if fact in index_from_constraint:
result_from_index[index_from_constraint[fact]].add(result)
# TODO: add implied results
#orders = get_partial_orders(self.external.stream_plan)
return [{result for index in cluster for result in result_from_index[index]}
for cluster in prune_dominated(self.infeasible)]
class OptimizerStream(Stream):
_Instance = OptimizerInstance
def __init__(self, optimizer, external_plan):
optimizer.streams.append(self)
self.optimizer = optimizer
self.stream_plan, self.function_plan = partition_external_plan(external_plan)
inputs, domain, outputs, certified, functions, self.macro_from_micro, \
self.input_objects, self.output_objects, self.fluent_facts = get_cluster_values(external_plan)
hint = self.create_hint()
self.objectives = certified + functions
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, outputs, self.objectives, hint=hint)
#assert len(self.get_cluster_plans()) == 1
super(OptimizerStream, self).__init__(optimizer.name, gen_fn, inputs, domain, outputs,
certified, optimizer.info)
def create_hint(self):
hint = {}
for result, mapping in safe_zip(self.stream_plan, self.macro_from_micro):
if isinstance(result, StreamResult):
for param, obj in safe_zip(result.external.outputs, result.output_objects):
if isinstance(obj, Object):
hint[mapping[param]] = obj.value
return hint
@property
def mapping(self):
return get_mapping(self.inputs + self.outputs,
self.input_objects + self.output_objects)
def get_cluster_plans(self):
# TODO: split the optimizer into clusters when provably independent
return get_stream_plan_components(self.stream_plan + self.function_plan)
@property
def instance(self):
return self.get_instance(self.input_objects, fluent_facts=self.fluent_facts)
##################################################
def add_result_inputs(result, param_from_obj, local_mapping, inputs, input_objects):
for param, obj in zip(result.instance.external.inputs, result.instance.input_objects):
# TODO: only do optimistic parameters?
if obj not in param_from_obj:
param_from_obj[obj] = '?i{}'.format(len(inputs)) # '?_i{}'
inputs.append(param_from_obj[obj])
input_objects.append(obj)
local_mapping[param] = param_from_obj[obj]
def add_result_outputs(result, param_from_obj, local_mapping, outputs, output_objects):
for param, obj in zip(result.instance.external.outputs, result.output_objects):
if obj not in param_from_obj:
param_from_obj[obj] = '?o{}'.format(len(outputs))
outputs.append(param_from_obj[obj])
output_objects.append(obj)
local_mapping[param] = param_from_obj[obj]
def get_cluster_values(stream_plan):
param_from_obj = {}
macro_from_micro = []
inputs, domain, outputs, certified, functions = [], set(), [], set(), set()
input_objects, output_objects = [], []
fluent_facts = []
for result in stream_plan:
local_mapping = {} # global_from_local
stream = result.instance.external
add_result_inputs(result, param_from_obj, local_mapping, inputs, input_objects)
domain.update(set(substitute_expression(stream.domain, local_mapping)) - certified)
if isinstance(result, PredicateResult):
# functions.append(Equal(stream.head, result.value))
# TODO: do I need the new mapping here?
mapping = {inp: param_from_obj[inp] for inp in result.instance.input_objects}
functions.update(substitute_expression(result.get_certified(), mapping))
elif isinstance(result, FunctionResult):
functions.add(substitute_expression(Minimize(stream.head), local_mapping))
else:
fluent_facts.extend(result.instance.fluent_facts)
add_result_outputs(result, param_from_obj, local_mapping, outputs, output_objects)
certified.update(substitute_expression(stream.certified, local_mapping))
macro_from_micro.append(local_mapping) # TODO: append for functions as well?
#assert not fluent_facts
return inputs, sorted(domain), outputs, sorted(certified), sorted(functions), \
macro_from_micro, input_objects, output_objects, fluent_facts
| 14,249 |
Python
| 48.307958 | 115 | 0.645168 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/rule.py
|
from collections import deque
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.stream import Stream, StreamInfo
from hsr_tamp.pddlstream.language.external import parse_lisp_list
from hsr_tamp.pddlstream.language.generator import from_test, universe_test
from hsr_tamp.pddlstream.language.conversion import list_from_conjunction, substitute_expression
# TODO: could signal a rule by making its gen_fn just the constant True
# TODO: could apply the rule in the initial state once but then couldn't support unexpected facts
# TODO: prune unnecessary preconditions using rules
from hsr_tamp.pddlstream.utils import get_mapping
RULES = [] # TODO: no global
def parse_rule(lisp_list, stream_map, stream_info):
value_from_attribute = parse_lisp_list(lisp_list[1:])
assert set(value_from_attribute) <= {':inputs', ':domain', ':certified'}
# TODO: if len(certified) == 1, augment existing streams
RULES.append(Stream(name='rule{}'.format(len(RULES)),
gen_fn=from_test(universe_test),
inputs=value_from_attribute.get(':inputs', []),
domain=list_from_conjunction(value_from_attribute.get(':domain', [])),
fluents=[],
outputs=[],
certified=list_from_conjunction(value_from_attribute.get(':certified', [])),
info=StreamInfo(eager=True, p_success=1, overhead=0, verbose=False)))
return RULES[-1]
# TODO: could make p_success=0 to prevent use in search
##################################################
def apply_rules_to_streams(rules, streams):
    # TODO: can actually do this with multiple conditions if the stream's certified facts contain all of them
    # TODO: also do this when there are no domain conditions
processed_rules = deque(rules)
while processed_rules:
rule = processed_rules.popleft()
if len(rule.domain) != 1:
continue
[rule_fact] = rule.domain
rule.info.p_success = 0 # Need not be applied
for stream in streams:
if not isinstance(stream, Stream):
continue
for certified_fact in stream.certified:
if get_prefix(rule_fact) == get_prefix(certified_fact):
mapping = get_mapping(get_args(rule_fact), get_args(certified_fact))
new_facts = set(substitute_expression(rule.certified, mapping)) - set(stream.certified)
stream.certified = stream.certified + tuple(new_facts)
if new_facts and (stream in rules):
processed_rules.append(stream)
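# Illustrative fixed point (hypothetical predicates): given a stream certifying
# (Pose ?o ?p) and a rule with domain [(Pose ?o ?p)] and certified
# [(Reachable ?p)], the loop above extends the stream's certified facts with
# (Reachable ?p); any rule whose certified facts grew is re-enqueued until no
# further facts can be added.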
| 2,680 |
Python
| 50.557691 | 107 | 0.633582 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/fluent.py
|
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.exogenous import replace_literals
from hsr_tamp.pddlstream.language.external import get_domain_predicates
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import find_unique, get_mapping, safe_apply_mapping
def get_predicate_map(state_streams):
predicate_map = {}
for state_stream in state_streams:
for fact in state_stream.certified:
predicate = get_prefix(fact)
if predicate in predicate_map:
# TODO: could make a disjunctive condition instead
raise NotImplementedError('Only one fluent stream can certify a predicate: {}'.format(predicate))
predicate_map[predicate] = state_stream
return predicate_map
def remap_certified(literal, stream):
certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
mapping = get_mapping(get_args(certified), literal.args)
if not all(arg in mapping for arg in stream.inputs): # Certified must contain all inputs
return None
return mapping
def compile_fluent_streams(domain, externals):
state_streams = set(filter(lambda e: isinstance(e, Stream) and e.is_special, externals))
predicate_map = get_predicate_map(state_streams)
if not predicate_map:
return state_streams
    # TODO: allow usage as long as in the same action (e.g. for cost functions)
# TODO: could create a separate action per control parameter
if get_domain_predicates(externals) & set(predicate_map):
raise RuntimeError('Fluent streams certified facts cannot be domain facts')
# TODO: could make free parameters free
# TODO: could treat like a normal stream that generates values (but with no inputs required/needed)
import pddl
def fn(literal, action):
if literal.predicate not in predicate_map:
return literal
# TODO: other checks on only inputs
stream = predicate_map[literal.predicate]
mapping = remap_certified(literal, stream)
if mapping is None:
# TODO: this excludes typing. This is not entirely safe
return literal
output_args = set(mapping[arg] for arg in stream.outputs)
if isinstance(action, pddl.Action): # TODO: unified Action/Axiom effects
for effect in action.effects:
if isinstance(effect, pddl.Effect) and (output_args & set(effect.literal.args)):
raise RuntimeError('Fluent stream outputs cannot be in action effects: {}'.format(
effect.literal.predicate))
elif not stream.is_negated:
axiom = action
raise RuntimeError('Fluent stream outputs cannot be in an axiom: {}'.format(axiom.name))
blocked_args = safe_apply_mapping(stream.inputs, mapping)
blocked_literal = literal.__class__(stream.blocked_predicate, blocked_args).negate()
if stream.is_negated:
conditions = [blocked_literal]
conditions.extend(pddl.Atom(get_prefix(fact), safe_apply_mapping(get_args(fact), mapping)) # fd_from_fact
for fact in stream.domain) # TODO: be careful when using imply
return pddl.Conjunction(conditions) # TODO: prune redundant conditions
return pddl.Conjunction([literal, blocked_literal])
for action in domain.actions:
action.precondition = replace_literals(fn, action.precondition, action).simplified()
for effect in action.effects:
effect.condition = replace_literals(fn, effect.condition, action).simplified()
for axiom in domain.axioms:
axiom.condition = replace_literals(fn, axiom.condition, axiom).simplified()
return state_streams
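# Illustrative effect (hypothetical predicate names): a precondition atom
# (Safe ?o) certified by a fluent test stream over inputs (?o) is conjoined
# with the negation of the stream's blocked predicate on ?o; for a negated
# stream, the atom is instead replaced by that blocked condition together with
# the stream's (remapped) domain facts.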
| 3,832 |
Python
| 50.797297 | 117 | 0.68476 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/stream.py
|
import time
from collections import Counter
from collections.abc import Sequence  # moved out of collections in Python 3.3, removed in 3.10
from hsr_tamp.pddlstream.algorithms.common import INTERNAL_EVALUATION, add_fact
from hsr_tamp.pddlstream.algorithms.downward import make_axiom
from hsr_tamp.pddlstream.language.constants import AND, get_prefix, get_args, is_parameter, Fact, concatenate, StreamAction, Output
from hsr_tamp.pddlstream.language.conversion import list_from_conjunction, substitute_expression, \
get_formula_operators, values_from_objects, obj_from_value_expression, evaluation_from_fact, \
objects_from_values, substitute_fact
from hsr_tamp.pddlstream.language.external import ExternalInfo, Result, Instance, External, DEBUG, SHARED_DEBUG, DEBUG_MODES, \
get_procedure_fn, parse_lisp_list, select_inputs, convert_constants
from hsr_tamp.pddlstream.language.generator import get_next, from_fn, universe_test, from_test, BoundedGenerator
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject, UniqueOptValue, SharedOptValue, DebugValue, SharedDebugValue
from hsr_tamp.pddlstream.utils import str_from_object, get_mapping, irange, apply_mapping, safe_apply_mapping, safe_zip
VERBOSE_FAILURES = True
VERBOSE_WILD = False
DEFAULT_UNIQUE = False
NEGATIVE_BLOCKED = True
NEGATIVE_SUFFIX = '-negative'
CACHE_OPTIMISTIC = True
# TODO: could also make only wild facts and automatically identify output tuples satisfying certified
# TODO: default effort cost of streams with more inputs to be higher (but negated are free)
# TODO: automatically convert to test streams on inputs
##################################################
def get_empty_fn():
return lambda *input_values: None
def get_constant_fn(constant):
return lambda *input_values: constant
def get_identity_fn(indices):
return lambda *input_values: tuple(input_values[i] for i in indices)
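# Hypothetical sketch (not part of the original module): the three factories
# above build trivial stream procedures; a quick self-check of their behavior.
def _example_fn_factories():
    assert get_identity_fn([1, 0])('a', 'b') == ('b', 'a') # reorders inputs
    assert get_constant_fn(5)('ignored') == 5 # ignores inputs
    assert get_empty_fn()('ignored') is None # always fails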
##################################################
class PartialInputs(object):
def __init__(self, inputs='', unique=DEFAULT_UNIQUE, test=universe_test): #, num=1):
self.inputs = tuple(inputs.split())
self.unique = unique # TODO: refactor this
self.test = test
#self.num = num
self.stream = None
#def register(self, stream):
# assert self.stream is None
# self.stream = stream
# if self.unique:
# self.inputs = tuple(stream.inputs)
# assert set(self.inputs) <= set(stream.inputs)
#def __call__(self, *input_values):
# assert self.stream is not None
# if not self.test(*input_values):
# return
# input_objects = stream_instance.input_objects
# mapping = get_mapping(self.stream.inputs, input_objects)
# selected_objects = safe_apply_mapping(self.inputs, mapping)
# # for _ in irange(self.num):
# for _ in irange(stream_instance.num_optimistic):
# yield [tuple(SharedOptValue(self.stream.name, self.inputs, selected_objects, out)
# for out in self.stream.outputs)]
def get_opt_gen_fn(self, instance):
# TODO: just condition on the external
external = instance.external
inputs = external.inputs if self.unique else self.inputs
assert set(inputs) <= set(external.inputs)
unique = (set(inputs) == set(external.inputs))
# TODO: ensure no scoping errors with inputs
def gen_fn(*input_values):
if not self.test(*input_values):
return
# TODO: recover input_objects from input_values
selected_objects = select_inputs(instance, inputs)
for idx in irange(instance.num_optimistic): # self.num
# if len(inputs) == len(external.inputs):
# yield [tuple(UniqueOptValue(instance, idx, out)
# for out in external.outputs)]
# else:
if unique:
outputs = tuple(UniqueOptValue(instance, idx, out)
for out in external.outputs)
else:
outputs = tuple(SharedOptValue(external.name, inputs, selected_objects, out)
for out in external.outputs)
yield [outputs]
return gen_fn
def __repr__(self):
return repr(self.__dict__)
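# Hypothetical sketch (not part of the original module): PartialInputs is
# normally supplied through StreamInfo (defined later in this file) to select
# which inputs parameterize shared optimistic outputs; '?p' is a made-up name.
def _example_partial_inputs():
    return StreamInfo(opt_gen_fn=PartialInputs(inputs='?p', unique=False))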
def get_constant_gen_fn(stream, constant):
def gen_fn(*input_values):
assert (len(stream.inputs) == len(input_values))
yield [tuple(constant for _ in range(len(stream.outputs)))]
return gen_fn
# def get_unique_fn(stream):
# # TODO: this should take into account the output number...
# def fn(*input_values):
# #input_objects = map(opt_obj_from_value, input_values)
# #stream_instance = stream.get_instance(input_objects)
# #output_values = tuple(UniqueOpt(stream_instance, i) for i in range(len(stream.outputs)))
# output_values = tuple(object() for _ in range(len(stream.outputs)))
# return [output_values]
# return fn
def get_debug_gen_fn(stream, shared=True):
if shared:
return from_fn(lambda *args, **kwargs: tuple(SharedDebugValue(stream.name, o) for o in stream.outputs))
return from_fn(lambda *args, **kwargs: tuple(DebugValue(stream.name, args, o) for o in stream.outputs))
##################################################
class WildOutput(object):
def __init__(self, values=[], facts=[], actions=[], enumerated=False, replan=False):
self.values = values
self.facts = facts
self.actions = actions
if self.actions:
raise NotImplementedError()
self.enumerated = enumerated
self.replan = replan # Reports back whether the problem has changed substantially
def __iter__(self):
return iter([self.values, self.facts])
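# Hypothetical sketch (not part of the original module): a wild stream may
# return extra facts alongside its output tuples; unpacking a WildOutput
# yields (values, facts). All names below are made up.
def _example_wild_output():
    output = WildOutput(values=[('pose1',)], facts=[('Reachable', 'pose1')])
    values, facts = output
    assert values == [('pose1',)]
    return facts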
class FluentOutput(object): # TODO: unify with OptimizerOutput
# TODO: allow fluent streams to report the subset of facts that caused failure
def __init__(self, assignments=[], facts=[], infeasible=[]):
self.assignments = list(assignments)
self.facts = list(facts)
self.infeasible = list(map(frozenset, infeasible))
def to_wild(self):
return WildOutput(self.assignments, self.facts)
def __bool__(self):
return bool(self.assignments)
__nonzero__ = __bool__
def __repr__(self):
#return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
return str_from_object(self.__dict__)
class StreamInfo(ExternalInfo):
def __init__(self, opt_gen_fn=None, negate=False, simultaneous=False,
verbose=True, **kwargs): # TODO: set negate to None to express no user preference
# TODO: could change frequency/priority for the incremental algorithm
# TODO: maximum number of evaluations per iteration of adaptive
super(StreamInfo, self).__init__(**kwargs)
# TODO: call this an abstraction instead
self.opt_gen_fn = PartialInputs() if opt_gen_fn is None else opt_gen_fn
self.negate = negate
self.simultaneous = simultaneous
self.verbose = verbose
# TODO: make this false by default for negated test streams
#self.order = 0
##################################################
class StreamResult(Result):
def __init__(self, instance, output_objects, opt_index=None,
call_index=None, list_index=None, optimistic=True):
super(StreamResult, self).__init__(instance, opt_index, call_index, optimistic)
self.output_objects = tuple(output_objects)
assert len(self.output_objects) == len(self.external.outputs)
self.list_index = list_index
self._mapping = None
self._certified = None
self._stream_fact = None
@property
def mapping(self):
if self._mapping is None:
self._mapping = get_mapping(self.external.outputs, self.output_objects)
self._mapping.update(self.instance.mapping)
return self._mapping
@property
def stream_fact(self):
if self._stream_fact is None:
self._stream_fact = substitute_expression(self.external.stream_fact, self.mapping)
return self._stream_fact
@property
def certified(self):
if self._certified is None:
self._certified = substitute_expression(self.external.certified, self.mapping)
return self._certified
def get_certified(self):
return self.certified
def get_action(self):
return StreamAction(self.name, self.input_objects, self.output_objects)
def get_optimistic(self):
raise NotImplementedError()
index = 0
#index = self.call_index
return self.instance.opt_results[index]
def remap_inputs(self, bindings):
new_instance = self.instance.remap_inputs(bindings)
return self.__class__(new_instance, self.output_objects, self.opt_index,
self.call_index, self.list_index, self.optimistic)
# def remap_outputs(self, bindings):
# new_instance = self.instance.remap_inputs(bindings)
# output_objects = apply_mapping(self.output_objects, bindings)
# return self.__class__(new_instance, output_objects, self.opt_index,
# self.call_index, self.list_index, self.optimistic)
def is_successful(self):
return True
def __repr__(self):
return '{}:{}->{}'.format(self.external.name,
str_from_object(self.instance.input_objects),
str_from_object(self.output_objects))
##################################################
class StreamInstance(Instance):
_Result = StreamResult
def __init__(self, stream, input_objects, fluent_facts):
super(StreamInstance, self).__init__(stream, input_objects)
self._generator = None
self.fluent_facts = frozenset(fluent_facts)
self.opt_gen_fns = [opt_gen_fn.get_opt_gen_fn(self) if isinstance(opt_gen_fn, PartialInputs) else opt_gen_fn
for opt_gen_fn in self.external.opt_gen_fns]
self.opt_gens = len(self.opt_gen_fns)*[None]
self._axiom_predicate = None
self._disabled_axiom = None
# TODO: keep track of unique outputs to prune repeated ones
def _check_output_values(self, new_values):
if not isinstance(new_values, Sequence):
raise ValueError('An output list for stream [{}] is not a sequence: {}'.format(self.external.name, new_values))
for output_values in new_values:
if not isinstance(output_values, Sequence):
raise ValueError('An output tuple for stream [{}] is not a sequence: {}'.format(
self.external.name, output_values))
if len(output_values) != len(self.external.outputs):
raise ValueError('An output tuple for stream [{}] has length {} instead of {}: {}'.format(
self.external.name, len(output_values), len(self.external.outputs), output_values))
def _check_wild_facts(self, new_facts):
if not isinstance(new_facts, Sequence):
raise ValueError('Output wild facts for wild stream [{}] is not a sequence: {}'.format(
self.external.name, new_facts))
def reset(self):
super(StreamInstance, self).reset()
self.previous_outputs = set()
self.num_optimistic = 1
#########################
def get_result(self, output_objects, opt_index=None, list_index=None, optimistic=True):
# TODO: rename to create_result because not unique
# TODO: ideally would increment a flag per stream for each failure
call_index = self.num_calls
#call_index = self.successes # Only counts iterations that return results for complexity
return self._Result(instance=self, output_objects=tuple(output_objects), opt_index=opt_index,
call_index=call_index, list_index=list_index, optimistic=optimistic)
def get_all_input_objects(self): # TODO: lazily compute
return set(self.input_objects) | {o for f in self.fluent_facts for o in get_args(f)}
def get_fluent_values(self):
return [Fact(get_prefix(f), values_from_objects(get_args(f))) for f in self.fluent_facts]
def _create_generator(self):
if self._generator is None:
input_values = self.get_input_values()
if self.external.is_fluent: # self.fluent_facts
self._generator = self.external.gen_fn(*input_values, fluents=self.get_fluent_values())
else:
self._generator = self.external.gen_fn(*input_values)
return self._generator
def _next_wild(self):
output, self.enumerated = get_next(self._generator, default=[])
if not isinstance(output, WildOutput):
output = WildOutput(values=output)
return output
def _next_outputs(self):
# TODO: deprecate
self._create_generator()
# TODO: shuffle history
# TODO: return all test stream outputs at once
if self.num_calls == len(self.history):
self.history.append(self._next_wild())
return self.history[self.num_calls]
def dump_new_values(self, new_values=[]):
if (not new_values and VERBOSE_FAILURES) or \
(new_values and self.info.verbose):
print('iter={}, outs={}) {}:{}->{}'.format(
self.get_iteration(), len(new_values), self.external.name,
str_from_object(self.get_input_values()), str_from_object(new_values)))
def dump_new_facts(self, new_facts=[]):
if VERBOSE_WILD and new_facts:
# TODO: format all_new_facts
            print('iter={}, facts={}) {}:{}->{}'.format(
                self.get_iteration(), len(new_facts), self.external.name,
                str_from_object(self.get_input_values()), new_facts))
def next_results(self, verbose=False):
assert not self.enumerated
start_time = time.time()
start_history = len(self.history)
new_values, new_facts = self._next_outputs()
self._check_output_values(new_values)
self._check_wild_facts(new_facts)
if verbose:
self.dump_new_values(new_values)
self.dump_new_facts(new_facts)
objects = [objects_from_values(output_values) for output_values in new_values]
new_objects = list(filter(lambda o: o not in self.previous_outputs, objects))
self.previous_outputs.update(new_objects) # Only counting new outputs as successes
new_results = [self.get_result(output_objects, list_index=list_index, optimistic=False)
for list_index, output_objects in enumerate(new_objects)]
if start_history <= len(self.history) - 1:
self.update_statistics(start_time, new_results)
new_facts = list(map(obj_from_value_expression, new_facts))
self.successful |= any(r.is_successful() for r in new_results)
self.num_calls += 1 # Must be after get_result
#if self.external.is_test and self.successful:
# # Set of possible test stream outputs is exhausted (excluding wild)
# self.enumerated = True
return new_results, new_facts
#########################
    def get_representative_optimistic(self):
        for opt_gen in self.opt_gens:
            if (opt_gen is not None) and opt_gen.history and opt_gen.history[0]:
                return opt_gen.history[0][0]
        return None
def wrap_optimistic(self, output_values, call_index):
output_objects = []
representative_outputs = self.get_representative_optimistic()
assert representative_outputs is not None
for name, value, rep in zip(self.external.outputs, output_values, representative_outputs):
# TODO: retain the value from a custom opt_gen_fn but use unique
#unique = UniqueOptValue(instance=self, sequence_index=call_index, output=name) # object()
#param = unique if (self.opt_index == 0) else value
param = value
value = rep
output_objects.append(OptimisticObject.from_opt(value, param))
return tuple(output_objects)
def _create_opt_generator(self, opt_index=None):
# TODO: automatically refine opt_index based on self.opt_gens
if opt_index is None:
opt_index = self.opt_index
if self.opt_gens[opt_index] is None:
self.opt_gens[opt_index] = BoundedGenerator(self.opt_gen_fns[opt_index](*self.get_input_values()))
opt_gen = self.opt_gens[opt_index]
try:
next(opt_gen) # next | list
except StopIteration:
pass
return self.opt_gens[opt_index]
def next_optimistic(self):
if self.enumerated or self.disabled:
return []
opt_gen = self._create_opt_generator(self.opt_index)
# TODO: how do I distinguish between real and not real verifications of things?
output_set = set()
opt_results = []
for output_list in opt_gen.history:
self._check_output_values(output_list)
for output_values in output_list:
call_index = len(opt_results)
output_objects = self.wrap_optimistic(output_values, call_index)
if output_objects not in output_set:
output_set.add(output_objects) # No point returning the exact thing here...
opt_results.append(self._Result(instance=self, output_objects=output_objects,
opt_index=self.opt_index, call_index=call_index, list_index=0))
return opt_results
def get_blocked_fact(self):
if self.external.is_fluent:
assert self._axiom_predicate is not None
return Fact(self._axiom_predicate, self.input_objects)
return Fact(self.external.blocked_predicate, self.input_objects)
def _disable_fluent(self, evaluations, domain):
assert self.external.is_fluent
if self.successful or (self._axiom_predicate is not None):
return
self.disabled = True
index = len(self.external.disabled_instances)
self.external.disabled_instances.append(self)
self._axiom_predicate = '_ax{}-{}'.format(self.external.blocked_predicate, index)
add_fact(evaluations, self.get_blocked_fact(), result=INTERNAL_EVALUATION,
complexity=self.compute_complexity(evaluations))
# TODO: allow reporting back minimum unsatisfiable subset
static_fact = Fact(self._axiom_predicate, self.external.inputs)
preconditions = [static_fact] + list(self.fluent_facts)
derived_fact = Fact(self.external.blocked_predicate, self.external.inputs)
self._disabled_axiom = make_axiom(
parameters=self.external.inputs,
preconditions=preconditions,
derived=derived_fact)
domain.axioms.append(self._disabled_axiom)
def _disable_negated(self, evaluations):
assert self.external.is_negated
if self.successful:
return
self.disabled = True
add_fact(evaluations, self.get_blocked_fact(), result=INTERNAL_EVALUATION,
complexity=self.compute_complexity(evaluations))
def disable(self, evaluations, domain):
#assert not self.disabled
#super(StreamInstance, self).disable(evaluations, domain)
if self.external.is_fluent:
self._disable_fluent(evaluations, domain)
elif self.external.is_negated:
self._disable_negated(evaluations)
else:
self.disabled = True
def enable(self, evaluations, domain):
if not self.disabled:
return
#if self._disabled_axiom is not None:
# self.external.disabled_instances.remove(self)
# domain.axioms.remove(self._disabled_axiom)
# self._disabled_axiom = None
#super(StreamInstance, self).enable(evaluations, domain) # TODO: strange infinite loop bug if enabled?
evaluations.pop(evaluation_from_fact(self.get_blocked_fact()), None)
def remap_inputs(self, bindings):
# TODO: speed this procedure up
#if not any(o in bindings for o in self.get_all_input_objects()):
# return self
input_objects = apply_mapping(self.input_objects, bindings)
fluent_facts = [substitute_fact(f, bindings) for f in self.fluent_facts]
new_instance = self.external.get_instance(input_objects, fluent_facts=fluent_facts)
new_instance.opt_index = self.opt_index
return new_instance
def __repr__(self):
return '{}:{}->{}'.format(self.external.name, self.input_objects, self.external.outputs)
##################################################
class Stream(External):
_Instance = StreamInstance
def __init__(self, name, gen_fn, inputs, domain, outputs, certified, info=StreamInfo(), fluents=[]):
super(Stream, self).__init__(name, info, inputs, domain)
self.outputs = tuple(outputs)
self.certified = tuple(map(convert_constants, certified))
self.constants.update(a for i in certified for a in get_args(i) if not is_parameter(a))
self.fluents = fluents
#self.fluents = [] if (gen_fn in DEBUG_MODES) else fluents
for p, c in Counter(self.outputs).items():
if not is_parameter(p):
raise ValueError('Output [{}] for stream [{}] is not a parameter'.format(p, name))
if c != 1:
raise ValueError('Output [{}] for stream [{}] is not unique'.format(p, name))
for p in set(self.inputs) & set(self.outputs):
raise ValueError('Parameter [{}] for stream [{}] is both an input and output'.format(p, name))
certified_parameters = {a for i in certified for a in get_args(i) if is_parameter(a)}
for p in (certified_parameters - set(self.inputs + self.outputs)):
raise ValueError('Parameter [{}] for stream [{}] is not included within outputs'.format(p, name))
for p in (set(self.outputs) - certified_parameters):
print('Warning! Output [{}] for stream [{}] is not covered by a certified condition'.format(p, name))
# TODO: automatically switch to unique if only used once
self.gen_fn = gen_fn # DEBUG_MODES
if gen_fn == DEBUG:
self.gen_fn = get_debug_gen_fn(self, shared=False) # TODO: list of abstractions that is considered in turn
elif gen_fn == SHARED_DEBUG:
self.gen_fn = get_debug_gen_fn(self, shared=True)
assert callable(self.gen_fn)
self.opt_gen_fns = [PartialInputs(unique=True)]
if not self.is_test and not self.is_special and not \
(isinstance(self.info.opt_gen_fn, PartialInputs) and self.info.opt_gen_fn.unique):
self.opt_gen_fns.append(self.info.opt_gen_fn)
if NEGATIVE_BLOCKED:
self.blocked_predicate = '~{}{}'.format(self.name, NEGATIVE_SUFFIX) # Args are self.inputs
else:
self.blocked_predicate = '~{}'.format(self.name)
self.disabled_instances = [] # For tracking disabled axioms
self.stream_fact = Fact('_{}'.format(name), concatenate(inputs, outputs)) # TODO: just add to certified?
if self.is_negated:
if self.outputs:
raise ValueError('Negated streams cannot have outputs: {}'.format(self.outputs))
#assert len(self.certified) == 1 # TODO: is it okay to have more than one fact?
for certified in self.certified:
if not (set(self.inputs) <= set(get_args(certified))):
raise ValueError('Negated streams must have certified facts including all input parameters')
#def reset(self):
# super(Stream, self).reset()
# self.disabled_instances = []
@property
def num_opt_fns(self):
return len(self.opt_gen_fns) - 1
@property
def has_outputs(self):
return bool(self.outputs)
@property
def is_test(self):
return not self.has_outputs
@property
def is_fluent(self):
return bool(self.fluents)
@property
def is_negated(self):
return self.info.negate
@property
def is_function(self):
return False
def get_instance(self, input_objects, fluent_facts=frozenset()):
input_objects = tuple(input_objects)
fluent_facts = frozenset(fluent_facts)
assert all(isinstance(obj, Object) or isinstance(obj, OptimisticObject) for obj in input_objects)
key = (input_objects, fluent_facts)
if key not in self.instances:
self.instances[key] = self._Instance(self, input_objects, fluent_facts)
return self.instances[key]
def as_test_stream(self):
# TODO: method that converts a stream into a test stream (possibly from ss)
raise NotImplementedError()
def __repr__(self):
return '{}:{}->{}'.format(self.name, self.inputs, self.outputs)
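# Hypothetical sketch (not part of the original module): declaring a stream
# with the DEBUG procedure, which fabricates placeholder output values; the
# stream and predicate names below are made up.
def _example_debug_stream():
    return Stream(name='sample-grasp', gen_fn=DEBUG,
                  inputs=['?o'], domain=[('Graspable', '?o')],
                  outputs=['?g'], certified=[('Grasp', '?o', '?g')],
                  info=StreamInfo(), fluents=[])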
##################################################
def create_equality_stream():
return Stream(name='equality', gen_fn=from_test(universe_test),
inputs=['?o'], domain=[('Object', '?o')],
outputs=[], certified=[('=', '?o', '?o')],
info=StreamInfo(eager=True), fluents=[])
def create_inequality_stream():
#from hsr_tamp.pddlstream.algorithms.downward import IDENTICAL
    return Stream(name='inequality', gen_fn=from_test(lambda o1, o2: o1 != o2),
                  inputs=['?o1', '?o2'], domain=[('Object', '?o1'), ('Object', '?o2')],
                  # TODO: certifying ('=', '?o1', '?o2') appears inconsistent with the o1 != o2 test
                  outputs=[], certified=[('=', '?o1', '?o2')],
                  info=StreamInfo(eager=True), fluents=[])
##################################################
def parse_stream(lisp_list, stream_map, stream_info):
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {':stream', ':inputs', ':domain', ':fluents', ':outputs', ':certified'}
name = value_from_attribute[':stream']
domain = value_from_attribute.get(':domain', None)
# TODO: dnf_from_positive_formula(value_from_attribute.get(':domain', []))
if not (get_formula_operators(domain) <= {AND}):
# TODO: allow positive DNF
raise ValueError('Stream [{}] domain must be a conjunction'.format(name))
certified = value_from_attribute.get(':certified', None)
if not (get_formula_operators(certified) <= {AND}):
raise ValueError('Stream [{}] certified must be a conjunction'.format(name))
return Stream(name, get_procedure_fn(stream_map, name),
value_from_attribute.get(':inputs', []),
list_from_conjunction(domain),
value_from_attribute.get(':outputs', []),
list_from_conjunction(certified),
stream_info.get(name, StreamInfo()),
fluents=value_from_attribute.get(':fluents', []))
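# Hypothetical sketch (not part of the original module): the lisp-style
# declaration format parse_stream expects; all names below are made up.
def _example_parse_stream():
    lisp_list = [':stream', 'sample-pose',
                 ':inputs', ['?o'],
                 ':domain', ['Graspable', '?o'],
                 ':outputs', ['?p'],
                 ':certified', ['Pose', '?o', '?p']]
    return parse_stream(lisp_list, {'sample-pose': DEBUG}, {})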
| 27,086 |
Python
| 46.68838 | 134 | 0.618401 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/function.py
|
import time
from hsr_tamp.pddlstream.language.conversion import substitute_expression, list_from_conjunction, str_from_head
from hsr_tamp.pddlstream.language.constants import Not, Equal, get_prefix, get_args, is_head, FunctionAction
from hsr_tamp.pddlstream.language.external import ExternalInfo, Result, Instance, External, DEBUG_MODES, get_procedure_fn
from hsr_tamp.pddlstream.utils import str_from_object, apply_mapping
# https://stackoverflow.com/questions/847936/how-can-i-find-the-number-of-arguments-of-a-python-function
#try:
# from inspect import getfullargspec as get_arg_spec
#except ImportError:
# from inspect import getargspec as get_arg_spec
#from inspect import getargspec as get_arg_spec
#from inspect import signature
##################################################
def add_opt_function(name, base_fn, stream_map, stream_info, constant=0., coefficient=1., **external_kwargs):
stream_fn = lambda *args, **kwargs: constant + coefficient*base_fn(*args, **kwargs)
stream_map[name] = stream_fn
opt_fn = lambda *args, **kwargs: constant
info = FunctionInfo(opt_fn=opt_fn, **external_kwargs)
stream_info[name] = info
return stream_map, stream_info
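# Hypothetical sketch (not part of the original module): registering a cost
# function whose optimistic estimate is the constant lower bound; the name
# 'distance' is made up.
def _example_add_opt_function():
    stream_map, stream_info = {}, {}
    add_opt_function('distance', lambda x, y: abs(x - y),
                     stream_map, stream_info, constant=1., coefficient=2.)
    assert stream_map['distance'](3, 5) == 5. # 1 + 2*|3 - 5|
    assert stream_info['distance'].opt_fn(3, 5) == 1. # constant estimate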
##################################################
class FunctionInfo(ExternalInfo):
_default_eager = True
def __init__(self, opt_fn=None, eager=_default_eager, verbose=True, **kwargs): # Setting eager=True as a heuristic
super(FunctionInfo, self).__init__(eager=eager, **kwargs)
self.opt_fn = opt_fn
self.verbose = verbose # TODO: move to ExternalInfo
#self.order = 0
class FunctionResult(Result):
def __init__(self, instance, value, optimistic=True):
super(FunctionResult, self).__init__(instance, opt_index=0, call_index=0, optimistic=optimistic)
self.instance = instance
self.value = value
self._certified = None
# TODO: could add empty output_objects tuple
@property
def certified(self):
if self._certified is None:
self._certified = [Equal(self.instance.head, self.value)]
return self._certified
def get_certified(self):
return self.certified
def get_action(self):
return FunctionAction(self.name, self.input_objects)
def remap_inputs(self, bindings):
#if not any(o in bindings for o in self.instance.get_all_input_objects()):
# return self
input_objects = apply_mapping(self.instance.input_objects, bindings)
new_instance = self.external.get_instance(input_objects)
return self.__class__(new_instance, self.value, self.optimistic)
def is_successful(self):
return True
def __repr__(self):
#from hsr_tamp.pddlstream.algorithms.downward import get_cost_scale
#value = math.log(self.value) # TODO: number of digits to display
return '{}={:.3f}'.format(str_from_head(self.instance.head), self.value)
class FunctionInstance(Instance):
_Result = FunctionResult
#_opt_value = 0
def __init__(self, external, input_objects):
super(FunctionInstance, self).__init__(external, input_objects)
self._head = None
@property
def head(self):
if self._head is None:
self._head = substitute_expression(self.external.head, self.mapping)
return self._head
@property
def value(self):
assert len(self.history) == 1
return self.history[0]
def _compute_output(self):
self.enumerated = True
self.num_calls += 1
if self.history:
return self.value
input_values = self.get_input_values()
value = self.external.fn(*input_values)
# TODO: cast the inputs and test whether still equal?
# if not (type(self.value) is self.external._codomain):
# if not isinstance(self.value, self.external.codomain):
if value < 0:
raise ValueError('Function [{}] produced a negative value [{}]'.format(self.external.name, value))
self.history.append(self.external.codomain(value))
return self.value
def next_results(self, verbose=False):
assert not self.enumerated
start_time = time.time()
start_history = len(self.history)
value = self._compute_output()
new_results = [self._Result(self, value, optimistic=False)]
new_facts = []
if (value is not False) and verbose:
# TODO: str(new_results[-1])
print('iter={}, outs={}) {}{}={:.3f}'.format(
self.get_iteration(), len(new_results), get_prefix(self.external.head),
str_from_object(self.get_input_values()), value))
if start_history <= len(self.history) - 1:
self.update_statistics(start_time, new_results)
self.successful |= any(r.is_successful() for r in new_results)
return new_results, new_facts
def next_optimistic(self):
if self.enumerated or self.disabled:
return []
# TODO: cache this value
opt_value = self.external.opt_fn(*self.get_input_values())
self.opt_results = [self._Result(self, opt_value, optimistic=True)]
return self.opt_results
def __repr__(self):
return '{}=?{}'.format(str_from_head(self.head), self.external.codomain.__name__)
class Function(External):
"""
An external nonnegative function F(i1, ..., ik) -> 0 <= int
External functions differ from streams in that their output isn't an object
"""
codomain = float # int | float
_Instance = FunctionInstance
#_default_p_success = 0.99 # 0.99 | 1 # Might be pruned using cost threshold
def __init__(self, head, fn, domain, info):
# TODO: function values that act as preconditions (cost must be below threshold)
if info is None:
# TODO: move the defaults to FunctionInfo in the event that an optimistic fn is specified
info = FunctionInfo() #p_success=self._default_p_success)
super(Function, self).__init__(get_prefix(head), info, get_args(head), domain)
self.head = head
opt_fn = lambda *args: self.codomain()
self.fn = opt_fn if (fn in DEBUG_MODES) else fn
#arg_spec = get_arg_spec(self.fn)
#if len(self.inputs) != len(arg_spec.args):
# raise TypeError('Function [{}] expects inputs {} but its procedure has inputs {}'.format(
# self.name, list(self.inputs), arg_spec.args))
self.opt_fn = opt_fn if (self.info.opt_fn is None) else self.info.opt_fn
self.num_opt_fns = 0 # TODO: support multiple opt_fns
@property
def function(self):
return get_prefix(self.head)
@property
def has_outputs(self):
return False
@property
def is_fluent(self):
return False
@property
def is_negated(self):
return False
@property
def is_function(self):
return True
@property
def is_cost(self):
return True
def __repr__(self):
return '{}=?{}'.format(str_from_head(self.head), self.codomain.__name__)
##################################################
class PredicateInfo(FunctionInfo):
_default_eager = False
class PredicateResult(FunctionResult):
def get_certified(self):
# TODO: cache these results
expression = self.instance.head
return [expression if self.value else Not(expression)]
def is_successful(self):
opt_value = self.external.opt_fn(*self.instance.get_input_values())
return self.value == opt_value
class PredicateInstance(FunctionInstance):
_Result = PredicateResult
#_opt_value = True # True | False | Predicate._codomain()
#def was_successful(self, results):
# #self.external.opt_fn(*input_values)
# return any(r.value for r in results)
class Predicate(Function):
"""
An external predicate P(i1, ..., ik) -> {False, True}
External predicates do not make the closed world assumption
"""
_Instance = PredicateInstance
codomain = bool
#def is_negative(self):
# return self._Instance._opt_value is False
def __init__(self, head, fn, domain, info):
if info is None:
info = PredicateInfo()
super(Predicate, self).__init__(head, fn, domain, info)
assert(self.info.opt_fn is None)
self.blocked_predicate = self.name
@property
def predicate(self):
return self.function
@property
def is_negated(self):
return True
@property
def is_cost(self):
return False
##################################################
def parse_common(lisp_list, stream_map, stream_info):
assert (2 <= len(lisp_list) <= 3)
head = tuple(lisp_list[1])
assert (is_head(head))
name = get_prefix(head)
fn = get_procedure_fn(stream_map, name)
domain = []
if len(lisp_list) == 3:
domain = list_from_conjunction(lisp_list[2])
info = stream_info.get(name, None)
return head, fn, domain, info
def parse_function(*args):
return Function(*parse_common(*args))
def parse_predicate(*args):
return Predicate(*parse_common(*args))
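# Hypothetical sketch (not part of the original module): the lisp-style
# declaration format parse_function expects; all names below are made up.
def _example_parse_function():
    lisp_list = [':function', ['Distance', '?q1', '?q2'],
                 ['and', ['Conf', '?q1'], ['Conf', '?q2']]]
    stream_map = {'Distance': lambda q1, q2: abs(q1 - q2)}
    return parse_function(lisp_list, stream_map, {})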
| 9,145 |
Python
| 38.593073 | 121 | 0.627337 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/temporal.py
|
from __future__ import print_function
#from os.path import expanduser
import os
import re
import subprocess
import time
import sys
import traceback
from collections import namedtuple
from hsr_tamp.pddlstream.algorithms.downward import TEMP_DIR, DOMAIN_INPUT, PROBLEM_INPUT, make_effects, \
parse_sequential_domain, get_conjunctive_parts, write_pddl, make_action, make_parameters, make_object, fd_from_fact, Domain, make_effects
from hsr_tamp.pddlstream.language.constants import DurativeAction, Fact, Not
from hsr_tamp.pddlstream.utils import INF, ensure_dir, write, user_input, safe_rm_dir, read, elapsed_time, find_unique, safe_zip
PLANNER = 'tfd' # tfd | tflap | optic | tpshe | cerberus
# tflap: no conditional effects, no derived predicates
# optic: no negative preconditions, no conditional effects, no goal derived predicates
# TODO: previously slow instantiation was due to a missing precondition on move
# TODO: installing coin broke FD compilation so I uninstalled it
# sudo apt-get install cmake coinor-libcbc-dev coinor-libclp-dev
# sudo apt-get install coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev doxygen libbz2-dev bison flex
# sudo apt-get install coinor-cbc
# sudo apt-get install apt-get -y install g++ make flex bison cmake doxygen coinor-clp coinor-libcbc-dev coinor-libclp-dev coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev libbz2-dev libgsl-dev libz-dev
# sudo apt-get install g++ make flex bison cmake doxygen coinor-clp coinor-libcbc-dev coinor-libclp-dev coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev libbz2-dev libgsl-dev libz-dev
# sudo apt-get remove coinor-libcbc-dev coinor-libclp-dev
# sudo apt-get remove coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev
##################################################
# /home/caelan/Programs/VAL
ENV_VAR = 'TFD_PATH'
#TFD_PATH = '/home/caelan/Programs/tfd-src-0.4/downward'
#TFD_PATH = '/home/caelan/Programs/TemPorAl/src/src/TFD'
#TFD_PATH = '/home/caelan/Programs/TemPorAl/src/src/temporal-FD'
MAX_TIME = '{max_planner_time}'
PLAN_FILE = 'plan'
#TFD_TRANSLATE = os.path.join(TFD_PATH, 'downward/translate/') # TFD
# TODO: the search produces unsound plans when it prints the full state-space
# TODO: still occasionally does this with the current settings
TFD_OPTIONS = {
'a': False, # anytime search
't': MAX_TIME, # success timeout
'T': MAX_TIME, # failure timeout
'g': False, # greedy search
'l': True, # disable lazy evaluation (slow when using the makespan heuristic)
'v': True, # disable verbose
'y+Y': True, # CEA heuristic
'x+X': False, # makespan heuristic
'G': 'm', # g-value evaluation (m, c, t, w)
'Q': 'p', # queue (r, p, h)
'r': True, # reschedule # TODO: reschedule doesn't seem to work well with conditional effects
#'O': 1, # num ordered preferred ops, TFD doesn't support
#'C': 1, # num cheapest preferred ops, TFD doesn't support
#'E': 1000, # num expensive preferred ops
#'R': 1000, # num random preferred ops,
'e': True, # epsilon internally
'f': False, # epsilon externally
#'b': True, # reset after solution, TFD doesn't support
}
def create_planner(anytime=False, greedy=False, lazy=False, h_cea=False, h_makespan=False, reschedule=False):
planner = dict(TFD_OPTIONS)
planner.update({
'a': anytime, # anytime search
'g': greedy, # greedy search
'l': not lazy, # disable lazy evaluation (slow when using the makespan heuristic)
'y+Y': h_cea, # CEA heuristic
'x+X': h_makespan, # makespan heuristic
'r': reschedule, # reschedule
})
return planner
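# Hypothetical sketch (not part of the original module): building an option
# dictionary for an anytime, makespan-driven search; format_option (below)
# serializes each pair for the TFD command line.
def _example_create_planner():
    planner = create_planner(anytime=True, h_makespan=True)
    assert planner['a'] and planner['x+X'] and not planner['y+Y']
    return planner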
# https://github.com/caelan/TemporalFastDownward/blob/020da65a39d3f44c821cc2062d1006ccb0fcd7e5/downward/search/best_first_search.cc#L376
# best_first_search
# makespan seems to be computed using timestep plus longest action
def format_option(pair):
key, value = pair
if value is True:
return key
if value is False:
return None
return '{}+{}'.format(key, value)
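# Hypothetical sketch (not part of the original module): True flags keep their
# key, False flags drop out, and valued options join with '+', mirroring how
# solve_tfd (below) assembles the TFD argument string.
def _example_format_options():
    options = {'a': True, 'g': False, 'T': 30} # made-up subset of TFD_OPTIONS
    args = '+'.join(sorted(filter(None, map(format_option, options.items()))))
    assert args == 'T+30+a'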
# Contains universal conditions: 1
# Disabling rescheduling because of universal conditions in original task!
# TODO: convert finite quantifiers
# /home/caelan/Programs/VAL/validate /home/caelan/Programs/pddlstream/temp/domain.pddl /home/caelan/Programs/pddlstream/temp/problem.pddl /home/caelan/Programs/pddlstream/temp/plan
# Parameters just used in search (and split by +)
#TFD_COMMAND = 'plan.py n {} {} {}' # Default in plannerParameters.h
#TFD_COMMAND = 'plan.py y+Y+a+e+r+O+1+C+1+b {} {} {}' # Default in ./plan
#TFD_COMMAND = 'plan.py y+Y+e+O+1+C+1+b {} {} {}'
#TFD_COMMAND = 'plan.py +x+X+e+O+1+C+1+b+G+m+T+10+Q+p {} {} {}'
TFD_COMMAND = 'plan.py %s {} {} {}'
# TODO: TFD sometimes returns incorrect plans
# ./VAL/validate pddlstream/temp/domain.pddl pddlstream/temp/problem.pddl pddlstream/temp/plan
# Finds a plan and then retimes it
"""
Usage: search <option characters> (input read from stdin)
Options are:
a - enable anytime search (otherwise finish on first plan found)
t <timeout secs> - total timeout in seconds for anytime search (when plan found)
T <timeout secs> - total timeout in seconds for anytime search (when no plan found)
m <monitor file> - monitor plan, validate a given plan
g - perform greedy search (follow heuristic)
l - disable lazy evaluation (Lazy = use parent's f instead of child's)
v - disable verbose printouts
y - cyclic cg CEA heuristic
Y - cyclic cg CEA heuristic - preferred operators
x - cyclic cg makespan heuristic
X - cyclic cg makespan heuristic - preferred operators
G [m|c|t|w] - G value evaluation, one of m - makespan, c - pathcost, t - timestamp, w [weight] - weighted / Note: One of those has to be set!
Q [r|p|h] - queue mode, one of r - round robin, p - priority, h - hierarchical
K - use tss known filtering (might crop search space)!
n - no_heuristic
r - reschedule_plans
O [n] - prefOpsOrderedMode, with n being the number of pref ops used
C [n] - prefOpsCheapestMode, with n being the number of pref ops used
E [n] - prefOpsMostExpensiveMode, with n being the number of pref ops used
e - epsilonize internally
f - epsilonize externally
p <plan file> - plan filename prefix
M v - monitoring: verify timestamps
u - do not use cachin in heuristic
"""
# b - reset_after_solution_was_found
# p - plan_name
# i - reward_only_pref_op_queue
# S - pref_ops_concurrent_mode
# R - number_pref_ops_rand_mode
# K use_known_by_logical_state_only=True
# Default parameters (plan.py n {} {} {})
"""
Planner Paramters:
Anytime Search: Disabled
Timeout if plan was found: 0 seconds (no timeout)
Timeout while no plan was found: 0 seconds (no timeout)
Greedy Search: Disabled
Verbose: Enabled
Lazy Heuristic Evaluation: Enabled
Use caching in heuristic.
Cyclic CG heuristic: Disabled Preferred Operators: Disabled
Makespan heuristic: Disabled Preferred Operators: Disabled
No Heuristic: Enabled
Cg Heuristic Zero Cost Waiting Transitions: Enabled
Cg Heuristic Fire Waiting Transitions Only If Local Problems Matches State: Disabled
PrefOpsOrderedMode: Disabled with 1000 goals
PrefOpsCheapestMode: Disabled with 1000 goals
PrefOpsMostExpensiveMode: Disabled with 1000 goals
PrefOpsRandMode: Disabled with 1000 goals
PrefOpsConcurrentMode: Disabled
Reset after solution was found: Disabled
Reward only preferred operators queue: Disabled
GValues by: Timestamp
Queue management mode: Priority based
Known by logical state only filtering: Disabled
use_subgoals_to_break_makespan_ties: Disabled
Reschedule plans: Disabled
Epsilonize internally: Disabled
Epsilonize externally: Disabled
Keep original plans: Enabled
Plan name: "/home/caelan/Programs/pddlstream/temp/plan"
Plan monitor file: "" (no monitoring)
Monitoring verify timestamps: Disabled
"""
# plannerParameters.h
"""
anytime_search = false;
timeout_while_no_plan_found = 0;
timeout_if_plan_found = 0;
greedy = false;
lazy_evaluation = true;
verbose = true;
insert_let_time_pass_only_when_running_operators_not_empty = false;
cyclic_cg_heuristic = false;
cyclic_cg_preferred_operators = false;
makespan_heuristic = false;
makespan_heuristic_preferred_operators = false;
no_heuristic = false;
cg_heuristic_zero_cost_waiting_transitions = true;
cg_heuristic_fire_waiting_transitions_only_if_local_problems_matches_state = false;
use_caching_in_heuristic = true;
g_values = GTimestamp;
g_weight = 0.5;
queueManagementMode = BestFirstSearchEngine::PRIORITY_BASED;
use_known_by_logical_state_only = false;
use_subgoals_to_break_makespan_ties = false;
reschedule_plans = false;
epsilonize_internally = false;
epsilonize_externally = false;
keep_original_plans = true;
pref_ops_ordered_mode = false;
pref_ops_cheapest_mode = false;
pref_ops_most_expensive_mode = false;
pref_ops_rand_mode = false;
pref_ops_concurrent_mode = false;
number_pref_ops_ordered_mode = 1000;
number_pref_ops_cheapest_mode = 1000;
number_pref_ops_most_expensive_mode = 1000;
number_pref_ops_rand_mode = 1000;
reset_after_solution_was_found = false;
reward_only_pref_op_queue = false;
plan_name = "sas_plan";
planMonitorFileName = "";
monitoring_verify_timestamps = false;
"""
##################################################
TFLAP_PATH = '/home/caelan/Programs/tflap/src'
# Usage: tflap <domain_file> <problem_file> <output_file> [-ground] [-static] [-mutex] [-trace]
# -ground: generates the GroundedDomain.pddl and GroundedProblem.pddl files.
# -static: keeps the static data in the planning task.
# -nsas: does not make translation to SAS (finite-domain variables).
# -mutex: generates the mutex.txt file with the list of static mutex facts.
# -trace: generates the trace.txt file with the search tree.
TFLAP_COMMAND = 'tflap {} {} {}'
#TFLAP_COMMAND = 'tflap {} {} {} -trace' # Seems to repeatedly fail
##################################################
OPTIC_PATH = '/home/caelan/Programs/optic2018/src/optic/src/optic'
OPTIC_COMMAND = 'optic-clp -N {} {} | tee {}'
"""
Usage: optic/src/optic/optic-clp [OPTIONS] domainfile problemfile [planfile, if -r specified]
Options are:
-N Don't optimise solution quality (ignores preferences and costs);
-0 Abstract out timed initial literals that represent recurrent windows;
-n<lim> Optimise solution quality, capping cost at <lim>;
-citation Display citation to relevant papers;
-b Disable best-first search - if EHC fails, abort;
-E Skip EHC: go straight to best-first search;
-e Use standard EHC instead of steepest descent;
-h Disable helpful-action pruning;
-k Disable compression-safe action detection;
-c Enable the tie-breaking in RPG that favour actions that slot into the partial order earlier;
-S Sort initial layer facts in RPG by availability order (only use if using -c);
-m Disable the tie-breaking in search that favours plans with shorter makespans;
-F Full FF helpful actions (rather than just those in the RP applicable in the current state);
-r Read in a plan instead of planning;
-T Rather than building a partial order, build a total-order
-v<n> Verbose to degree n (n defaults to 1 if not specified).
-L<n> LP verbose to degree n (n defaults to 1 if not specified).
"""
"""
Unfortunately, at present, the planner does not fully support ADL
unless in the rules for derived predicates. Only two aspects of
ADL can be used in action definitions:
- forall conditions, containing a simple conjunct of propositional and
numeric facts;
- Conditional (when... ) effects, and then only with numeric conditions
and numeric consequences on values which do not appear in the
preconditions of actions.
"""
##################################################
# TODO: tpshe seems to be broken
"""
usage: plan.py [-h] [--generator GENERATOR] [--time TIME] [--memory MEMORY]
[--iterated] [--no-iterated] [--plan-file PLANFILE]
[--validate] [--no-validate]
planner domain problem
"""
TPSHE_PATH = '/home/caelan/Programs/temporal-planning/'
#TPSHE_COMMAND = 'python {}bin/plan.py she {} {} --time {} --no-iterated'
TPSHE_COMMAND = 'bin/plan.py she {} {} --iterated'
#TPSHE_COMMAND = 'python {}bin/plan.py she {} {} --time {}'
#TPSHE_COMMAND = 'python {}bin/plan.py tempo-3 {} {} --time {}'
#TPSHE_COMMAND = 'python {}bin/plan.py stp-3 {} {} --time {}'
#temp_path = '.'
TPSHE_OUTPUT_PATH = 'tmp_sas_plan'
##################################################
CERB_PATH = '/home/caelan/Programs/cerberus'
#CERB_PATH = '/home/caelan/Programs/pddlstream/FastDownward'
#CERB_COMMAND = 'fast-downward.py {} {}'
CERB_COMMAND = 'plan.py {} {} {}'
# https://ipc2018-classical.bitbucket.io/planner-abstracts/teams_15_16.pdf
##################################################
def parse_temporal_solution(solution):
makespan = 0.0
plan = []
# TODO: this regex doesn't work for @
    regex = r'(\d+\.\d+):\s+' \
            r'\(\s*(\w+(?: \S+)*)\s*\)\s+' \
            r'\[(\d+\.\d+)\]'
for start, action, duration in re.findall(regex, solution):
entries = action.lower().split(' ')
action = DurativeAction(entries[0], tuple(entries[1:]), float(start), float(duration))
plan.append(action)
makespan = max(action.start + action.duration, makespan)
return plan, makespan
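# Hypothetical sketch (not part of the original module): parsing two lines in
# the usual '<start>: (<action> <args>) [<duration>]' temporal plan format;
# the action names are made up.
def _example_parse_temporal_solution():
    solution = ('0.000: (pick block1) [1.000]\n'
                '1.001: (move loc1 loc2) [2.000]\n')
    plan, makespan = parse_temporal_solution(solution)
    assert [action.name for action in plan] == ['pick', 'move']
    assert abs(makespan - 3.001) < 1e-6
    return plan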
def parse_plans(temp_path, plan_files):
best_plan, best_makespan = None, INF
for plan_file in plan_files:
solution = read(os.path.join(temp_path, plan_file))
plan, makespan = parse_temporal_solution(solution)
if makespan < best_makespan:
best_plan, best_makespan = plan, makespan
return best_plan, best_makespan
##################################################
def get_end(action):
return action.start + action.duration
def compute_start(plan):
if not plan:
return 0.
return min(action.start for action in plan)
def compute_end(plan):
if not plan:
return 0.
return max(map(get_end, plan))
def compute_duration(plan):
return compute_end(plan) - compute_start(plan)
def apply_start(plan, new_start):
if not plan:
return plan
old_start = compute_start(plan)
delta_start = new_start - old_start
return [DurativeAction(name, args, start + delta_start, duration)
for name, args, start, duration in plan]
def retime_plan(plan, duration=1):
if plan is None:
return plan
# TODO: duration per action
return [DurativeAction(name, args, i * duration, duration)
for i, (name, args) in enumerate(plan)]
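# Hypothetical sketch (not part of the original module): retiming a
# sequential (name, args) plan into unit-duration actions at t = 0, 1, ...;
# the action names are made up.
def _example_retime_plan():
    timed = retime_plan([('pick', ('b1',)), ('place', ('b1',))])
    assert compute_duration(timed) == 2
    return timed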
def reverse_plan(plan):
if plan is None:
return None
makespan = compute_duration(plan)
return [DurativeAction(action.name, action.args, makespan - get_end(action), action.duration)
for action in plan]
##################################################
TemporalDomain = namedtuple('TemporalDomain', ['name', 'requirements', 'types', 'constants',
'predicates', 'functions', 'actions', 'durative_actions', 'axioms'])
# TODO: rename SimplifiedDomain
SimplifiedDomain = namedtuple('SimplifiedDomain', ['name', 'requirements', 'types', 'type_dict', 'constants',
'predicates', 'predicate_dict', 'functions', 'actions', 'axioms',
'durative_actions', 'pddl'])
def get_tfd_path():
if ENV_VAR not in os.environ:
raise RuntimeError('Environment variable {} is not defined!'.format(ENV_VAR))
return os.path.join(os.environ[ENV_VAR], 'downward/')
def parse_temporal_domain(domain_pddl):
translate_path = os.path.join(get_tfd_path(), 'translate/') # tfd & temporal-FD
prefixes = ['pddl', 'normalize']
deleted = delete_imports(prefixes)
sys.path.insert(0, translate_path)
import pddl
import normalize
temporal_domain = TemporalDomain(*pddl.tasks.parse_domain(pddl.parser.parse_nested_list(domain_pddl.splitlines())))
name, requirements, constants, predicates, types, functions, actions, durative_actions, axioms = temporal_domain
fluents = normalize.get_fluent_predicates(temporal_domain)
sys.path.remove(translate_path)
delete_imports(prefixes)
sys.modules.update(deleted) # This is important otherwise classes are messed up
import pddl
import pddl_parser
assert not actions
simple_from_durative = simple_from_durative_action(durative_actions, fluents)
simple_actions = [action for triplet in simple_from_durative.values() for action in triplet]
requirements = pddl.Requirements([])
types = [pddl.Type(ty.name, ty.basetype_name) for ty in types]
pddl_parser.parsing_functions.set_supertypes(types)
predicates = [pddl.Predicate(p.name, p.arguments) for p in predicates]
constants = convert_parameters(constants)
axioms = list(map(convert_axiom, axioms))
return SimplifiedDomain(name, requirements, types, {ty.name: ty for ty in types}, constants,
predicates, {p.name: p for p in predicates}, functions,
simple_actions, axioms, simple_from_durative, domain_pddl)
DURATIVE_ACTIONS = ':durative-actions'
def parse_domain(domain_pddl):
try:
return parse_sequential_domain(domain_pddl)
except AssertionError as e:
if str(e) == DURATIVE_ACTIONS:
return parse_temporal_domain(domain_pddl)
raise e
##################################################
def delete_imports(prefixes=['pddl']):
deleted = {}
for name in list(sys.modules):
if not name.startswith('pddlstream') and any(name.startswith(prefix) for prefix in prefixes):
deleted[name] = sys.modules.pop(name)
return deleted
#def simple_action_stuff(name, parameters, condition, effects):
# import pddl
# parameters = [pddl.TypedObject(param.name, param.type) for param in parameters]
# return pddl.Action(name, parameters, len(parameters), condition, effects, None)
def convert_args(args):
return [var.name for var in args]
def convert_condition(condition):
import pddl
class_name = condition.__class__.__name__
# TODO: compare class_name to the pddl class name
if class_name in ('Truth', 'FunctionComparison'):
# TODO: currently ignoring numeric conditions
return pddl.Truth()
elif class_name == 'Atom':
return pddl.Atom(condition.predicate, convert_args(condition.args))
elif class_name == 'NegatedAtom':
return pddl.NegatedAtom(condition.predicate, convert_args(condition.args))
elif class_name == 'Conjunction':
return pddl.conditions.Conjunction(list(map(convert_condition, condition.parts)))
elif class_name == 'Disjunction':
return pddl.Disjunction(list(map(convert_condition, condition.parts)))
elif class_name == 'ExistentialCondition':
return pddl.ExistentialCondition(convert_parameters(condition.parameters),
list(map(convert_condition, condition.parts)))
elif class_name == 'UniversalCondition':
return pddl.UniversalCondition(convert_parameters(condition.parameters),
list(map(convert_condition, condition.parts)))
raise NotImplementedError(class_name)
def convert_effects(effects):
import pddl
new_effects = make_effects([('_noop',)]) # To ensure the action has at least one effect
for effect in effects:
class_name = effect.__class__.__name__
if class_name == 'Effect':
peffect_name = effect.peffect.__class__.__name__
if peffect_name in ('Increase', 'Decrease'):
# TODO: currently ignoring numeric conditions
continue
new_effects.append(pddl.Effect(convert_parameters(effect.parameters),
pddl.Conjunction(list(map(convert_condition, effect.condition))).simplified(),
convert_condition(effect.peffect)))
else:
raise NotImplementedError(class_name)
return new_effects
def convert_axiom(axiom):
import pddl
parameters = convert_parameters(axiom.parameters)
return pddl.Axiom(axiom.name, parameters, len(parameters),
convert_condition(axiom.condition).simplified())
def convert_parameters(parameters):
import pddl
return [pddl.TypedObject(param.name, param.type) for param in parameters]
SIMPLE_TEMPLATE = '{}-{}'
def expand_condition(condition):
import pddl
return [part for part in get_conjunctive_parts(convert_condition(condition).simplified())
if not isinstance(part, pddl.Truth)]
def convert_durative(durative_actions, fluents):
# TODO: if static, apply as a condition to all
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
import pddl
wait_action = make_action(
name='wait',
parameters=['?t1', '?t2'],
preconditions=[
('time', '?t1'), ('time', '?t2'),
('attime', '?t1'),
#('CanMove',),
],
        effects=[
            ('attime', '?t2'),
            Not(('attime', '?t2')), # TODO: this delete cancels the add above; Not(('attime', '?t1')) may be intended
            #Not(('CanMove',)),
        ],
#cost=None,
)
#asdf = Fact('sum', ['?t1', '?t2'])
# TODO: need to connect the function
actions = [wait_action]
for action in durative_actions:
#print(type(action.duration))
static_condition = pddl.Conjunction(list({
part for condition in action.condition for part in get_conjunctive_parts(convert_condition(condition).simplified())
if not isinstance(part, pddl.Truth) and not (get_predicates(part) & fluents)}))
parameters = convert_parameters(action.parameters)
#start_cond, over_cond, end_cond = list(map(expand_condition, action.condition))
start_cond, over_cond, end_cond = list(map(convert_condition, action.condition))
#assert not over_cond
start_effects, end_effects = list(map(convert_effects, action.effects))
#start_effects, end_effects = action.effects
durative_predicate = 'durative-{}'.format(action.name)
fact = Fact(durative_predicate, ['?t2'] + [p.name for p in parameters])
start_parameters = [make_object(t) for t in ['?t1', '?dt', '?t2']] + parameters
start_action = pddl.Action('start-{}'.format(action.name), start_parameters, len(start_parameters),
pddl.Conjunction([pddl.Atom('sum', ['?t1', '?dt', '?t2']), pddl.Atom('attime', ['?t1']),
static_condition, start_cond, over_cond]).simplified(),
make_effects([fact]) + start_effects, None) # static_condition
# TODO: case matters
end_parameters = [make_object('?t2')] + parameters
end_action = pddl.Action('stop-{}'.format(action.name), end_parameters, len(end_parameters),
pddl.Conjunction([pddl.Atom('time', ['?t2']), pddl.Atom('attime', ['?t2']),
fd_from_fact(fact), static_condition, end_cond, over_cond]).simplified(),
make_effects([Not(fact)]) + end_effects, None) # static_condition
actions.extend([start_action, end_action])
for action in actions:
action.dump()
return actions
def simple_from_durative_action(durative_actions, fluents):
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
import pddl
simple_actions = {}
for action in durative_actions:
parameters = convert_parameters(action.parameters)
conditions = list(map(convert_condition, action.condition))
start_effects, end_effects = action.effects
over_effects = []
effects = list(map(convert_effects, [start_effects, over_effects, end_effects]))
static_condition = pddl.Conjunction(list({
part for condition in conditions for part in get_conjunctive_parts(condition.simplified())
if not isinstance(part, pddl.Truth) and not (get_predicates(part) & fluents)}))
# TODO: deal with case where there are fluents
actions = []
for i, (condition, effect) in enumerate(safe_zip(conditions, effects)):
# TODO: extract the durations by pretending they are action costs
actions.append(pddl.Action(SIMPLE_TEMPLATE.format(action.name, i), parameters, len(parameters),
pddl.Conjunction([static_condition, condition]).simplified(), effect, None))
#actions[-1].dump()
simple_actions[action] = actions
return simple_actions
def sequential_from_temporal_plan(plan):
if plan is None:
return plan
over_actions = []
state_changes = [DurativeAction(None, [], 0, 0)]
for durative_action in plan:
args = durative_action.args
start, end = durative_action.start, get_end(durative_action)
start_action, over_action, end_action = [SIMPLE_TEMPLATE.format(durative_action.name, i) for i in range(3)]
state_changes.append(DurativeAction(start_action, args, start, end - start))
#state_changes.append(DurativeAction(start_action, args, start, 0))
over_actions.append(DurativeAction(over_action, args, start, end - start))
state_changes.append(DurativeAction(end_action, args, end, 0))
state_changes = sorted(state_changes, key=lambda a: a.start)
sequence = []
for i in range(1, len(state_changes)):
# Technically should check the state change points as well
start_action = state_changes[i-1]
end_action = state_changes[i]
for over_action in over_actions:
if (over_action.start < end_action.start) and (start_action.start < get_end(over_action)): # Exclusive
sequence.append(over_action)
sequence.append(end_action)
return sequence
##################################################
def solve_tfd(domain_pddl, problem_pddl, planner=TFD_OPTIONS, max_planner_time=60, debug=False, **kwargs):
if PLANNER == 'tfd':
root = get_tfd_path()
# TODO: make a function for this
args = '+'.join(sorted(filter(lambda s: s is not None, map(format_option, planner.items()))))
template = TFD_COMMAND % args.format(max_planner_time=max_planner_time)
elif PLANNER == 'cerberus':
root, template = CERB_PATH, CERB_COMMAND
elif PLANNER == 'tflap':
root, template = TFLAP_PATH, TFLAP_COMMAND
elif PLANNER == 'optic':
root, template = OPTIC_PATH, OPTIC_COMMAND
elif PLANNER == 'tpshe':
root, template = TPSHE_PATH, TPSHE_COMMAND
else:
raise ValueError(PLANNER)
start_time = time.time()
domain_path, problem_path = write_pddl(domain_pddl, problem_pddl)
plan_path = os.path.join(TEMP_DIR, PLAN_FILE)
#assert not actions, "There shouldn't be any actions - just temporal actions"
paths = [os.path.join(os.getcwd(), p) for p in (domain_path, problem_path, plan_path)]
command = os.path.join(root, template.format(*paths))
print(command)
if debug:
stdout, stderr = None, None
else:
stdout, stderr = open(os.devnull, 'w'), open(os.devnull, 'w')
    returncode = subprocess.call(command, shell=True, cwd=root, stdout=stdout, stderr=stderr) # timeout=None (python3)
    error = (returncode != 0)
    print('Error:', error)
# TODO: returns an error when no plan was found
# TODO: close any opened resources
temp_path = os.path.join(os.getcwd(), TEMP_DIR)
plan_files = sorted(f for f in os.listdir(temp_path) if f.startswith(PLAN_FILE))
print('Plans:', plan_files)
best_plan, best_makespan = parse_plans(temp_path, plan_files)
#if not debug:
# safe_rm_dir(TEMP_DIR)
print('Makespan: ', best_makespan)
print('Time:', elapsed_time(start_time))
sequential_plan = sequential_from_temporal_plan(best_plan)
return sequential_plan, best_makespan
| 28,081 |
Python
| 40.850969 | 213 | 0.663153 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/constants.py
|
from __future__ import print_function
import os
from collections import namedtuple
from hsr_tamp.pddlstream.utils import INF, str_from_object, read
EQ = '=' # xnor
AND = 'and'
OR = 'or'
NOT = 'not'
EXISTS = 'exists'
FORALL = 'forall'
WHEN = 'when'
IMPLY = 'imply'
MINIMIZE = 'minimize'
MAXIMIZE = 'maximize'
INCREASE = 'increase'
PARAMETER = '?'
TYPE = '-'
OBJECT = 'object'
TOTAL_COST = 'total-cost' # TotalCost
TOTAL_TIME = 'total-time'
CONNECTIVES = (AND, OR, NOT, IMPLY)
QUANTIFIERS = (FORALL, EXISTS)
OBJECTIVES = (MINIMIZE, MAXIMIZE, INCREASE)
OPERATORS = CONNECTIVES + QUANTIFIERS + (WHEN,) # + OBJECTIVES
# TODO: OPTIMAL
SUCCEEDED = True
FAILED = None
INFEASIBLE = False
NOT_PLAN = [FAILED, INFEASIBLE]
# TODO: rename PDDLProblem
PDDLProblem = namedtuple('PDDLProblem', ['domain_pddl', 'constant_map',
'stream_pddl', 'stream_map', 'init', 'goal'])
Solution = namedtuple('Solution', ['plan', 'cost', 'certificate'])
Certificate = namedtuple('Certificate', ['all_facts', 'preimage_facts'])
OptPlan = namedtuple('OptPlan', ['action_plan', 'preimage_facts'])
# TODO: stream and axiom plans
# TODO: annotate which step each fact is first used via layer
Assignment = namedtuple('Assignment', ['args'])
Action = namedtuple('Action', ['name', 'args'])
DurativeAction = namedtuple('DurativeAction', ['name', 'args', 'start', 'duration'])
StreamAction = namedtuple('StreamAction', ['name', 'inputs', 'outputs'])
FunctionAction = namedtuple('FunctionAction', ['name', 'inputs'])
Head = namedtuple('Head', ['function', 'args'])
Evaluation = namedtuple('Evaluation', ['head', 'value'])
Atom = lambda head: Evaluation(head, True)
NegatedAtom = lambda head: Evaluation(head, False)
##################################################
def Output(*args):
return tuple(args)
def And(*expressions):
if len(expressions) == 1:
return expressions[0]
return (AND,) + tuple(expressions)
def Or(*expressions):
if len(expressions) == 1:
return expressions[0]
return (OR,) + tuple(expressions)
def Not(expression):
return (NOT, expression)
def Imply(expression1, expression2):
return (IMPLY, expression1, expression2)
def Equal(expression1, expression2):
return (EQ, expression1, expression2)
def Minimize(expression):
return (MINIMIZE, expression)
def Type(param, ty):
return (param, TYPE, ty)
def Exists(args, expression):
return (EXISTS, args, expression)
def ForAll(args, expression):
return (FORALL, args, expression)
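# A minimal sketch of composing a goal formula with these constructors
# (the predicate names are hypothetical):
#   goal = And(('AtConf', '?q'),
#              Not(('Holding', '?o')),
#              Exists(['?p'], ('AtPose', '?o', '?p')))
#   # => ('and', ('AtConf', '?q'), ('not', ('Holding', '?o')),
#   #     ('exists', ['?p'], ('AtPose', '?o', '?p')))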
##################################################
def get_prefix(expression):
return expression[0]
def get_args(head):
return head[1:]
def concatenate(*args):
output = []
for arg in args:
output.extend(arg)
return tuple(output)
def Fact(predicate, args):
return (predicate,) + tuple(args)
def is_parameter(expression):
return isinstance(expression, str) and expression.startswith(PARAMETER)
def get_parameter_name(expression):
if is_parameter(expression):
return expression[len(PARAMETER):]
return expression
def is_head(expression):
return get_prefix(expression) not in OPERATORS
##################################################
def is_plan(plan):
return not any(plan is status for status in NOT_PLAN)
def get_length(plan):
return len(plan) if is_plan(plan) else INF
def str_from_action(action):
name, args = action[:2]
return '{}{}'.format(name, str_from_object(tuple(args)))
def str_from_plan(plan):
if not is_plan(plan):
return str(plan)
return str_from_object(list(map(str_from_action, plan)))
def print_plan(plan):
if not is_plan(plan):
return
step = 1
for action in plan:
if isinstance(action, DurativeAction):
name, args, start, duration = action
print('{:.2f} - {:.2f}) {} {}'.format(start, start+duration, name,
' '.join(map(str_from_object, args))))
elif isinstance(action, Action):
name, args = action
print('{:2}) {} {}'.format(step, name, ' '.join(map(str_from_object, args))))
#print('{}) {}{}'.format(step, name, str_from_object(tuple(args))))
step += 1
elif isinstance(action, StreamAction):
name, inputs, outputs = action
print(' {}({})->({})'.format(name, ', '.join(map(str_from_object, inputs)),
', '.join(map(str_from_object, outputs))))
elif isinstance(action, FunctionAction):
name, inputs = action
print(' {}({})'.format(name, ', '.join(map(str_from_object, inputs))))
else:
raise NotImplementedError(action)
def print_solution(solution):
plan, cost, evaluations = solution
solved = is_plan(plan)
if plan is None:
num_deferred = 0
else:
num_deferred = len([action for action in plan if isinstance(action, StreamAction)
or isinstance(action, FunctionAction)])
print()
print('Solved: {}'.format(solved))
print('Cost: {:.3f}'.format(cost))
print('Length: {}'.format(get_length(plan) - num_deferred))
print('Deferred: {}'.format(num_deferred))
print('Evaluations: {}'.format(len(evaluations)))
print_plan(plan)
def get_function(term):
if get_prefix(term) in (EQ, MINIMIZE, NOT):
return term[1]
return term
def partition_facts(facts):
functions = []
negated = []
positive = []
for fact in facts:
prefix = get_prefix(fact)
func = get_function(fact)
if prefix in (EQ, MINIMIZE):
functions.append(func)
elif prefix == NOT:
negated.append(func)
else:
positive.append(func)
return positive, negated, functions
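# Sketch of partition_facts on a mixed fact list (hypothetical predicates):
#   facts = [('On', 'a', 'b'), ('not', ('Clear', 'b')), ('=', ('Dist', 'a', 'b'), 3)]
#   positive, negated, functions = partition_facts(facts)
#   # positive  -> [('On', 'a', 'b')]
#   # negated   -> [('Clear', 'b')]
#   # functions -> [('Dist', 'a', 'b')]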
def is_cost(o):
return get_prefix(o) == MINIMIZE
def get_costs(objectives):
return [o for o in objectives if is_cost(o)]
def get_constraints(objectives):
return [o for o in objectives if not is_cost(o)]
##################################################
DOMAIN_FILE = 'domain.pddl'
PROBLEM_FILE = 'problem.pddl'
STREAM_FILE = 'stream.pddl'
PDDL_FILES = [DOMAIN_FILE, PROBLEM_FILE]
PDDLSTREAM_FILES = [DOMAIN_FILE, STREAM_FILE]
def read_relative(file, relative_path): # file=__file__
directory = os.path.dirname(file)
path = os.path.abspath(os.path.join(directory, relative_path))
    return read(path) # path is already absolute; joining with directory again was redundant
def read_relative_dir(file, relative_dir='./', filenames=[]):
return [read_relative(file, os.path.join(relative_dir, filename)) for filename in filenames]
def read_pddl_pair(file, **kwargs):
return read_relative_dir(file, filenames=PDDL_FILES, **kwargs)
def read_pddlstream_pair(file, **kwargs):
return read_relative_dir(file, filenames=PDDLSTREAM_FILES, **kwargs)
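# Example usage from a caller module (a sketch; the file layout is an assumption):
#   domain_pddl, problem_pddl = read_pddl_pair(__file__)
#   domain_pddl, stream_pddl = read_pddlstream_pair(__file__)
# Both expect domain.pddl and problem.pddl (or stream.pddl) next to the calling file.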
| 6,912 |
Python
| 25.898833 | 96 | 0.61849 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/exogenous.py
|
from collections import defaultdict
from itertools import count
from hsr_tamp.pddlstream.algorithms.common import add_fact, INTERNAL_EVALUATION
from hsr_tamp.pddlstream.algorithms.downward import make_predicate, add_predicate, make_action, make_axiom, get_fluents
from hsr_tamp.pddlstream.language.constants import Head, Evaluation, get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, \
is_atom, fact_from_evaluation, substitute_expression, objects_from_values
from hsr_tamp.pddlstream.language.external import get_domain_predicates
from hsr_tamp.pddlstream.language.generator import from_fn
from hsr_tamp.pddlstream.language.stream import Stream
EXOGENOUS_AXIOMS = True
REPLACE_STREAM = True
# TODO: timed initial literals
# TODO: can do this whole story within the focused algorithm as well
class FutureValue(object):
# TODO: use this instead of debug value?
_output_counts = defaultdict(count)
def __init__(self, stream, input_values, output_parameter):
self.stream = stream
self.input_values = input_values
self.output_parameter = output_parameter
self.index = next(self._output_counts[output_parameter])
# TODO: hash this?
def __repr__(self):
return '@{}{}'.format(self.output_parameter[1:], self.index)
class FutureStream(Stream):
def __init__(self, stream, static_domain, fluent_domain, static_certified):
prefix = 'future-' if REPLACE_STREAM else ''
stream_name = '{}{}'.format(prefix, stream.name)
self.original = stream
self.fluent_domain = tuple(fluent_domain)
super(FutureStream, self).__init__(stream_name, stream.gen_fn, stream.inputs, static_domain,
stream.outputs, static_certified, stream.info, stream.fluents)
@property
def pddl_name(self):
return self.original.pddl_name
def get_fluent_domain(result):
# TODO: add to the stream itself
if not isinstance(result.external, FutureStream):
return tuple()
return substitute_expression(result.external.fluent_domain, result.mapping)
##################################################
def create_static_stream(stream, evaluations, fluent_predicates, future_fn):
def static_fn(*input_values):
instance = stream.get_instance(objects_from_values(input_values))
if all(evaluation_from_fact(f) in evaluations for f in instance.get_domain()):
return None
return tuple(FutureValue(stream.name, input_values, o) for o in stream.outputs)
#opt_evaluations = None
def static_opt_gen_fn(*input_values):
instance = stream.get_instance(objects_from_values(input_values))
if all(evaluation_from_fact(f) in evaluations for f in instance.get_domain()):
return
for output_values in stream.opt_gen_fn(*input_values):
yield output_values
# TODO: need to replace regular opt_gen_fn to update opt_evaluations
# if I want to prevent switch from normal to static in opt
# Focused algorithm naturally biases against using future because of axiom layers
fluent_domain = list(filter(lambda a: get_prefix(a) in fluent_predicates, stream.domain))
static_domain = list(filter(lambda a: a not in fluent_domain, stream.domain))
new_domain = list(map(future_fn, static_domain))
stream_atom = ('{}-result'.format(stream.name),) + tuple(stream.inputs + stream.outputs)
new_certified = [stream_atom] + list(map(future_fn, stream.certified))
static_stream = FutureStream(stream, new_domain, fluent_domain, new_certified)
if REPLACE_STREAM:
static_stream.gen_fn = from_fn(static_fn)
static_stream.opt_gen_fn = static_opt_gen_fn
return static_stream
# def replace_gen_fn(stream):
# future_gen_fn = from_fn(lambda *args: tuple(FutureValue(stream.name, args, o) for o in stream.outputs))
# gen_fn = stream.gen_fn
# def new_gen_fn(*input_values):
# if any(isinstance(value, FutureValue) for value in input_values):
# return future_gen_fn(*input_values)
# return gen_fn(*input_values)
# stream.gen_fn = new_gen_fn
##################################################
def augment_evaluations(evaluations, future_map):
for evaluation in list(filter(is_atom, evaluations)):
name = evaluation.head.function
if name in future_map:
new_head = Head(future_map[name], evaluation.head.args)
new_evaluation = Evaluation(new_head, evaluation.value)
add_fact(evaluations, fact_from_evaluation(new_evaluation),
result=INTERNAL_EVALUATION, complexity=0)
def rename_atom(atom, mapping):
name = get_prefix(atom)
if name not in mapping:
return atom
return (mapping[name],) + get_args(atom)
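# Sketch: rename_atom relabels only mapped predicates and leaves arguments untouched:
#   rename_atom(('holding', '?o'), {'holding': 'f-holding'}) -> ('f-holding', '?o')
#   rename_atom(('atpose', '?o', '?p'), {'holding': 'f-holding'}) -> unchanged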
def compile_to_exogenous_actions(evaluations, domain, streams):
# TODO: version of this that operates on fluents of length one?
# TODO: better instantiation when have full parameters
fluent_predicates = get_fluents(domain)
certified_predicates = {get_prefix(a) for s in streams for a in s.certified}
future_map = {p: 'f-{}'.format(p) for p in certified_predicates}
augment_evaluations(evaluations, future_map)
future_fn = lambda a: rename_atom(a, future_map)
new_streams = []
for stream in list(streams):
if not isinstance(stream, Stream):
raise NotImplementedError(stream)
# TODO: could also just have conditions asserting that one of the fluent conditions fails
new_streams.append(create_static_stream(stream, evaluations, fluent_predicates, future_fn))
stream_atom = new_streams[-1].certified[0]
add_predicate(domain, make_predicate(get_prefix(stream_atom), get_args(stream_atom)))
preconditions = [stream_atom] + list(stream.domain)
effort = 1 # TODO: use stream info
#effort = 1 if unit_cost else result.instance.get_effort()
#if effort == INF:
# continue
domain.actions.append(make_action(
name='call-{}'.format(stream.name),
parameters=get_args(stream_atom),
preconditions=preconditions,
effects=stream.certified,
cost=effort))
stream.certified = tuple(set(stream.certified) |
set(map(future_fn, stream.certified)))
if REPLACE_STREAM:
streams.extend(new_streams)
else:
streams[:] = new_streams
##################################################
def get_exogenous_predicates(domain, streams):
return list(get_fluents(domain) & get_domain_predicates(streams))
def replace_literals(replace_fn, expression, *args):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return expression # TODO: replace constants?
if isinstance(expression, pddl.conditions.JunctorCondition):
new_parts = [replace_literals(replace_fn, p, *args) for p in expression.parts]
return expression.__class__(new_parts)
if isinstance(expression, pddl.conditions.QuantifiedCondition):
new_parts = [replace_literals(replace_fn, p, *args) for p in expression.parts]
return expression.__class__(expression.parameters, new_parts)
if isinstance(expression, pddl.conditions.Literal):
return replace_fn(expression, *args)
raise ValueError(expression)
def replace_predicates(predicate_map, expression):
def replace_fn(literal):
new_predicate = predicate_map.get(literal.predicate, literal.predicate)
return literal.__class__(new_predicate, literal.args)
return replace_literals(replace_fn, expression)
##################################################
def compile_to_exogenous_axioms(evaluations, domain, streams):
# TODO: no attribute certified
# TODO: recover the streams that are required
import pddl
fluent_predicates = get_fluents(domain)
certified_predicates = {get_prefix(a) for s in streams for a in s.certified}
future_map = {p: 'f-{}'.format(p) for p in certified_predicates}
augment_evaluations(evaluations, future_map)
future_fn = lambda a: rename_atom(a, future_map)
derived_map = {p: 'd-{}'.format(p) for p in certified_predicates}
derived_fn = lambda a: rename_atom(a, derived_map)
# TODO: could prune streams that don't need this treatment
for action in domain.actions:
action.precondition = replace_predicates(derived_map, action.precondition)
for effect in action.effects:
assert(isinstance(effect, pddl.Effect))
effect.condition = replace_predicates(derived_map, effect.condition)
for axiom in domain.axioms:
axiom.condition = replace_predicates(derived_map, axiom.condition)
#fluent_predicates.update(certified_predicates)
new_streams = []
for stream in list(streams):
if not isinstance(stream, Stream):
raise NotImplementedError(stream)
new_streams.append(create_static_stream(stream, evaluations, fluent_predicates, future_fn))
stream_atom = new_streams[-1].certified[0]
add_predicate(domain, make_predicate(get_prefix(stream_atom), get_args(stream_atom)))
preconditions = [stream_atom] + list(map(derived_fn, stream.domain))
for certified_fact in stream.certified:
derived_fact = derived_fn(certified_fact)
external_params = get_args(derived_fact)
internal_params = tuple(p for p in (stream.inputs + stream.outputs)
if p not in get_args(derived_fact))
domain.axioms.extend([
make_axiom(
parameters=external_params,
preconditions=[certified_fact],
derived=derived_fact),
make_axiom(
parameters=external_params+internal_params,
preconditions=preconditions,
derived=derived_fact),
])
stream.certified = tuple(set(stream.certified) |
set(map(future_fn, stream.certified)))
if REPLACE_STREAM:
streams.extend(new_streams)
else:
streams[:] = new_streams
##################################################
def compile_to_exogenous(evaluations, domain, streams):
exogenous_predicates = get_exogenous_predicates(domain, streams)
if not exogenous_predicates:
return False
print('Warning! The following predicates are mentioned in both action effects '
'and stream domain conditions: {}'.format(exogenous_predicates))
if EXOGENOUS_AXIOMS:
compile_to_exogenous_axioms(evaluations, domain, streams)
else:
compile_to_exogenous_actions(evaluations, domain, streams)
return True
| 10,844 |
Python
| 45.745689 | 119 | 0.658982 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/statistics.py
|
from __future__ import print_function
import os
import pickle
from collections import Counter, namedtuple
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.utils import INF, read_pickle, ensure_dir, write_pickle, get_python_version
LOAD_STATISTICS = True
SAVE_STATISTICS = True
DATA_DIR = 'statistics/py{:d}/'
DEFAULT_SEARCH_OVERHEAD = 1e2 # TODO: update this over time
EPSILON = 1e-6
# Can also include the overhead to process skeletons
Stats = namedtuple('Stats', ['p_success', 'overhead'])
# TODO: ability to "burn in" streams by sampling artificially to get better estimates
def safe_ratio(numerator, denominator, undefined=None):
if denominator == 0:
return undefined
return float(numerator) / denominator
def geometric_cost(cost, p):
return safe_ratio(cost, p, undefined=INF)
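# Doctest-style examples (the values follow directly from the definitions):
#   safe_ratio(1, 2)                -> 0.5
#   safe_ratio(1, 0, undefined=INF) -> INF
#   geometric_cost(2., 0.5)         -> 4.0 # expected cost of retrying until success
#   geometric_cost(2., 0)           -> INF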
def check_effort(effort, max_effort):
if max_effort is None:
return True
return effort < max_effort # Exclusive
def compute_plan_effort(stream_plan, **kwargs):
# TODO: compute effort in the delete relaxation way
if not is_plan(stream_plan):
return INF
if not stream_plan:
return 0
return sum(result.get_effort(**kwargs) for result in stream_plan)
##################################################
# TODO: write to a "local" folder containing temp, data2, data3, visualizations
def get_data_path(stream_name):
data_dir = DATA_DIR.format(get_python_version())
file_name = '{}.pkl'.format(stream_name)
return os.path.join(data_dir, file_name)
def load_data(pddl_name):
if not LOAD_STATISTICS:
return {}
filename = get_data_path(pddl_name)
if not os.path.exists(filename):
return {}
#try:
data = read_pickle(filename) # TODO: try/except
#except pickle.UnpicklingError:
#return {}
#print('Loaded:', filename)
return data
def load_stream_statistics(externals):
if not externals:
return
pddl_name = externals[0].pddl_name # TODO: ensure the same
# TODO: fresh restart flag
data = load_data(pddl_name)
for external in externals:
if external.name in data:
external.load_statistics(data[external.name])
##################################################
def dump_online_statistics(externals):
print('\nLocal External Statistics')
overall_calls = 0
overall_overhead = 0
for external in externals:
external.dump_online()
overall_calls += external.online_calls
overall_overhead += external.online_overhead
print('Overall calls: {} | Overall overhead: {:.3f}'.format(overall_calls, overall_overhead))
def dump_total_statistics(externals):
print('\nTotal External Statistics')
for external in externals:
external.dump_total()
# , external.get_effort()) #, data[external.name])
##################################################
def merge_data(external, previous_data):
# TODO: compute distribution of successes given feasible
# TODO: can estimate probability of success given feasible
# TODO: single tail hypothesis testing (probability that came from this distribution)
distribution = []
for instance in external.instances.values():
if instance.results_history:
# attempts = len(instance.results_history)
# successes = sum(map(bool, instance.results_history))
# print(instance, successes, attempts)
# TODO: also first attempt, first success
last_success = -1
for i, results in enumerate(instance.results_history):
if results:
distribution.append(i - last_success)
# successful = (0 <= last_success)
last_success = i
combined_distribution = previous_data.get('distribution', []) + distribution
# print(external, distribution)
# print(external, Counter(combined_distribution))
# TODO: count num failures as well
# Alternatively, keep metrics on the lower bound and use somehow
# Could assume that it is some other distribution beyond that point
return {
'calls': external.total_calls,
'overhead': external.total_overhead,
'successes': external.total_successes,
'distribution': combined_distribution,
}
# TODO: make an instance method
def write_stream_statistics(externals, verbose):
# TODO: estimate conditional to affecting history on skeleton
# TODO: estimate conditional to first & attempt and success
# TODO: relate to success for the full future plan
# TODO: Maximum Likelihood Exponential - average (biased in general)
if not externals:
return
if verbose:
#dump_online_statistics(externals)
dump_total_statistics(externals)
pddl_name = externals[0].pddl_name # TODO: ensure the same
previous_data = load_data(pddl_name)
data = {}
for external in externals:
if not hasattr(external, 'instances'):
continue # TODO: SynthesizerStreams
#total_calls = 0 # TODO: compute these values
previous_statistics = previous_data.get(external.name, {})
data[external.name] = merge_data(external, previous_statistics)
if not SAVE_STATISTICS:
return
filename = get_data_path(pddl_name)
ensure_dir(filename)
write_pickle(filename, data)
if verbose:
print('Wrote:', filename)
##################################################
def hash_object(evaluations, obj):
# TODO: hash an object by the DAG of streams that produced it
# Use this to more finely estimate the parameters of a stream
# Can marginalize over conditional information to recover the same overall statistics
# Can also apply this directly to domain facts
raise NotImplementedError()
##################################################
class PerformanceInfo(object):
def __init__(self, p_success=1-EPSILON, overhead=EPSILON, effort=None, estimate=False):
# TODO: make info just a dict
self.estimate = estimate
if self.estimate:
p_success = overhead = effort = None
if p_success is not None:
assert 0. <= p_success <= 1.
if overhead is not None:
assert 0. <= overhead
#if effort is not None:
# assert 0 <= effort
self.p_success = p_success
self.overhead = overhead
self.effort = effort
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, repr(self.__dict__))
class Performance(object):
def __init__(self, name, info):
self.name = name.lower()
self.info = info
self.initial_calls = 0
self.initial_overhead = 0.
self.initial_successes = 0
# TODO: online learning vs offline learning
self.online_calls = 0
self.online_overhead = 0.
self.online_successes = 0
@property
def total_calls(self):
return self.initial_calls + self.online_calls
@property
def total_overhead(self):
return self.initial_overhead + self.online_overhead
@property
def total_successes(self):
return self.initial_successes + self.online_successes
def load_statistics(self, statistics):
self.initial_calls = statistics['calls']
self.initial_overhead = statistics['overhead']
self.initial_successes = statistics['successes']
def update_statistics(self, overhead, success):
self.online_calls += 1
self.online_overhead += overhead
self.online_successes += success
def _estimate_p_success(self, reg_p_success=1., reg_calls=1):
# TODO: use prior from info instead?
return safe_ratio(self.total_successes + reg_p_success * reg_calls,
self.total_calls + reg_calls,
undefined=reg_p_success)
def _estimate_overhead(self, reg_overhead=1e-6, reg_calls=1):
# TODO: use prior from info instead?
return safe_ratio(self.total_overhead + reg_overhead * reg_calls,
self.total_calls + reg_calls,
undefined=reg_overhead)
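    # Both estimators use additive (Laplace-style) smoothing: e.g. 3 successes in
    # 9 calls with the defaults reg_p_success=1, reg_calls=1 gives
    # (3 + 1*1) / (9 + 1) = 0.4, and a stream with no data defaults to the
    # regularizer value instead of dividing 0/0.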
def get_p_success(self):
# TODO: could precompute and store
if self.info.p_success is None:
return self._estimate_p_success()
return self.info.p_success
def get_overhead(self):
if self.info.overhead is None:
return self._estimate_overhead()
return self.info.overhead
def could_succeed(self):
return self.get_p_success() > 0
def _estimate_effort(self, search_overhead=DEFAULT_SEARCH_OVERHEAD):
p_success = self.get_p_success()
return geometric_cost(self.get_overhead(), p_success) + \
(1 - p_success) * geometric_cost(search_overhead, p_success)
def get_effort(self, **kwargs):
if self.info.effort is None:
return self._estimate_effort(**kwargs)
elif callable(self.info.effort):
return 0 # This really is a bound on the effort
return self.info.effort
def get_statistics(self, negate=False): # negate=True is for the "worst-case" ordering
sign = -1 if negate else +1
return Stats(p_success=self.get_p_success(), overhead=sign * self.get_overhead())
def dump_total(self):
print('External: {} | n: {:d} | p_success: {:.3f} | overhead: {:.3f}'.format(
self.name, self.total_calls, self._estimate_p_success(), self._estimate_overhead()))
def dump_online(self):
if not self.online_calls:
return
print('External: {} | n: {:d} | p_success: {:.3f} | mean overhead: {:.3f} | overhead: {:.3f}'.format(
self.name, self.online_calls,
safe_ratio(self.online_successes, self.online_calls),
safe_ratio(self.online_overhead, self.online_calls),
self.online_overhead))
##################################################
# TODO: cannot easily do Bayesian hypothesis testing because might never receive ground truth when empty
# In some cases, the stream does finish though
# Estimate probability that will generate result
# Need to produce belief that has additional samples
# P(Success | Samples) = estimated parameter
# P(Success | ~Samples) = 0
# T(Samples | ~Samples) = 0
# T(~Samples | Samples) = 1-p
# TODO: estimate a parameter conditioned on successful streams?
# Need a transition fn as well because generating a sample might change state
# Problem with estimating prior. Don't always have data on failed streams
# Goal: estimate P(Success | History)
# P(Success | History) = P(Success | Samples) * P(Samples | History)
# Previously in Instance
# def get_belief(self):
# #return 1.
# #prior = self.external.prior
# prior = 1. - 1e-2
# n = self.num_calls
# p_obs_given_state = self.external.get_p_success()
# p_state = prior
# for i in range(n):
# p_nobs_and_state = (1-p_obs_given_state)*p_state
# p_nobs_and_nstate = (1-p_state)
# p_nobs = p_nobs_and_state + p_nobs_and_nstate
# p_state = p_nobs_and_state/p_nobs
# return p_state
# def update_belief(self, success):
# # Belief that remaining sequence is non-empty
# # Belief only degrades in this case
# nonempty = 0.9
# p_success_nonempty = 0.5
# if success:
# p_success = p_success_nonempty*nonempty
# else:
# p_success = (1-p_success_nonempty)*nonempty + (1-nonempty)
#def get_p_success(self):
#p_success_belief = self.external.get_p_success()
#belief = self.get_belief()
#return p_success_belief*belief
# TODO: use the external as a prior
# TODO: Bayesian estimation of likelihood that has result
# Model hidden state of whether has values or if will produce values?
# TODO: direct estimation of different buckets in which it will finish
# TODO: we have samples from the CDF or something
#def get_p_success(self):
# return self.external.get_p_success()
#
#def get_overhead(self):
# return self.external.get_overhead()
| 12,081 |
Python
| 37.113565 | 109 | 0.634716 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/attachments.py
|
import os
import sys
import copy
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
from hsr_tamp.pddlstream.algorithms.downward import get_literals, get_conjunctive_parts, fd_from_fact, EQ, make_object, \
pddl_from_instance, DEFAULT_MAX_TIME, get_cost_scale
from hsr_tamp.pddlstream.language.object import Object
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl, substitute_fact
from hsr_tamp.pddlstream.language.fluent import get_predicate_map, remap_certified
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import INF, invert_dict, get_mapping, safe_zip
# Intuition: static facts about whether this state satisfies a condition
# The state can be seen as a hidden parameter with a precondition that you are at it
# TODO: refactor to algorithms
PYPLANNERS_VAR = 'PYPLANNERS_PATH'
PLACEHOLDER_OBJ = Object.from_value('~')
DEFAULT_PYPLANNER = {
'search': 'eager',
'evaluator': 'greedy',
'heuristic': 'ff',
'successors': 'all',
}
def get_pyplanners_path():
return os.environ.get(PYPLANNERS_VAR, None)
def has_attachments(domain):
return any(getattr(action, 'attachments', {}) for action in domain.actions)
##################################################
def compile_fluents_as_attachments(domain, externals):
state_streams = set(filter(lambda e: isinstance(e, Stream) and e.is_fluent, externals)) # TODO: is_negated/is_special
if not state_streams:
return externals
predicate_map = get_predicate_map(state_streams)
if predicate_map and (get_pyplanners_path() is None):
# TODO: fluent streams with outputs
# Could convert the free parameter to a constant
raise NotImplementedError('Algorithm does not support fluent streams: {}'.format(
[stream.name for stream in state_streams]))
import pddl
domain.constants.append(make_object(PLACEHOLDER_OBJ.pddl))
for action in domain.actions:
for effect in action.effects:
# TODO: conditional effects
if any(literal.predicate in predicate_map for literal in get_literals(effect.condition)):
raise ValueError('Attachments cannot be in action effects: {}'.format(effect))
action.attachments = {}
preconditions = set()
for literal in get_conjunctive_parts(action.precondition):
#if not isinstance(literal, pddl.Literal):
# raise NotImplementedError('Only literals are supported: {}'.format(literal))
if not get_predicates(literal) & set(predicate_map):
preconditions.add(literal)
continue
if not isinstance(literal, pddl.Literal):
raise NotImplementedError(literal)
# Drops the original precondition
stream = predicate_map[literal.predicate]
mapping = remap_certified(literal, stream)
assert mapping is not None
action.attachments[literal] = stream
preconditions.update(pddl.Atom(EQ, (mapping[out], PLACEHOLDER_OBJ.pddl))
for out in stream.outputs)
preconditions.update(fd_from_fact(substitute_fact(fact, mapping))
for fact in stream.domain)
action.precondition = pddl.Conjunction(preconditions).simplified()
#fn = lambda l: pddl.Truth() if l.predicate in predicate_map else l
#action.precondition = replace_literals(fn, action.precondition).simplified()
#action.dump()
return [external for external in externals if external not in state_streams]
##################################################
def get_attachment_test(action_instance):
from hsr_tamp.pddlstream.algorithms.scheduling.apply_fluents import get_fluent_instance
from hsr_tamp.pddlstream.language.fluent import remap_certified
# TODO: support for focused (need to resolve after binding)
# TODO: ensure no OptimisticObjects
fd_action_from_state = {}
def test(state):
if state in fd_action_from_state:
return True
#new_instance = action_instance
new_instance = copy.deepcopy(action_instance)
if not hasattr(action_instance.action, 'attachments'):
fd_action_from_state[state] = new_instance
return True
for literal, stream in new_instance.action.attachments.items():
param_from_inp = remap_certified(literal, stream)
input_objects = tuple(obj_from_pddl(
new_instance.var_mapping[param_from_inp[inp]]) for inp in stream.inputs)
stream_instance = get_fluent_instance(stream, input_objects, state) # Output automatically cached
results = stream_instance.first_results(num=1)
#results = stream_instance.all_results()
failure = not results
if literal.negated != failure:
return False
#args = action_instance.name.strip('()').split(' ')
#idx_from_param = {p.name: i for i, p in enumerate(action_instance.action.parameters)}
param_from_out = remap_certified(literal, stream)
result = results[0] # Arbitrary
out_from_obj = invert_dict(result.mapping)
for obj in result.output_objects:
param = param_from_out[out_from_obj[obj]]
new_instance.var_mapping[param] = obj.pddl
# idx = idx_from_param[param]
# args[1+idx] = obj.pddl
#action_instance.name = '({})'.format(' '.join(args))
fd_action_from_state[state] = new_instance
return True
return test, fd_action_from_state
def solve_pyplanners(instantiated, planner=None, max_planner_time=DEFAULT_MAX_TIME, max_cost=INF):
if instantiated is None:
return None, INF
# https://github.mit.edu/caelan/stripstream/blob/c8c6cd1d6bd5e2e8e31cd5603e28a8e0d7bb2cdc/stripstream/algorithms/search/pyplanners.py
pyplanners_path = get_pyplanners_path()
if pyplanners_path is None:
raise RuntimeError('Must clone https://github.com/caelan/pyplanners '
'and set the environment variable {} to its path'.format(PYPLANNERS_VAR))
if pyplanners_path not in sys.path:
sys.path.append(pyplanners_path)
# TODO: could operate on translated SAS instead
from strips.states import State, PartialState
from strips.operators import Action, Axiom
from strips.utils import solve_strips, default_derived_plan
import pddl
# TODO: PLUSONE costs
pyplanner = dict(DEFAULT_PYPLANNER)
if isinstance(planner, dict):
pyplanner.update(planner)
fd_action_from_py_action = {}
py_actions = []
for action in instantiated.actions:
#action.dump()
py_action = Action({'fd_action': action})
py_action.conditions = set(action.precondition)
py_action.effects = set()
for condition, effect in action.del_effects:
assert not condition
py_action.effects.add(effect.negate())
for condition, effect in action.add_effects:
assert not condition
py_action.effects.add(effect)
py_action.cost = action.cost
py_action.test, fd_action_from_py_action[py_action] = get_attachment_test(action)
py_actions.append(py_action)
py_axioms = []
for axiom in instantiated.axioms:
#axiom.dump()
py_axiom = Axiom({'fd_axiom_id': id(axiom)}) # Not hashable for some reason
py_axiom.conditions = set(axiom.condition)
py_axiom.effects = {axiom.effect}
py_axioms.append(py_axiom)
goal = PartialState(instantiated.goal_list)
fluents = {f.positive() for f in goal.conditions}
for py_operator in py_actions + py_axioms:
fluents.update(f.positive() for f in py_operator.conditions)
initial = State(atom for atom in instantiated.task.init
if isinstance(atom, pddl.Atom) and (atom in fluents))
plan, state_space = solve_strips(initial, goal, py_actions, py_axioms,
max_time=max_planner_time, max_cost=max_cost, **pyplanner)
if plan is None:
return None, INF
#fd_plan = [action.fd_action for action in plan.operators]
states = plan.get_states() # get_states | get_derived_states
fd_plan = [fd_action_from_py_action[action][state] for state, action in safe_zip(states[:-1], plan.operators)]
actions = [pddl_from_instance(action) for action in fd_plan]
#print(actions)
cost = plan.cost / get_cost_scale()
return actions, cost
| 8,628 |
Python
| 43.479381 | 137 | 0.653106 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/external.py
|
from collections import Counter
from hsr_tamp.pddlstream.algorithms.common import compute_complexity
from hsr_tamp.pddlstream.language.constants import get_args, is_parameter, get_prefix, Fact
from hsr_tamp.pddlstream.language.conversion import values_from_objects, substitute_fact, obj_from_value_expression
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.statistics import Performance, PerformanceInfo, DEFAULT_SEARCH_OVERHEAD, Stats
from hsr_tamp.pddlstream.utils import elapsed_time, get_mapping, flatten, INF, safe_apply_mapping, Score, INF
DEBUG = 'debug'
SHARED_DEBUG = 'shared_debug'
DEBUG_MODES = [DEBUG, SHARED_DEBUG]
never_defer = lambda *args, **kwargs: False
defer_unique = lambda result, *args, **kwargs: result.is_refined()
defer_shared = lambda *args, **kwargs: True
def select_inputs(instance, inputs):
external = instance.external
assert set(inputs) <= set(external.inputs)
mapping = get_mapping(external.inputs, instance.input_objects)
return safe_apply_mapping(inputs, mapping)
def get_defer_any_unbound(unique=False):
def defer_any_unbound(result, bound_objects=set(), *args, **kwargs):
# The set bound_objects may contain shared objects in which case replanning is required
if unique and not defer_unique(result):
return False
return not all(isinstance(obj, Object) or (obj in bound_objects) for obj in result.input_objects)
return defer_any_unbound
def get_defer_all_unbound(inputs='', unique=False): # TODO: shortcut for all inputs
inputs = tuple(inputs.split())
# Empty implies defer_shared
def defer_all_unbound(result, bound_objects=set(), *args, **kwargs):
if unique and not defer_unique(result):
return False
return not any(isinstance(obj, Object) or (obj in bound_objects)
for obj in select_inputs(result.instance, inputs))
return defer_all_unbound
def get_domain_predicates(streams):
return {get_prefix(a) for s in streams for a in s.domain}
def convert_constants(fact):
# TODO: take the constant map as an input
# TODO: throw an error if undefined
return Fact(get_prefix(fact), [p if is_parameter(p) else Object.from_name(p) for p in get_args(fact)])
##################################################
class ExternalInfo(PerformanceInfo):
def __init__(self, eager=False, eager_skeleton=False, defer_fn=never_defer, **kwargs):
super(ExternalInfo, self).__init__(**kwargs)
# TODO: enable eager=True for inexpensive test streams by default
# TODO: change p_success and overhead if it's a function or test stream
self.eager = eager
self.eager_skeleton = eager_skeleton # TODO: apply in binding and adaptive
# TODO: automatically set tests and costs to be eager
self.defer_fn = defer_fn # Old syntax was defer=True
#self.complexity_fn = complexity_fn
##################################################
class Result(object):
def __init__(self, instance, opt_index, call_index, optimistic):
self.instance = instance
self.opt_index = opt_index
self.call_index = call_index
self.optimistic = optimistic
@property
def external(self):
return self.instance.external
@property
def info(self):
return self.external.info
@property
def name(self):
return self.external.name
@property
def input_objects(self):
return self.instance.input_objects
@property
def domain(self):
return self.instance.domain
def is_refined(self):
# TODO: result.opt_index is None
return self.opt_index == 0 # TODO: base on output objects instead
def is_deferrable(self, *args, **kwargs):
return self.info.defer_fn(self, *args, **kwargs)
def get_domain(self):
return self.instance.get_domain()
def get_certified(self):
raise NotImplementedError()
def get_components(self):
return [self]
def get_unsatisfiable(self):
return [self.get_components()]
def get_action(self):
raise NotImplementedError()
def remap_inputs(self, bindings):
raise NotImplementedError()
def is_successful(self):
raise NotImplementedError()
def compute_complexity(self, evaluations, **kwargs):
# Should be constant
return compute_complexity(evaluations, self.get_domain(), **kwargs) + \
self.external.get_complexity(self.call_index)
def get_effort(self, **kwargs):
if not self.optimistic:
return 0 # Unit efforts?
if self.external.is_negated:
return 0
# TODO: this should be the min of all instances
return self.instance.get_effort(**kwargs)
def success_heuristic(self): # High is likely to succeed
# self.external.is_function
num_free = sum(isinstance(obj, OptimisticObject) for obj in self.input_objects)
return Score(num_free, -len(self.external.inputs)) # TODO: treat objects in the same domain as a unit
def overhead_heuristic(self): # Low is cheap
return self.external.overhead_heuristic()
def stats_heuristic(self): # Low is cheap and unlikely to succeed
#return self.overhead_heuristic() + self.success_heuristic()
return Score(self.overhead_heuristic(), self.success_heuristic())
#return Stats(self.overhead_heuristic(), self.success_heuristic())
def effort_heuristic(self): # Low is cheap and likely to succeed
return Score(self.overhead_heuristic(), -self.success_heuristic())
##################################################
class Instance(object):
_Result = None
def __init__(self, external, input_objects):
self.external = external
self.input_objects = tuple(input_objects)
self.disabled = False # TODO: perform disabled using complexity
self.history = [] # TODO: facts history
self.results_history = []
self._mapping = None
self._domain = None
self.reset()
@property
def info(self):
return self.external.info
@property
def mapping(self):
if self._mapping is None:
self._mapping = get_mapping(self.external.inputs, self.input_objects)
#for constant in self.external.constants: # TODO: no longer needed
# self._mapping[constant] = Object.from_name(constant)
return self._mapping
@property
def domain(self):
if self._domain is None:
#self._domain = substitute_expression(self.external.domain, self.mapping)
self._domain = tuple(substitute_fact(atom, self.mapping)
for atom in self.external.domain)
return self._domain
def get_iteration(self):
return INF if self.enumerated else self.num_calls
def get_domain(self):
return self.domain
def get_all_input_objects(self):
return set(self.input_objects)
def get_input_values(self):
return values_from_objects(self.input_objects)
#def is_first_call(self): # TODO: use in streams
# return self.online_calls == 0
#def has_previous_success(self):
# return self.online_success != 0
def reset(self):
#self.enable(evaluations={}, domain=None)
self.disabled = False
self.opt_index = self.external.num_opt_fns
self.num_calls = 0
self.enumerated = False
self.successful = False
def is_refined(self):
return self.opt_index == 0
def refine(self):
# TODO: could instead create a new Instance per opt_index
if not self.is_refined():
self.opt_index -= 1
return self.opt_index
def next_results(self, verbose=False):
raise NotImplementedError()
def first_results(self, num=1, **kwargs):
results = []
index = 0
while len(results) < num:
while index >= len(self.results_history):
if self.enumerated:
return results
self.next_results(**kwargs)
results.extend(self.results_history[index])
index += 1
return results
def all_results(self, **kwargs):
return self.first_results(num=INF, **kwargs)
def get_results(self, start=0):
results = []
for index in range(start, self.num_calls):
results.extend(self.results_history[index])
return results
def compute_complexity(self, evaluations, **kwargs):
# Will change as self.num_calls increases
#num_calls = INF if self.enumerated else self.num_calls
return compute_complexity(evaluations, self.get_domain(), **kwargs) + \
self.external.get_complexity(self.num_calls)
def get_effort(self, search_overhead=DEFAULT_SEARCH_OVERHEAD):
# TODO: handle case where resampled several times before the next search (search every ith time)
replan_effort = self.opt_index * search_overhead # By linearity of expectation
effort_fn = self.external.info.effort
if callable(effort_fn):
return replan_effort + effort_fn(*self.get_input_values())
return replan_effort + self.external.get_effort(search_overhead=search_overhead)
def update_statistics(self, start_time, results):
overhead = elapsed_time(start_time)
successes = sum(r.is_successful() for r in results)
self.external.update_statistics(overhead, bool(successes))
self.results_history.append(results)
#self.successes += successes
def disable(self, evaluations, domain):
self.disabled = True
def enable(self, evaluations, domain):
self.disabled = False
##################################################
class External(Performance):
_Instance = None
def __init__(self, name, info, inputs, domain):
super(External, self).__init__(name, info)
self.inputs = tuple(inputs)
self.domain = tuple(map(convert_constants, domain))
for p, c in Counter(self.inputs).items():
if not is_parameter(p):
# AssertionError: Expected item to be a variable: q2 in (?q1 q2)
raise ValueError('Input [{}] for stream [{}] is not a parameter'.format(p, name))
if c != 1:
raise ValueError('Input [{}] for stream [{}] is not unique'.format(p, name))
parameters = {a for i in self.domain for a in get_args(i) if is_parameter(a)}
for p in (parameters - set(self.inputs)):
raise ValueError('Parameter [{}] for stream [{}] is not included within inputs'.format(p, name))
for p in (set(self.inputs) - parameters):
print('Warning! Input [{}] for stream [{}] is not covered by a domain condition'.format(p, name))
self.constants = {a for i in self.domain for a in get_args(i) if not is_parameter(a)}
self.instances = {}
def reset(self, *args, **kwargs):
for instance in self.instances.values():
instance.reset(*args, **kwargs)
# TODO: naming convention for statics and fluents
@property
def has_outputs(self):
raise NotImplementedError()
@property
def is_fluent(self):
raise NotImplementedError()
@property
def is_negated(self):
raise NotImplementedError()
@property
def is_special(self):
return self.is_fluent or self.is_negated
@property
def is_function(self):
raise NotImplementedError()
@property
def is_cost(self):
return False
@property
def zero_complexity(self):
return self.is_special or not self.has_outputs
def get_complexity(self, num_calls=0):
if self.zero_complexity:
return 0
return num_calls + 1
def get_instance(self, input_objects):
input_objects = tuple(input_objects)
assert len(input_objects) == len(self.inputs)
if input_objects not in self.instances:
self.instances[input_objects] = self._Instance(self, input_objects)
return self.instances[input_objects]
def overhead_heuristic(self): # Low is little overhead
# TODO: infer other properties from use in the context of a stream plan
# TODO: use num_certified (only those that are in another stream) instead of num_outputs?
#num_inputs = len(self.inputs)
#num_domain = len(self.domain)
return Score(self.is_fluent, not self.is_function, self.has_outputs, len(self.inputs)) # structural/relational overhead
#overhead = 1e0*num_inputs + 1e1*num_outputs + 1e2*bool(num_fluents)
#return overhead
##################################################
def get_procedure_fn(stream_map, name):
if not isinstance(stream_map, dict): # DEBUG_MODES
return stream_map
if name not in stream_map:
raise ValueError('Undefined external procedure: {}'.format(name))
return stream_map[name]
def is_attribute(attribute):
return isinstance(attribute, str) and attribute.startswith(':')
def parse_lisp_list(lisp_list):
attributes = [lisp_list[i] for i in range(0, len(lisp_list), 2)]
for attribute in attributes:
if not is_attribute(attribute):
raise ValueError('Expected an attribute but got: {}'.format(attribute))
values = [lisp_list[i] for i in range(1, len(lisp_list), 2)]
if len(lisp_list) % 2 != 0:
raise ValueError('No value specified for attribute [{}]'.format(lisp_list[-1]))
return get_mapping(attributes, values)
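# Sketch of parse_lisp_list on a parsed stream declaration (hypothetical content):
#   parse_lisp_list([':inputs', ['?q1', '?q2'], ':domain', ['Conf', '?q1']])
#   -> {':inputs': ['?q1', '?q2'], ':domain': ['Conf', '?q1']}
# A non-attribute in an attribute position or a trailing attribute without a
# value raises ValueError.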
| 13,621 |
Python
| 41.04321 | 127 | 0.639601 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/write_pddl.py
|
import re
import math
from hsr_tamp.pddlstream.language.constants import AND, OR, OBJECT, TOTAL_COST, TOTAL_TIME, is_cost, get_prefix, \
CONNECTIVES, QUANTIFIERS
from hsr_tamp.pddlstream.language.conversion import pddl_from_object, is_atom, is_negated_atom, objects_from_evaluations
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
DEFAULT_TYPE = OBJECT # number
def pddl_parameter(param):
return '{} - {}'.format(param, DEFAULT_TYPE)
#return param
def pddl_parameters(parameters):
return ' '.join(map(pddl_parameter, parameters))
def pddl_head(name, args):
return '({})'.format(' '.join([name] + list(map(pddl_from_object, args))))
def pddl_from_evaluation(evaluation):
#if evaluation.head.function == TOTAL_COST:
# return None
head = pddl_head(evaluation.head.function, evaluation.head.args)
if is_atom(evaluation):
return head
elif is_negated_atom(evaluation):
return '(not {})'.format(head)
#value = int(evaluation.value)
value = evaluation.value # floats are fine for temporal planners
#value = int(math.ceil(evaluation.value))
return '(= {} {})'.format(head, value)
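# Sketch of the three output forms (object names are hypothetical):
#   Atom(Head('on', (a, b)))               -> '(on a b)'
#   NegatedAtom(Head('on', (a, b)))        -> '(not (on a b))'
#   Evaluation(Head('dist', (a, b)), 3.0)  -> '(= (dist a b) 3.0)'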
def pddl_functions(predicates):
return '\n\t\t'.join(sorted(p.pddl() for p in predicates))
def pddl_connective(literals, connective):
if not literals:
return '()'
if len(literals) == 1:
return literals[0].pddl()
return '({} {})'.format(connective, ' '.join(l.pddl() for l in literals))
def pddl_conjunction(literals):
return pddl_connective(literals, AND)
def pddl_disjunction(literals):
return pddl_connective(literals, OR)
def pddl_from_expression(expression):
if isinstance(expression, Object) or isinstance(expression, OptimisticObject):
return pddl_from_object(expression)
if isinstance(expression, str):
return expression
return '({})'.format(' '.join(map(pddl_from_expression, expression)))
##################################################
def pddl_problem(problem, domain, evaluations, goal_expression, objective=None):
objects = objects_from_evaluations(evaluations)
s = '(define (problem {})\n' \
'\t(:domain {})\n' \
'\t(:objects {})\n' \
'\t(:init \n\t\t{})\n' \
'\t(:goal {})'.format(
problem, domain,
' '.join(sorted(map(pddl_from_object, objects))), # map(pddl_parameter,
'\n\t\t'.join(sorted(filter(lambda p: p is not None,
map(pddl_from_evaluation, evaluations)))),
pddl_from_expression(goal_expression))
if objective is not None:
s += '\n\t(:metric minimize ({}))'.format(objective)
return s + ')\n'
def get_problem_pddl(evaluations, goal_exp, domain_pddl, temporal=True):
[domain_name] = re.findall(r'\(domain ([^ ]+)\)', domain_pddl)
problem_name = domain_name
objective = TOTAL_TIME if temporal else TOTAL_COST
    problem_pddl = pddl_problem(problem_name, domain_name, evaluations, goal_exp, objective=objective) # Argument order matches pddl_problem(problem, domain, ...)
#write_pddl(domain_pddl, problem_pddl, TEMP_DIR)
return problem_pddl
| 3,115 |
Python
| 37 | 120 | 0.649117 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/generator.py
|
import time
from collections import namedtuple, deque
try:
    from collections.abc import Iterator # Python 3.3+; collections.Iterator was removed in 3.10
except ImportError:
    from collections import Iterator # Python 2 fallback
from itertools import count
from hsr_tamp.pddlstream.utils import INF, elapsed_time
# TODO: indicate wild stream output just from the output form
# TODO: depth limited and cycle-free optimistic objects
class BoundedGenerator(Iterator):
"""
A generator with a fixed length.
The generator tracks its number of calls, allowing it to terminate with one fewer call
"""
def __init__(self, generator, max_calls=INF):
self.generator = generator
self.max_calls = max_calls
self.stopped = False
self.history = []
@property
def calls(self):
return len(self.history)
@property
def enumerated(self):
return self.stopped or (self.max_calls <= self.calls)
def next(self):
if self.enumerated:
raise StopIteration()
try:
self.history.append(next(self.generator))
except StopIteration:
self.stopped = True
raise StopIteration()
return self.history[-1]
__next__ = next
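# Example usage (a sketch): wrapping an unbounded generator with a call budget.
#   import itertools
#   gen = BoundedGenerator((x for x in itertools.count()), max_calls=2)
#   next(gen), next(gen) # -> 0, 1
#   gen.enumerated       # -> True; a further next() raises StopIteration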
def get_next(generator, default=[]):
new_values = default
enumerated = False
try:
new_values = next(generator)
except StopIteration:
enumerated = True
if isinstance(generator, BoundedGenerator):
enumerated |= generator.enumerated
return new_values, enumerated
##################################################
# Methods that convert some procedure -> function to a generator of lists
def from_list_gen_fn(list_gen_fn):
# Purposefully redundant for now
return list_gen_fn
def from_gen_fn(gen_fn):
return from_list_gen_fn(lambda *args, **kwargs: ([] if ov is None else [ov]
for ov in gen_fn(*args, **kwargs)))
def from_sampler(sampler, max_attempts=INF):
def gen_fn(*input_values):
attempts = count()
while next(attempts) < max_attempts:
yield sampler(*input_values)
return from_gen_fn(gen_fn)
##################################################
# Methods that convert some procedure -> function to a BoundedGenerator
def from_list_fn(list_fn):
#return lambda *args, **kwargs: iter([list_fn(*args, **kwargs)])
return lambda *args, **kwargs: BoundedGenerator(iter([list_fn(*args, **kwargs)]), max_calls=1)
def from_fn(fn):
def list_fn(*args, **kwargs):
outputs = fn(*args, **kwargs)
return [] if outputs is None else [outputs]
return from_list_fn(list_fn)
def outputs_from_boolean(boolean):
return tuple() if boolean else None
def from_test(test):
return from_fn(lambda *args, **kwargs: outputs_from_boolean(test(*args, **kwargs)))
def from_constant(constant):
return from_fn(fn_from_constant(constant))
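# Sketch relating the converters (the wrapped functions are hypothetical):
#   from_fn(lambda q: (q + 1,))       # yields one output tuple, then is exhausted
#   from_test(lambda q: q > 0)        # one empty output tuple on success, none on failure
#   from_sampler(random.random, max_attempts=10) # up to 10 single samples
# Each produces lists of output tuples, the interface the algorithms expect.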
def negate_test(test):
return lambda *args, **kwargs: not test(*args, **kwargs)
def from_gen(gen):
return from_gen_fn(lambda *args, **kwargs: iter(gen))
def empty_gen():
return from_gen([])
##################################################
# Methods that convert some procedure -> function
def fn_from_constant(constant):
return lambda *args, **kwargs: constant
universe_test = fn_from_constant(True)
empty_test = fn_from_constant(False)
##################################################
def accelerate_list_gen_fn(list_gen_fn, num_elements=1, max_attempts=1, max_time=INF):
"""
Accelerates a list_gen_fn by eagerly generating num_elements at a time if possible
"""
def new_list_gen_fn(*inputs):
generator = list_gen_fn(*inputs)
terminated = False
while not terminated:
start_time = time.time()
elements = []
for i in range(max_attempts):
if terminated or (num_elements <= len(elements)) or (max_time <= elapsed_time(start_time)):
break
new_elements, terminated = get_next(generator)
elements.extend(new_elements)
yield elements
return new_list_gen_fn
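# Example (a sketch; slow_gen_fn is hypothetical): batch up to 5 elements or
# 0.1s of effort per yield, instead of one element per underlying call.
#   fast_gen_fn = accelerate_list_gen_fn(slow_gen_fn, num_elements=5,
#                                        max_attempts=10, max_time=0.1)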
##################################################
Composed = namedtuple('Composed', ['outputs', 'step', 'generator'])
def compose_gen_fns(*gen_fns):
assert gen_fns
# Assumes consistent ordering of inputs/outputs
# Samplers are a special case where only the first needs to be a generator
# TODO: specify info about what to compose
# TODO: alternatively, make a new stream that composes several
def gen_fn(*inputs):
queue = deque([Composed([], 0, gen_fns[0](*inputs))])
while queue:
composed = queue.popleft()
new_outputs_list, terminated = get_next(composed.generator)
for new_outputs in new_outputs_list:
                outputs = list(composed.outputs) + list(new_outputs) # Tolerate tuple outputs from the generator
if composed.step == (len(gen_fns) - 1):
yield outputs
else:
next_step = composed.step + 1
                    # Composed has no output_values field; pass the accumulated outputs
                    generator = gen_fns[next_step](*(inputs + tuple(outputs)))
queue.append(Composed(outputs, next_step, generator))
if not new_outputs_list:
yield None
if not terminated:
queue.append(composed)
return gen_fn
def wild_gen_fn_from_gen_fn(gen_fn):
def wild_gen_fn(*args, **kwargs):
for output_list in gen_fn(*args, **kwargs):
fact_list = []
yield output_list, fact_list
return wild_gen_fn
def gen_fn_from_wild_gen_fn(wild_gen_fn):
    def gen_fn(*args, **kwargs):
        for output_list, _ in wild_gen_fn(*args, **kwargs):
            yield output_list
    return gen_fn # Bug fix: previously returned wild_gen_fn, discarding the wrapper
| 5,672 |
Python
| 30.17033 | 107 | 0.592031 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/conversion.py
|
from __future__ import print_function
import collections
try:
    from collections.abc import Sequence # Python 3.3+; collections.Sequence was removed in 3.10
except ImportError:
    from collections import Sequence # Python 2 fallback
from itertools import product
from hsr_tamp.pddlstream.language.constants import EQ, AND, OR, NOT, CONNECTIVES, QUANTIFIERS, OPERATORS, OBJECTIVES, \
Head, Evaluation, get_prefix, get_args, is_parameter, is_plan, Fact, Not, Equal, Action, StreamAction, \
FunctionAction, DurativeAction, Solution, Assignment, OptPlan, Certificate
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import str_from_object, apply_mapping
def replace_expression(parent, fn):
prefix = get_prefix(parent)
if prefix == EQ:
assert(len(parent) == 3)
value = parent[2]
        if isinstance(parent[2], Sequence):
value = replace_expression(value, fn)
return prefix, replace_expression(parent[1], fn), value
elif prefix in (CONNECTIVES + OBJECTIVES):
children = parent[1:]
return (prefix,) + tuple(replace_expression(child, fn) for child in children)
elif prefix in QUANTIFIERS:
assert(len(parent) == 3)
parameters = parent[1]
child = parent[2]
return prefix, parameters, replace_expression(child, fn)
name = get_prefix(parent).lower()
args = get_args(parent)
return Fact(name, map(fn, args))
def obj_from_value_expression(parent):
return replace_expression(parent, lambda o: o if is_parameter(o) else Object.from_value(o))
def value_from_obj_expression(parent):
return replace_expression(parent, lambda o: o.value)
def value_from_evaluation(evaluation):
return value_from_obj_expression(fact_from_evaluation(evaluation))
##################################################
def get_formula_operators(formula):
if formula is None:
return set()
prefix = get_prefix(formula)
if prefix not in OPERATORS:
return set()
operators = {prefix}
for subformula in formula[1:]:
operators.update(get_formula_operators(subformula))
return operators
def dnf_from_positive_formula(parent):
if parent is None:
return []
prefix = get_prefix(parent)
assert(prefix not in (QUANTIFIERS + (NOT, EQ))) # also check if atom?
children = []
if prefix == AND:
for combo in product(*(dnf_from_positive_formula(child) for child in parent[1:])):
children.append([fact for clause in combo for fact in clause])
elif prefix == OR:
for child in parent[1:]:
children.extend(dnf_from_positive_formula(child))
else:
# TODO: IMPLY
children.append([tuple(parent)])
return children
def list_from_conjunction(parent):
if parent is None:
return []
clauses = dnf_from_positive_formula(parent)
if not clauses:
return clauses
if len(clauses) >= 2:
raise ValueError('Formula {} has more than one conjunctive clauses'.format(parent))
return clauses[0]
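# Sketch of the DNF expansion feeding list_from_conjunction (A, B, C are
# hypothetical facts):
#   dnf_from_positive_formula(('and', ('or', A, B), C)) -> [[A, C], [B, C]]
#   list_from_conjunction(('and', A, C))                -> [A, C]
#   list_from_conjunction(('and', ('or', A, B), C))     -> ValueError (two clauses)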
def substitute_expression(parent, mapping):
if any(isinstance(parent, Class) for Class in [str, Object, OptimisticObject]):
return mapping.get(parent, parent)
return tuple(substitute_expression(child, mapping) for child in parent)
def substitute_fact(fact, mapping):
return Fact(get_prefix(fact), apply_mapping(get_args(fact), mapping))
##################################################
def pddl_from_object(obj):
if isinstance(obj, str):
return obj
return obj.pddl
def pddl_list_from_expression(tree):
if isinstance(tree, Object) or isinstance(tree, OptimisticObject):
return pddl_from_object(tree)
if isinstance(tree, str):
return tree
return tuple(map(pddl_list_from_expression, tree))
##################################################
def is_atom(evaluation):
return evaluation.value is True
def is_negated_atom(evaluation):
return evaluation.value is False
def objects_from_evaluations(evaluations):
# TODO: assumes object predicates
objects = set()
for evaluation in evaluations:
objects.update(evaluation.head.args)
return objects
##################################################
def head_from_fact(fact):
return Head(get_prefix(fact), get_args(fact))
def evaluation_from_fact(fact):
prefix = get_prefix(fact)
if prefix == EQ:
head, value = fact[1:]
elif prefix == NOT:
head = fact[1]
value = False
else:
head = fact
value = True
return Evaluation(head_from_fact(head), value)
def fact_from_evaluation(evaluation):
fact = Fact(evaluation.head.function, evaluation.head.args)
if is_atom(evaluation):
return fact
elif is_negated_atom(evaluation):
return Not(fact)
return Equal(fact, evaluation.value)
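# The two conversions are inverses on the three fact forms (a, b hypothetical):
#   evaluation_from_fact(('on', a, b))              -> Evaluation(Head('on', (a, b)), True)
#   evaluation_from_fact(('not', ('on', a, b)))     -> ... value=False
#   evaluation_from_fact(('=', ('dist', a, b), 3))  -> ... value=3
# and fact_from_evaluation maps each back to the original fact.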
# def state_from_evaluations(evaluations):
# # TODO: default value?
# # TODO: could also implement within predicates
# state = {}
# for evaluation in evaluations:
# if evaluation.head in state:
# assert(evaluation.value == state[evaluation.head])
# state[evaluation.head] = evaluation.value
# return state
##################################################
def obj_from_pddl(pddl):
if pddl in Object._obj_from_name:
return Object.from_name(pddl)
elif pddl in OptimisticObject._obj_from_name:
return OptimisticObject.from_name(pddl)
raise ValueError(pddl)
def values_from_objects(objects):
return tuple(obj.value for obj in objects)
#return tuple(map(value_from_object, objects))
def temporal_from_sequential(action):
# TODO: clean this up
assert isinstance(action, DurativeAction)
name, args, start, duration = action
if name[-2] != '-':
return action
new_name, index = name[:-2], int(name[-1])
if index != 0: # Only keeps the start action
return None
return DurativeAction(new_name, args, start, duration)
def transform_action_args(action, fn):
if isinstance(action, Action):
name, args = action
return Action(name, tuple(map(fn, args)))
elif isinstance(action, DurativeAction):
action = temporal_from_sequential(action)
if action is None:
return None
name, args, start, duration = action
return DurativeAction(name, tuple(map(fn, args)), start, duration)
elif isinstance(action, StreamAction):
name, inputs, outputs = action
return StreamAction(name, tuple(map(fn, inputs)), tuple(map(fn, outputs)))
elif isinstance(action, FunctionAction):
name, inputs = action
return FunctionAction(name, tuple(map(fn, inputs)))
elif isinstance(action, Assignment):
args, = action
return Assignment(tuple(map(fn, args)))
raise NotImplementedError(action)
def transform_plan_args(plan, fn):
if not is_plan(plan):
return plan
return list(filter(lambda a: a is not None, [transform_action_args(action, fn) for action in plan]))
# TODO: would be better just to rename everything at the start. Still need to handle constants
def obj_from_pddl_plan(pddl_plan):
return transform_plan_args(pddl_plan, obj_from_pddl)
def param_from_object(obj):
if isinstance(obj, OptimisticObject):
return repr(obj)
#return obj.pddl
if isinstance(obj, Object):
return obj.value
raise ValueError(obj)
def params_from_objects(objects):
return tuple(map(param_from_object, objects))
def objects_from_values(values):
return tuple(map(Object.from_value, values))
##################################################
#def expression_holds(expression, evaluations):
# pass
def revert_solution(plan, cost, evaluations):
all_facts = list(map(value_from_evaluation, evaluations))
if isinstance(plan, OptPlan):
action_plan = transform_plan_args(plan.action_plan, param_from_object)
preimage_facts = list(map(value_from_obj_expression, plan.preimage_facts))
else:
action_plan = transform_plan_args(plan, param_from_object)
preimage_facts = None
certificate = Certificate(all_facts, preimage_facts)
return Solution(action_plan, cost, certificate)
#def opt_obj_from_value(value):
# if Object.has_value(value):
# return Object.from_value(value)
# return OptimisticObject.from_opt(value)
# # TODO: better way of doing this?
# #return OptimisticObject._obj_from_inputs.get(value, Object.from_value(value))
def str_from_head(head):
return '{}{}'.format(get_prefix(head), str_from_object(get_args(head)))
def str_from_fact(fact):
prefix = get_prefix(fact)
if prefix == NOT:
return '~{}'.format(str_from_fact(fact[1]))
if prefix == EQ: # TODO: predicate = vs function =
_, head, value = fact
return '{}={}'.format(str_from_fact(head), value)
return str_from_head(fact)
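# Illustrative output (added comment, not from the original module): assuming
# str_from_object renders a tuple of args as '(a, b)',
#   str_from_fact(('not', ('on', 'a', 'b'))) -> '~on(a, b)'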
| 8,812 |
Python
| 33.560784 | 119 | 0.650817 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/object.py
|
from collections import namedtuple, defaultdict
from itertools import count
from hsr_tamp.pddlstream.language.constants import get_parameter_name
#from hsr_tamp.pddlstream.language.conversion import values_from_objects
from hsr_tamp.pddlstream.utils import str_from_object, is_hashable
USE_HASH = True
USE_OBJ_STR = True
USE_OPT_STR = True
OPT_PREFIX = '#'
PREFIX_LEN = 1
class Object(object):
_prefix = 'v'
_obj_from_id = {}
_obj_from_value = {}
_obj_from_name = {}
def __init__(self, value, stream_instance=None, name=None):
self.value = value
self.index = len(Object._obj_from_name)
if name is None:
#name = str(value) # TODO: use str for the name when possible
name = '{}{}'.format(self._prefix, self.index)
self.pddl = name
self.stream_instance = stream_instance # TODO: store first created stream instance
Object._obj_from_id[id(self.value)] = self
Object._obj_from_name[self.pddl] = self
if is_hashable(value):
Object._obj_from_value[self.value] = self
def is_unique(self):
return True
def is_shared(self):
return False
@staticmethod
def from_id(value):
if id(value) not in Object._obj_from_id:
return Object(value)
return Object._obj_from_id[id(value)]
@staticmethod
def has_value(value):
if USE_HASH and not is_hashable(value):
return id(value) in Object._obj_from_id
return value in Object._obj_from_value
@staticmethod
def from_value(value):
if USE_HASH and not is_hashable(value):
return Object.from_id(value)
if value not in Object._obj_from_value:
return Object(value)
return Object._obj_from_value[value]
@staticmethod
def from_name(name):
return Object._obj_from_name[name]
@staticmethod
def reset():
Object._obj_from_id.clear()
Object._obj_from_value.clear()
Object._obj_from_name.clear()
def __lt__(self, other): # For heapq on python3
return self.index < other.index
def __repr__(self):
if USE_OBJ_STR:
return str_from_object(self.value) # str
return self.pddl
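# Illustrative usage (added comment, not from the original module): equal
# hashable values are interned to a single Object, so with a fresh registry
#   a1 = Object.from_value('block-A')  # pddl name 'v0'
#   a2 = Object.from_value('block-A')
#   assert a1 is a2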
##################################################
class UniqueOptValue(namedtuple('UniqueOptTuple', ['instance', 'sequence_index', 'output'])):
@property
def parameter(self):
# return self.instance.external.outputs[self.output_index]
return self.output
class SharedOptValue(namedtuple('SharedOptTuple', ['stream', 'inputs', 'input_objects', 'output'])):
@property
def values(self):
return tuple(obj.value for obj in self.input_objects)
#return values_from_objects(self.input_objects)
##################################################
class DebugValue(object): # TODO: could just do an object
_output_counts = defaultdict(count)
_prefix = '@' # $ | @
def __init__(self, stream, input_values, output_parameter):
self.stream = stream
self.input_values = input_values
self.output_parameter = output_parameter
self.index = next(self._output_counts[output_parameter])
# def __iter__(self):
# return self.stream, self.input_values, self.output_parameter
# def __hash__(self):
# return hash(tuple(self)) # self.__class__
# def __eq__(self, other):
# return (self.__class__ == other.__class__) and (tuple(self) == tuple(other))
def __repr__(self):
# Can also just return first letter of the prefix
return '{}{}{}'.format(self._prefix, get_parameter_name(self.output_parameter), self.index)
class SharedDebugValue(namedtuple('SharedDebugValue', ['stream', 'output_parameter'])):
    # TODO: this alone doesn't refine at the shared object level
_prefix = '&' # $ | @ | &
def __repr__(self):
#index = hash(self.stream) % 1000
#index = self.stream.outputs.index(self.output_parameter) # TODO: self.stream is a str
#return '{}{}{}'.format(self._prefix, get_parameter_name(self.output_parameter), index)
#return '{}{}'.format(self._prefix, self.stream)
return '{}{}'.format(self._prefix, get_parameter_name(self.output_parameter))
##################################################
# TODO: just one object class or have Optimistic extend Object
# TODO: make a parameter class that has access to some underlying value
class OptimisticObject(object):
_prefix = '{}o'.format(OPT_PREFIX) # $ % #
_obj_from_inputs = {}
_obj_from_name = {}
_count_from_prefix = {}
def __init__(self, value, param):
# TODO: store first created instance
self.value = value
self.param = param
self.index = len(OptimisticObject._obj_from_inputs)
if USE_OPT_STR and isinstance(self.param, UniqueOptValue):
# TODO: instead just endow UniqueOptValue with a string function
#parameter = self.param.instance.external.outputs[self.param.output_index]
parameter = self.param.output
prefix = get_parameter_name(parameter)[:PREFIX_LEN]
var_index = next(self._count_from_prefix.setdefault(prefix, count()))
self.repr_name = '{}{}{}'.format(OPT_PREFIX, prefix, var_index) #self.index)
self.pddl = self.repr_name
else:
self.pddl = '{}{}'.format(self._prefix, self.index)
self.repr_name = self.pddl
OptimisticObject._obj_from_inputs[(value, param)] = self
OptimisticObject._obj_from_name[self.pddl] = self
def is_unique(self):
return isinstance(self.param, UniqueOptValue)
def is_shared(self):
#return isinstance(self.param, SharedOptValue)
return not isinstance(self.param, UniqueOptValue) # OptValue
@staticmethod
def from_opt(value, param):
# TODO: make param have a default value?
key = (value, param)
if key not in OptimisticObject._obj_from_inputs:
return OptimisticObject(value, param)
return OptimisticObject._obj_from_inputs[key]
@staticmethod
def from_name(name):
return OptimisticObject._obj_from_name[name]
@staticmethod
def reset():
OptimisticObject._obj_from_inputs.clear()
OptimisticObject._obj_from_name.clear()
OptimisticObject._count_from_prefix.clear()
def __lt__(self, other): # For heapq on python3
return self.index < other.index
def __repr__(self):
return self.repr_name
#return repr(self.repr_name) # Prints in quotations
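# Illustrative naming (added comment, not from the original module): with
# USE_OPT_STR enabled, an optimistic output bound via a UniqueOptValue whose
# parameter is '?p' renders as '#p0', '#p1', ... (OPT_PREFIX plus the first
# PREFIX_LEN characters of the parameter name and a per-prefix counter);
# otherwise the fallback name is '#o<index>'.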
| 6,602 |
Python
| 39.509202 | 100 | 0.617995 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/serialized.py
|
from __future__ import print_function
from hsr_tamp.pddlstream.algorithms.meta import solve_restart, solve
from hsr_tamp.pddlstream.language.temporal import parse_domain
from hsr_tamp.pddlstream.utils import INF, Verbose, str_from_object, SEPARATOR
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.focused import solve_focused
from hsr_tamp.pddlstream.language.conversion import Certificate, Object, \
transform_plan_args, value_from_evaluation
from hsr_tamp.pddlstream.language.constants import PDDLProblem, get_function, get_prefix, print_solution, AND, get_args, And, \
Solution, Or, is_plan
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, \
get_action_instances, apply_action, evaluation_from_fd, get_fluents
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init
def serialize_goal(goal):
if get_prefix(goal) == AND:
return get_args(goal)
return [goal]
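# Illustrative example (added comment, not from the original module):
#   serialize_goal(('and', ('on', 'a', 'b'), ('clear', 'c')))
#   -> (('on', 'a', 'b'), ('clear', 'c'))
#   serialize_goal(('clear', 'c')) -> [('clear', 'c')]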
def partition_facts(domain, facts):
fluents = get_fluents(domain)
static_facts = []
fluent_facts = []
for fact in facts:
if get_prefix(get_function(fact)).lower() in fluents:
fluent_facts.append(fact)
else:
static_facts.append(fact)
return static_facts, fluent_facts
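# Illustrative example (added comment, not from the original module): assuming
# 'at' is the only fluent predicate declared by the domain,
#   partition_facts(domain, [('at', 'r', 'p0'), ('connected', 'p0', 'p1')])
#   -> ([('connected', 'p0', 'p1')], [('at', 'r', 'p0')])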
def apply_actions(domain, state, plan, unit_costs=False):
import pddl
# Goal serialization just assumes the tail of the plan includes an abstract action to achieve each condition
static_state, _ = partition_facts(domain, state)
print('Static:', static_state)
# TODO: might need properties that involve an object that aren't useful yet
evaluations = evaluations_from_init(state)
#goal_exp = obj_from_value_expression(goal)
goal_exp = None
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
task.init = set(task.init)
for instance in get_action_instances(task, transform_plan_args(plan, Object.from_value)):
apply_action(task.init, instance)
fluents = get_fluents(domain)
fluent_state = [value_from_evaluation(evaluation_from_fd(atom))
for atom in task.init if isinstance(atom, pddl.Atom) and (atom.predicate in fluents)]
print('Fluent:', fluent_state)
state = static_state + fluent_state
return state
##################################################
def solve_serialized(initial_problem, stream_info={}, unit_costs=False, unit_efforts=False, verbose=True,
retain_facts=True, **kwargs):
# TODO: be careful of CanMove deadends
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = initial_problem
_, _, domain, streams = parse_problem(
initial_problem, stream_info, constraints=None, unit_costs=unit_costs, unit_efforts=unit_efforts)
    static_init, _ = partition_facts(domain, init) # might not be able to reprove static_init
#global_all, global_preimage = [], []
global_plan = []
global_cost = 0
state = list(init)
goals = serialize_goal(goal)
# TODO: instead just track how the true init updates
for i in range(len(goals)):
# TODO: option in algorithms to pass in existing facts
for stream in streams:
stream.reset()
goal = And(*goals[:i+1])
print('Goal:', str_from_object(goal))
        # No strict need to reuse streams because they are generator functions
#local_problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, state, goal)
local_problem = PDDLProblem(domain_pddl, constant_map, streams, None, state, goal)
with Verbose(verbose):
solution = solve_focused(local_problem, stream_info=stream_info, unit_costs=unit_costs,
unit_efforts=unit_efforts, verbose=True, **kwargs)
print_solution(solution)
local_plan, local_cost, local_certificate = solution
if local_plan is None:
# TODO: replan upon failure
global_certificate = Certificate(all_facts={}, preimage_facts=None)
return Solution(None, INF, global_certificate)
if retain_facts:
state = local_certificate.all_facts
else:
_, fluent_facts = partition_facts(domain, state)
state = static_init + fluent_facts + local_certificate.preimage_facts # TODO: include functions
#print('State:', state)
# TODO: indicate when each fact is used
# TODO: record failed facts
global_plan.extend(local_plan) # TODO: compute preimage of the executed plan
global_cost += local_cost
static_state, _ = partition_facts(domain, state)
#global_all.extend(partition_facts(domain, local_certificate.all_facts)[0])
#global_preimage.extend(static_state)
print('Static:', static_state)
state = apply_actions(domain, state, local_plan, unit_costs=unit_costs)
print(SEPARATOR)
#user_input('Continue?')
# TODO: could also just test the goal here
# TODO: constrain future plan skeletons
global_certificate = Certificate(all_facts={}, preimage_facts=None)
return global_plan, global_cost, global_certificate
##################################################
def solve_deferred(initial_problem, stream_info={}, unit_costs=False, unit_efforts=False, verbose=True,
retain_facts=True, **kwargs):
# TODO: serialize solving deferred problems
# TODO: can impose plan skeleton constraints as well
# TODO: investigate case where the first plan skeleton isn't feasible (e.g. due to blockage)
raise NotImplementedError()
#######################################################
def create_simplified_problem(problem, use_actions=False, use_streams=False, new_goal=None):
# TODO: check whether goal is a conjunction
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = problem
if not use_streams:
stream_pddl = None
if new_goal is None:
new_goal = goal_parts
domain = parse_domain(domain_pddl) # TODO: Constant map value @base not mentioned in domain :constants
if not use_actions:
domain.actions[:] = [] # No actions
return PDDLProblem(domain, constant_map, stream_pddl, stream_map, init, new_goal)
def test_init_goal(problem, **kwargs):
problem = create_simplified_problem(problem, use_actions=False, use_streams=False, new_goal=None)
plan, cost, certificate = solve(problem, **kwargs)
assert not plan
is_goal = is_plan(plan)
return is_goal, certificate
#######################################################
def solve_all_goals(initial_problem, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
# TODO(caelan): cleaner specification of goal ordering
goal_formula = And(*goal_parts)
print(solve_all_goals.__name__, goal_formula)
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
return solve_restart(problem, **kwargs)
def solve_first_goal(initial_problem, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
achieved_parts = []
unachieved_parts = []
for task_part in goal_parts:
# TODO: store any stream evaluations (tests) and limit complexity
problem = create_simplified_problem(initial_problem, new_goal=task_part)
solution = solve_restart(problem, **kwargs)
plan, _, _ = solution
if plan is None:
unachieved_parts.append(task_part)
elif len(plan) == 0:
achieved_parts.append(task_part)
else:
raise RuntimeError(task_part)
# print(achieved_parts)
# print(unachieved_parts)
# TODO: reset to initial state if not achieved
goal_formula = And(*achieved_parts)
if unachieved_parts:
goal_formula = And(Or(*unachieved_parts), goal_formula)
    print(solve_first_goal.__name__, goal_formula)
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
return solve_restart(problem, **kwargs)
def solve_next_goal(initial_problem, serialize=True, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
# TODO: store serialization state to ensure progress is made
# goal_formula = And(Or(*task_parts), *reset_parts) # TODO: still possibly having the disjunctive goal issue
indices = list(range(0, len(goal_parts), 1)) if serialize else [len(goal_parts)]
for i in indices:
        # Slice rather than reassign goal_parts; reassigning would shrink it across iterations
        goal_formula = And(*goal_parts[:i + 1])
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
print('Goal {}: {}'.format(i, goal_formula))
# TODO: reset streams?
solution = solve_restart(problem, **kwargs)
# TODO: detect which goals were achieved
plan, _, _ = solution
if plan is None:
return solution
if (i == len(indices) - 1) or (len(plan) >= 1):
return solution
return Solution(plan=[], cost=0, certificate=[])
| 9,246 |
Python
| 44.777228 | 127 | 0.660934 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/common.py
|
import time
from collections import namedtuple, OrderedDict
from hsr_tamp.pddlstream.language.constants import is_plan, get_length, FAILED #, INFEASIBLE, SUCCEEDED
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, obj_from_value_expression, revert_solution
from hsr_tamp.pddlstream.utils import INF, elapsed_time, check_memory
# Complexity is a way to characterize the number of external evaluations required for a solution
# Most algorithms regularize to prefer lower complexity solutions
# Also consider depth, level, priority, layer
# Effort incorporates success rate while complexity doesn't
# Complexity could incorporate how likely something is to help with a task in general though
# Effort relates to future expected time while complexity refers to past time
COMPLEXITY_OP = max # max | sum
INIT_EVALUATION = None
INTERNAL_EVALUATION = False
UNKNOWN_EVALUATION = 'unknown'
EvaluationNode = namedtuple('EvaluationNode', ['complexity', 'result'])
Solution = namedtuple('Solution', ['plan', 'cost', 'time'])
SOLUTIONS = [] # TODO: remove global variable
class SolutionStore(object):
def __init__(self, evaluations, max_time, success_cost, verbose, max_memory=INF):
# TODO: store a map from head to value?
# TODO: include other problem information here?
# TODO: determine when the plan converges
self.evaluations = evaluations
#self.initial_evaluations = copy.copy(evaluations)
self.start_time = time.time()
self.max_time = max_time
self.max_memory = max_memory
self.success_cost = success_cost # Inclusive
self.verbose = verbose
#self.best_cost = self.cost_fn(self.best_plan)
self.solutions = []
self.sample_time = 0.
@property
def search_time(self):
return self.elapsed_time() - self.sample_time
@property
def best_plan(self):
# TODO: return INFEASIBLE if can prove no solution
return self.solutions[-1].plan if self.solutions else FAILED
@property
def best_cost(self):
return self.solutions[-1].cost if self.solutions else INF
def add_plan(self, plan, cost):
# TODO: double-check that plan is a solution
if is_plan(plan) and (cost < self.best_cost):
self.solutions.append(Solution(plan, cost, elapsed_time(self.start_time)))
def has_solution(self):
return is_plan(self.best_plan)
def is_solved(self):
return self.has_solution() and (self.best_cost <= self.success_cost)
def elapsed_time(self):
return elapsed_time(self.start_time)
def is_timeout(self):
return (self.max_time <= self.elapsed_time()) or not check_memory(self.max_memory)
def is_terminated(self):
return self.is_solved() or self.is_timeout()
#def __repr__(self):
# raise NotImplementedError()
def extract_solution(self):
SOLUTIONS[:] = self.solutions
return revert_solution(self.best_plan, self.best_cost, self.evaluations)
def export_summary(self): # TODO: log, etc...
# TODO: SOLUTIONS
#status = SUCCEEDED if self.is_solved() else FAILED # TODO: INFEASIBLE, OPTIMAL
return {
'solved': self.is_solved(),
#'solved': self.has_solution(),
'solutions': len(self.solutions),
'cost': self.best_cost,
'length': get_length(self.best_plan),
'evaluations': len(self.evaluations),
'search_time': self.search_time,
'sample_time': self.sample_time,
'run_time': self.elapsed_time(),
'timeout': self.is_timeout(),
#'status': status,
}
##################################################
def add_fact(evaluations, fact, result=INIT_EVALUATION, complexity=0):
evaluation = evaluation_from_fact(fact)
if (evaluation not in evaluations) or (complexity < evaluations[evaluation].complexity):
evaluations[evaluation] = EvaluationNode(complexity, result)
return True
return False
def add_facts(evaluations, facts, **kwargs):
new_evaluations = []
for fact in facts:
if add_fact(evaluations, fact, **kwargs):
new_evaluations.append(evaluation_from_fact(fact))
return new_evaluations
def add_certified(evaluations, result, **kwargs):
complexity = result.compute_complexity(evaluations, **kwargs)
return add_facts(evaluations, result.get_certified(), result=result, complexity=complexity)
def evaluations_from_init(init):
evaluations = OrderedDict()
for raw_fact in init:
fact = obj_from_value_expression(raw_fact)
add_fact(evaluations, fact, result=INIT_EVALUATION, complexity=0)
return evaluations
def combine_complexities(complexities, complexity_op=COMPLEXITY_OP):
return complexity_op([0] + list(complexities))
def compute_complexity(evaluations, facts, complexity_op=COMPLEXITY_OP):
if not facts:
return 0
return complexity_op(evaluations[evaluation_from_fact(fact)].complexity for fact in facts)
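# Illustrative example (added comment, not from the original module): with
# COMPLEXITY_OP == max and facts recorded at complexities {f1: 0, f2: 2},
#   compute_complexity(evaluations, [f1, f2]) -> 2
#   compute_complexity(evaluations, []) -> 0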
##################################################
def optimistic_complexity(evaluations, optimistic_facts, fact):
if fact in optimistic_facts: # Matters due to reachieving
return optimistic_facts[fact]
evaluation = evaluation_from_fact(fact)
#if evaluation in evaluations:
return evaluations[evaluation].complexity
#return optimistic_facts[fact]
def stream_plan_preimage(stream_plan):
# Easy because monotonic
preimage = set()
achieved = set()
for stream in stream_plan:
preimage.update(set(stream.get_domain()) - achieved)
achieved.update(stream.get_certified())
return preimage
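# Illustrative example (added comment, not from the original module): if
# stream s1 certifies fact f and stream s2 has domain {f, g}, then
# stream_plan_preimage([s1, s2]) contains g (plus s1's own domain) but not f,
# because f is achieved inside the plan.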
def stream_plan_complexity(evaluations, stream_plan, stream_calls, complexity_op=COMPLEXITY_OP):
if not is_plan(stream_plan):
return INF
# TODO: difference between a result having a particular complexity and the next result having something
#optimistic_facts = {}
optimistic_facts = {fact: evaluations[evaluation_from_fact(fact)].complexity
for fact in stream_plan_preimage(stream_plan)}
result_complexities = []
#complexity = 0
for i, result in enumerate(stream_plan):
# if result.external.get_complexity(num_calls=INF) == 0: # TODO: skip if true
result_complexity = complexity_op([0] + [optimistic_facts[fact]
#optimistic_complexity(evaluations, optimistic_facts, fact)
for fact in result.get_domain()])
# if stream_calls is None:
# num_calls = result.instance.num_calls
# else:
num_calls = stream_calls[i]
result_complexity += result.external.get_complexity(num_calls)
result_complexities.append(result_complexity)
#complexity = complexity_op(complexity, result_complexity)
for fact in result.get_certified():
if fact not in optimistic_facts:
optimistic_facts[fact] = result_complexity
complexity = complexity_op([0] + result_complexities)
return complexity
def is_instance_ready(evaluations, instance):
return all(evaluation_from_fact(f) in evaluations
for f in instance.get_domain())
| 7,300 |
Python
| 40.016854 | 116 | 0.661918 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/refinement.py
|
from __future__ import print_function
import time
from itertools import product
from copy import deepcopy, copy
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.scheduling.plan_streams import plan_streams, OptSolution
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.algorithms.constraints import add_plan_constraints, PlanConstraints, WILD
from hsr_tamp.pddlstream.language.constants import FAILED, INFEASIBLE, is_plan
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, substitute_expression
from hsr_tamp.pddlstream.language.function import FunctionResult, Function
from hsr_tamp.pddlstream.language.stream import StreamResult, Result
from hsr_tamp.pddlstream.language.statistics import check_effort, compute_plan_effort
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import INF, safe_zip, get_mapping, implies, elapsed_time
CONSTRAIN_STREAMS = False
CONSTRAIN_PLANS = False
MAX_DEPTH = INF # 1 | INF
def is_refined(stream_plan):
# TODO: lazily expand the shared objects in some cases to prevent increase in size
if not is_plan(stream_plan):
return True
# TODO: some of these opt_index equal None
return all((result.opt_index is None) or result.is_refined()
for result in stream_plan)
##################################################
def optimistic_process_instance(instantiator, instance, verbose=False):
for result in instance.next_optimistic():
if verbose:
            print(result) # TODO: make a debug tool that reports the optimistic streams
new_facts = False
complexity = instantiator.compute_complexity(instance)
for fact in result.get_certified():
new_facts |= instantiator.add_atom(evaluation_from_fact(fact), complexity)
if isinstance(result, FunctionResult) or new_facts:
yield result
def prune_high_effort_streams(streams, max_effort=INF, **effort_args):
# TODO: convert streams to test streams with extremely high effort
low_effort_streams = []
for stream in streams:
effort = stream.get_effort(**effort_args)
if isinstance(stream, Function) or check_effort(effort, max_effort):
low_effort_streams.append(stream)
return low_effort_streams
def optimistic_process_streams(evaluations, streams, complexity_limit=INF, **effort_args):
optimistic_streams = prune_high_effort_streams(streams, **effort_args)
instantiator = Instantiator(optimistic_streams)
for evaluation, node in evaluations.items():
if node.complexity <= complexity_limit:
instantiator.add_atom(evaluation, node.complexity)
results = []
while instantiator and (instantiator.min_complexity() <= complexity_limit):
results.extend(optimistic_process_instance(instantiator, instantiator.pop_stream()))
# TODO: instantiate and solve to avoid repeated work
exhausted = not instantiator
return results, exhausted
##################################################
def optimistic_stream_instantiation(instance, bindings, opt_evaluations, only_immediate=False):
# TODO: combination for domain predicates
new_instances = []
input_candidates = [bindings.get(i, [i]) for i in instance.input_objects]
if only_immediate and not all(len(candidates) == 1 for candidates in input_candidates):
return new_instances
for input_combo in product(*input_candidates):
mapping = get_mapping(instance.input_objects, input_combo)
domain_evaluations = set(map(evaluation_from_fact, substitute_expression(
instance.get_domain(), mapping))) # TODO: could just instantiate first
if domain_evaluations <= opt_evaluations:
new_instance = instance.external.get_instance(input_combo)
# TODO: method for eagerly evaluating some of these?
if not new_instance.is_refined():
new_instance.refine()
new_instances.append(new_instance)
return new_instances
def optimistic_stream_evaluation(evaluations, stream_plan, use_bindings=True):
# TODO: can also use the instantiator and operate directly on the outputs
# TODO: could bind by just using new_evaluations
evaluations = set(evaluations) # Converts to a set for subset testing
opt_evaluations = set(evaluations)
new_results = []
bindings = {} # TODO: report the depth considered
for opt_result in stream_plan: # TODO: just refine the first step of the plan
for new_instance in optimistic_stream_instantiation(
opt_result.instance, (bindings if use_bindings else {}), opt_evaluations):
for new_result in new_instance.next_optimistic():
opt_evaluations.update(map(evaluation_from_fact, new_result.get_certified()))
new_results.append(new_result)
if isinstance(new_result, StreamResult): # Could not add if same value
for opt, obj in safe_zip(opt_result.output_objects, new_result.output_objects):
bindings.setdefault(opt, []).append(obj)
return new_results, bindings
##################################################
# def compute_stream_results(evaluations, opt_results, externals, complexity_limit, **effort_args):
# # TODO: revisit considering double bound streams
# functions = list(filter(lambda s: type(s) is Function, externals))
# opt_evaluations = evaluations_from_stream_plan(evaluations, opt_results)
# new_results, _ = optimistic_process_streams(opt_evaluations, functions, complexity_limit, **effort_args)
# return opt_results + new_results
def compute_skeleton_constraints(opt_plan, bindings):
skeleton = []
groups = {arg: values for arg, values in bindings.items() if len(values) != 1}
action_plan, preimage_facts = opt_plan
for name, args in action_plan:
new_args = []
for arg in args:
if isinstance(arg, Object):
new_args.append(arg)
elif isinstance(arg, OptimisticObject):
new_args.append(WILD)
# TODO: might cause some strange effects on continuous_tamp -p blocked
#assert bindings.get(arg, [])
#if len(bindings[arg]) == 1:
# new_args.append(bindings[arg][0])
#else:
# #new_args.append(WILD)
# new_args.append(arg)
else:
raise ValueError(arg)
skeleton.append((name, new_args))
# exact=False because we might need new actions
return PlanConstraints(skeletons=[skeleton], groups=groups, exact=False, max_cost=INF)
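# Illustrative example (added comment, not from the original module): an
# optimistic action ('pick', (block, #g0)) with bindings {#g0: [g1, g2]}
# becomes the skeleton ('pick', (block, WILD)) with groups {#g0: [g1, g2]},
# constraining future plans to the same action structure while leaving the
# grasp parameter open.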
def get_optimistic_solve_fn(goal_exp, domain, negative, max_cost=INF, **kwargs):
# TODO: apply to hierarchical actions representations (will need to instantiate more actions)
def fn(evaluations, results, constraints):
if constraints is None:
return plan_streams(evaluations, goal_exp, domain, results, negative,
max_cost=max_cost, **kwargs)
#print(*relaxed_stream_plan(evaluations, goal_exp, domain, results, negative,
# max_cost=max_cost, **kwargs))
#constraints.dump()
domain2 = deepcopy(domain)
evaluations2 = copy(evaluations)
goal_exp2 = add_plan_constraints(constraints, domain2, evaluations2, goal_exp, internal=True)
max_cost2 = max_cost if (constraints is None) else min(max_cost, constraints.max_cost)
return plan_streams(evaluations2, goal_exp2, domain2, results, negative,
max_cost=max_cost2, **kwargs)
return fn
##################################################
def hierarchical_plan_streams(evaluations, externals, results, optimistic_solve_fn, complexity_limit,
depth, constraints, **effort_args):
if MAX_DEPTH <= depth:
return OptSolution(None, None, INF), depth
stream_plan, opt_plan, cost = optimistic_solve_fn(evaluations, results, constraints)
if not is_plan(opt_plan) or is_refined(stream_plan):
return OptSolution(stream_plan, opt_plan, cost), depth
#action_plan, preimage_facts = opt_plan
#dump_plans(stream_plan, action_plan, cost)
#create_visualizations(evaluations, stream_plan, depth)
#print(depth, get_length(stream_plan))
#print('Stream plan ({}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
# get_length(stream_plan), compute_plan_effort(stream_plan), stream_plan,
# get_length(action_plan), cost, str_from_plan(action_plan)))
#if is_plan(stream_plan):
# for result in stream_plan:
# effort = compute_result_effort(result, unit_efforts=True)
# if effort != 0:
# print(result, effort)
#print()
# TODO: identify control parameters that can be separated across actions
new_depth = depth + 1
new_results, bindings = optimistic_stream_evaluation(evaluations, stream_plan)
if not (CONSTRAIN_STREAMS or CONSTRAIN_PLANS):
return OptSolution(FAILED, FAILED, INF), new_depth
#if CONSTRAIN_STREAMS:
# next_results = compute_stream_results(evaluations, new_results, externals, complexity_limit, **effort_args)
#else:
next_results, _ = optimistic_process_streams(evaluations, externals, complexity_limit, **effort_args)
next_constraints = None
if CONSTRAIN_PLANS:
next_constraints = compute_skeleton_constraints(opt_plan, bindings)
return hierarchical_plan_streams(evaluations, externals, next_results, optimistic_solve_fn, complexity_limit,
new_depth, next_constraints, **effort_args)
def iterative_plan_streams(all_evaluations, externals, optimistic_solve_fn, complexity_limit, **effort_args):
# Previously didn't have unique optimistic objects that could be constructed at arbitrary depths
start_time = time.time()
complexity_evals = {e: n for e, n in all_evaluations.items() if n.complexity <= complexity_limit}
num_iterations = 0
while True:
num_iterations += 1
results, exhausted = optimistic_process_streams(complexity_evals, externals, complexity_limit, **effort_args)
opt_solution, final_depth = hierarchical_plan_streams(
complexity_evals, externals, results, optimistic_solve_fn, complexity_limit,
depth=0, constraints=None, **effort_args)
stream_plan, action_plan, cost = opt_solution
print('Attempt: {} | Results: {} | Depth: {} | Success: {} | Time: {:.3f}'.format(
num_iterations, len(results), final_depth, is_plan(action_plan), elapsed_time(start_time)))
if is_plan(action_plan):
return OptSolution(stream_plan, action_plan, cost)
if final_depth == 0:
status = INFEASIBLE if exhausted else FAILED
return OptSolution(status, status, cost)
# TODO: should streams along the sampled path automatically have no optimistic value
| 11,177 |
Python
| 51.478873 | 117 | 0.668426 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/instantiate_task.py
|
from __future__ import print_function
import os
from collections import namedtuple, defaultdict, deque, Counter
from time import time
from hsr_tamp.pddlstream.algorithms.downward import get_literals, get_precondition, get_fluents, get_function_assignments, \
TRANSLATE_OUTPUT, parse_sequential_domain, parse_problem, task_from_domain_problem, GOAL_NAME, literal_holds, \
get_conjunctive_parts, get_conditional_effects
from hsr_tamp.pddlstream.algorithms.relation import Relation, compute_order, solve_satisfaction
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import flatten, apply_mapping, MockSet, elapsed_time, Verbose, safe_remove, ensure_dir, \
str_from_object, user_input, Profiler
import pddl
import instantiate
import translate
import normalize
FD_INSTANTIATE = True
InstantiatedTask = namedtuple('InstantiatedTask', ['task', 'atoms', 'actions', 'axioms',
'reachable_action_params', 'goal_list'])
def instantiate_goal(goal):
goal_list = get_conjunctive_parts(goal)
assert all(isinstance(item, pddl.Literal) for item in goal_list)
return goal_list
def get_goal_instance(goal):
return pddl.PropositionalAction(GOAL_NAME, instantiate_goal(goal), [], None)
##################################################
def get_constants(atom):
return tuple((i, a) for i, a in enumerate(atom.args) if not is_parameter(a))
def instantiate_condition(action, is_static, args_from_predicate):
parameters = {p.name for p in action.parameters}
#if not parameters:
# yield {}
# return
static_conditions = list(filter(is_static, get_literals(get_precondition(action))))
static_parameters = set(filter(is_parameter, flatten(atom.args for atom in static_conditions)))
if not (parameters <= static_parameters):
raise NotImplementedError('Could not instantiate action {} due to parameters: {}'.format(
action.name, str_from_object(parameters - static_parameters)))
atoms_from_cond = {condition: args_from_predicate[condition.predicate, get_constants(condition)]
for condition in static_conditions}
conditions, atoms = zip(*atoms_from_cond.items())
relations = [Relation(conditions[index].args, atoms[index])
for index in compute_order(conditions, atoms)]
solution = solve_satisfaction(relations)
for element in solution.body:
yield solution.get_mapping(element)
def get_reachable_action_params(instantiated_actions):
# TODO: use pddl_from_instance
reachable_action_params = defaultdict(list)
for inst_action in instantiated_actions:
action = inst_action.action
parameters = [p.name for p in action.parameters]
args = apply_mapping(parameters, inst_action.var_mapping)
reachable_action_params[action].append(args) # TODO: does this actually do anything
return reachable_action_params
##################################################
def filter_negated(conditions, negated_from_name):
return list(filter(lambda a: a.predicate not in negated_from_name, conditions))
def get_achieving_axioms(state, operators, negated_from_name={}):
# TODO: order by stream effort
# marking algorithm for propositional Horn logic
unprocessed_from_literal = defaultdict(list)
operator_from_literal = {}
remaining_from_stream = {}
reachable_operators = set() # TODO: only keep facts
queue = deque()
def process_axiom(op, effect):
reachable_operators.add(id(op))
if effect not in operator_from_literal:
operator_from_literal[effect] = op
queue.append(effect)
# TODO: could produce a list of all derived conditions
for op in operators:
preconditions = get_precondition(op)
for cond, effect in get_conditional_effects(op):
conditions = cond + preconditions
remaining_from_stream[id(op), effect] = 0
for literal in filter_negated(conditions, negated_from_name):
if literal_holds(state, literal):
operator_from_literal[literal] = None
else:
remaining_from_stream[id(op), effect] += 1
unprocessed_from_literal[literal].append((op, effect))
if remaining_from_stream[id(op), effect] == 0:
process_axiom(op, effect)
while queue:
literal = queue.popleft()
for op, effect in unprocessed_from_literal[literal]:
remaining_from_stream[id(op), effect] -= 1
if remaining_from_stream[id(op), effect] == 0:
process_axiom(op, effect)
return operator_from_literal, [op for op in operators if id(op) in reachable_operators]
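# Illustrative trace (added comment, not from the original module): from
# state {p} with operators a1: p -> q and a2: q -> r, the marking pass records
# p -> None (holds initially), fires a1 once its remaining count reaches zero,
# records q -> a1, then fires a2 and records r -> a2, returning the achiever
# map together with the reachable operators [a1, a2].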
##################################################
def instantiate_domain(task, prune_static=True):
fluent_predicates = get_fluents(task)
is_static = lambda a: isinstance(a, pddl.Atom) and (a.predicate not in fluent_predicates)
fluent_facts = MockSet(lambda a: not prune_static or not is_static(a))
init_facts = set(task.init)
function_assignments = get_function_assignments(task)
type_to_objects = instantiate.get_objects_by_type(task.objects, task.types)
constants_from_predicate = defaultdict(set)
for action in task.actions + task.axioms:
for atom in filter(is_static, get_literals(get_precondition(action))):
constants = tuple((i, a) for i, a in enumerate(atom.args) if not is_parameter(a))
constants_from_predicate[atom.predicate].add(constants)
predicate_to_atoms = defaultdict(set)
args_from_predicate = defaultdict(set)
for atom in filter(is_static, task.init): # TODO: compute which predicates might involve constants
predicate_to_atoms[atom.predicate].add(atom)
args_from_predicate[atom.predicate].add(atom.args)
for constants in constants_from_predicate[atom.predicate]:
if all(atom.args[i] == o for i, o in constants):
args_from_predicate[atom.predicate, constants].add(atom.args)
instantiated_actions = []
for action in task.actions:
for variable_mapping in instantiate_condition(action, is_static, args_from_predicate):
inst_action = action.instantiate(variable_mapping, init_facts, fluent_facts, type_to_objects,
task.use_min_cost_metric, function_assignments, predicate_to_atoms)
if inst_action:
instantiated_actions.append(inst_action)
instantiated_axioms = []
for axiom in task.axioms:
for variable_mapping in instantiate_condition(axiom, is_static, args_from_predicate):
inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
reachable_facts, reachable_operators = get_achieving_axioms(init_facts, instantiated_actions + instantiated_axioms)
atoms = {atom.positive() for atom in (init_facts | set(reachable_facts)) if isinstance(atom, pddl.Literal)}
relaxed_reachable = all(literal_holds(init_facts, goal) or goal in reachable_facts
for goal in instantiate_goal(task.goal))
reachable_actions = [action for action in reachable_operators
if isinstance(action, pddl.PropositionalAction)]
reachable_axioms = [axiom for axiom in reachable_operators
if isinstance(axiom, pddl.PropositionalAxiom)]
return relaxed_reachable, atoms, reachable_actions, reachable_axioms
##################################################
def dump_instantiated(instantiated):
print('Instantiated frequencies:\n'
'Atoms: {}\n'
'Actions: {}\n'
'Axioms: {}'.format(
str_from_object(Counter(atom.predicate for atom in instantiated.atoms)),
str_from_object(Counter(action.action.name for action in instantiated.actions)),
str_from_object(Counter(axiom.axiom.name for axiom in instantiated.axioms))))
def instantiate_task(task, check_infeasible=True, use_fd=FD_INSTANTIATE, **kwargs):
start_time = time()
print()
normalize.normalize(task)
#with Profiler(field='tottime', num=25):
if use_fd:
# TODO: recover relaxed reachability (from model)
relaxed_reachable, atoms, actions, axioms, reachable_action_params = instantiate.explore(task)
else:
relaxed_reachable, atoms, actions, axioms = instantiate_domain(task, **kwargs)
reachable_action_params = get_reachable_action_params(actions)
#for atom in sorted(filter(lambda a: isinstance(a, pddl.Literal), set(task.init) | set(atoms)),
# key=lambda a: a.predicate):
# print(fact_from_fd(atom))
#print(axioms)
#for i, action in enumerate(sorted(actions, key=lambda a: a.name)):
# print(i, transform_action_args(pddl_from_instance(action), obj_from_pddl))
print('Infeasible:', not relaxed_reachable)
print('Instantiation time: {:.3f}s'.format(elapsed_time(start_time)))
if check_infeasible and not relaxed_reachable:
return None
goal_list = instantiate_goal(task.goal)
instantiated = InstantiatedTask(task, atoms, actions, axioms, reachable_action_params, goal_list)
dump_instantiated(instantiated)
return instantiated
##################################################
def sas_from_instantiated(instantiated_task):
import timers
import fact_groups
import options
import simplify
import variable_order
from translate import translate_task, unsolvable_sas_task, strips_to_sas_dictionary, \
build_implied_facts, build_mutex_key, solvable_sas_task
start_time = time()
print()
if not instantiated_task:
return unsolvable_sas_task("No relaxed solution")
task, atoms, actions, axioms, reachable_action_params, goal_list = instantiated_task
# TODO: option to skip and just use binary variables
with timers.timing("Computing fact groups", block=True):
groups, mutex_groups, translation_key = fact_groups.compute_groups(
task, atoms, reachable_action_params)
with timers.timing("Building STRIPS to SAS dictionary"):
ranges, strips_to_sas = strips_to_sas_dictionary(
groups, assert_partial=options.use_partial_encoding)
with timers.timing("Building dictionary for full mutex groups"):
mutex_ranges, mutex_dict = strips_to_sas_dictionary(
mutex_groups, assert_partial=False)
if options.add_implied_preconditions:
with timers.timing("Building implied facts dictionary..."):
implied_facts = build_implied_facts(strips_to_sas, groups,
mutex_groups)
else:
implied_facts = {}
with timers.timing("Building mutex information", block=True):
mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
with timers.timing("Translating task", block=True):
sas_task = translate_task(
strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
task.init, goal_list, actions, axioms, task.use_min_cost_metric,
implied_facts)
if options.filter_unreachable_facts:
with timers.timing("Detecting unreachable propositions", block=True):
try:
simplify.filter_unreachable_propositions(sas_task)
except simplify.Impossible:
return unsolvable_sas_task("Simplified to trivially false goal")
except simplify.TriviallySolvable:
return solvable_sas_task("Simplified to empty goal")
if options.reorder_variables or options.filter_unimportant_vars:
with timers.timing("Reordering and filtering variables", block=True):
variable_order.find_and_apply_variable_order(
sas_task, options.reorder_variables,
options.filter_unimportant_vars)
translate.dump_statistics(sas_task)
print('Translation time: {:.3f}s'.format(elapsed_time(start_time)))
return sas_task
##################################################
def write_sas_task(sas_task, temp_dir):
translate_path = os.path.join(temp_dir, TRANSLATE_OUTPUT)
#clear_dir(temp_dir)
safe_remove(translate_path)
ensure_dir(translate_path)
with open(os.path.join(temp_dir, TRANSLATE_OUTPUT), "w") as output_file:
sas_task.output(output_file)
return translate_path
def sas_from_pddl(task, debug=False):
#normalize.normalize(task)
#sas_task = translate.pddl_to_sas(task)
with Verbose(debug):
instantiated = instantiate_task(task)
#instantiated = convert_instantiated(instantiated)
sas_task = sas_from_instantiated(instantiated)
sas_task.metric = task.use_min_cost_metric # TODO: are these sometimes not equal?
return sas_task
def translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, verbose):
domain = parse_sequential_domain(domain_pddl)
problem = parse_problem(domain, problem_pddl)
task = task_from_domain_problem(domain, problem, add_identical=False)
sas_task = sas_from_pddl(task)
write_sas_task(sas_task, temp_dir)
return task
def convert_instantiated(instantiated_task, verbose=False):
task, atoms, actions, axioms, reachable_action_params, goal_list = instantiated_task
normalize.normalize(task)
import axiom_rules
#axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goal_list)
#init = task.init + axiom_init
import options
with Verbose(verbose):
axioms, axiom_layers = axiom_rules.handle_axioms(actions, axioms, goal_list, options.layer_strategy)
init = task.init
# axioms.sort(key=lambda axiom: axiom.name)
# for axiom in axioms:
# axiom.dump()
#return InstantiatedTask(task, atoms, actions, axioms, reachable_action_params, goal_list)
return InstantiatedTask(task, init, actions, axioms, reachable_action_params, goal_list) # init instead of atoms
| 14,150 |
Python
| 44.210862 | 124 | 0.665654 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/focused.py
|
from __future__ import print_function
import time
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.advanced import enforce_simultaneous, automatically_negate_externals
from hsr_tamp.pddlstream.algorithms.common import SolutionStore
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.disabled import push_disabled, reenable_disabled, process_stream_plan
from hsr_tamp.pddlstream.algorithms.disable_skeleton import create_disabled_axioms
#from hsr_tamp.pddlstream.algorithms.downward import has_costs
from hsr_tamp.pddlstream.algorithms.incremental import process_stream_queue
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.refinement import iterative_plan_streams, get_optimistic_solve_fn
from hsr_tamp.pddlstream.algorithms.scheduling.plan_streams import OptSolution
from hsr_tamp.pddlstream.algorithms.reorder import reorder_stream_plan
from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.algorithms.visualization import reset_visualizations, create_visualizations, \
has_pygraphviz, log_plans
from hsr_tamp.pddlstream.language.constants import is_plan, get_length, str_from_plan, INFEASIBLE
from hsr_tamp.pddlstream.language.fluent import compile_fluent_streams
from hsr_tamp.pddlstream.language.function import Function, Predicate
from hsr_tamp.pddlstream.language.optimizer import ComponentStream
from hsr_tamp.pddlstream.algorithms.recover_optimizers import combine_optimizers
from hsr_tamp.pddlstream.language.statistics import load_stream_statistics, \
write_stream_statistics, compute_plan_effort
from hsr_tamp.pddlstream.language.stream import Stream, StreamResult
from hsr_tamp.pddlstream.utils import INF, implies, str_from_object, safe_zip
def get_negative_externals(externals):
negative_predicates = list(filter(lambda s: type(s) is Predicate, externals)) # and s.is_negative()
negated_streams = list(filter(lambda s: isinstance(s, Stream) and s.is_negated, externals))
return negative_predicates + negated_streams
def partition_externals(externals, verbose=False):
functions = list(filter(lambda s: type(s) is Function, externals))
negative = get_negative_externals(externals)
optimizers = list(filter(lambda s: isinstance(s, ComponentStream) and (s not in negative), externals))
streams = list(filter(lambda s: s not in (functions + negative + optimizers), externals))
if verbose:
print('Streams: {}\nFunctions: {}\nNegated: {}\nOptimizers: {}'.format(
streams, functions, negative, optimizers))
return streams, functions, negative, optimizers
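# Illustrative example (added comment, not from the original module): given
# externals [stream s, function f, negated predicate ~p, optimizer stream o],
# partition_externals returns ([s], [f], [~p], [o]), keeping negative
# externals out of the positive stream list.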
##################################################
def recover_optimistic_outputs(stream_plan):
if not is_plan(stream_plan):
return stream_plan
new_mapping = {}
new_stream_plan = []
for result in stream_plan:
new_result = result.remap_inputs(new_mapping)
new_stream_plan.append(new_result)
if isinstance(new_result, StreamResult):
opt_result = new_result.instance.opt_results[0] # TODO: empty if disabled
new_mapping.update(safe_zip(new_result.output_objects, opt_result.output_objects))
return new_stream_plan
def check_dominated(skeleton_queue, stream_plan):
if not is_plan(stream_plan):
return True
for skeleton in skeleton_queue.skeletons:
# TODO: has stream_plans and account for different output object values
if frozenset(stream_plan) <= frozenset(skeleton.stream_plan):
print(stream_plan)
print(skeleton.stream_plan)
raise NotImplementedError()
##################################################
def solve_abstract(problem, constraints=PlanConstraints(), stream_info={}, replan_actions=set(),
unit_costs=False, success_cost=INF,
max_time=INF, max_iterations=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
max_skeletons=INF, search_sample_ratio=0, bind=True, max_failures=0,
unit_efforts=False, max_effort=INF, effort_weight=None, reorder=True,
visualize=False, verbose=True, **search_kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param constraints: PlanConstraints on the set of legal solutions
:param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
:param replan_actions: the actions declared to induce replanning for the purpose of deferred stream evaluation
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param max_skeletons: the maximum number of plan skeletons (max_skeletons=None indicates not adaptive)
:param search_sample_ratio: the desired ratio of sample time / search time when max_skeletons!=None
:param bind: if True, propagates parameter bindings when max_skeletons=None
:param max_failures: the maximum number of stream failures before switching phases when max_skeletons=None
:param unit_efforts: use unit stream efforts rather than estimated numeric efforts
:param max_effort: the maximum amount of stream effort
:param effort_weight: a multiplier for stream effort compared to action costs
:param reorder: if True, reorder stream plans to minimize the expected sampling overhead
:param visualize: if True, draw the constraint network and stream plan as a graphviz file
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# TODO: select whether to search or sample based on expected success rates
# TODO: no optimizers during search with relaxed_stream_plan
# TODO: locally optimize only after a solution is identified
# TODO: replan with a better search algorithm after feasible
# TODO: change the search algorithm and unit costs based on the best cost
use_skeletons = (max_skeletons is not None)
#assert implies(use_skeletons, search_sample_ratio > 0)
eager_disabled = (effort_weight is None) # No point if no stream effort biasing
num_iterations = eager_calls = 0
complexity_limit = initial_complexity
evaluations, goal_exp, domain, externals = parse_problem(
problem, stream_info=stream_info, constraints=constraints,
unit_costs=unit_costs, unit_efforts=unit_efforts)
automatically_negate_externals(domain, externals)
enforce_simultaneous(domain, externals)
compile_fluent_streams(domain, externals)
# TODO: make effort_weight be a function of the current cost
# if (effort_weight is None) and not has_costs(domain):
# effort_weight = 1
load_stream_statistics(externals)
if visualize and not has_pygraphviz():
visualize = False
        print('Warning: visualize=True requires pygraphviz; setting visualize=False')
if visualize:
reset_visualizations()
streams, functions, negative, optimizers = partition_externals(externals, verbose=verbose)
eager_externals = list(filter(lambda e: e.info.eager, externals))
positive_externals = streams + functions + optimizers
has_optimizers = bool(optimizers) # TODO: deprecate
assert implies(has_optimizers, use_skeletons)
################
store = SolutionStore(evaluations, max_time, success_cost, verbose, max_memory=max_memory)
skeleton_queue = SkeletonQueue(store, domain, disable=not has_optimizers)
disabled = set() # Max skeletons after a solution
while (not store.is_terminated()) and (num_iterations < max_iterations) and (complexity_limit <= max_complexity):
num_iterations += 1
eager_instantiator = Instantiator(eager_externals, evaluations) # Only update after an increase?
if eager_disabled:
push_disabled(eager_instantiator, disabled)
if eager_externals:
eager_calls += process_stream_queue(eager_instantiator, store,
complexity_limit=complexity_limit, verbose=verbose)
################
print('\nIteration: {} | Complexity: {} | Skeletons: {} | Skeleton Queue: {} | Disabled: {} | Evaluations: {} | '
'Eager Calls: {} | Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
num_iterations, complexity_limit, len(skeleton_queue.skeletons), len(skeleton_queue), len(disabled),
len(evaluations), eager_calls, store.best_cost, store.search_time, store.sample_time, store.elapsed_time()))
optimistic_solve_fn = get_optimistic_solve_fn(goal_exp, domain, negative,
replan_actions=replan_actions, reachieve=use_skeletons,
max_cost=min(store.best_cost, constraints.max_cost),
max_effort=max_effort, effort_weight=effort_weight, **search_kwargs)
# TODO: just set unit effort for each stream beforehand
if (max_skeletons is None) or (len(skeleton_queue.skeletons) < max_skeletons):
disabled_axioms = create_disabled_axioms(skeleton_queue) if has_optimizers else []
if disabled_axioms:
domain.axioms.extend(disabled_axioms)
stream_plan, opt_plan, cost = iterative_plan_streams(evaluations, positive_externals,
optimistic_solve_fn, complexity_limit, max_effort=max_effort)
for axiom in disabled_axioms:
domain.axioms.remove(axiom)
else:
stream_plan, opt_plan, cost = OptSolution(INFEASIBLE, INFEASIBLE, INF) # TODO: apply elsewhere
################
#stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals) or stream_plan
stream_plan = combine_optimizers(evaluations, stream_plan)
#stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
# [s for s in synthesizers if not s.post_only])
#stream_plan = recover_optimistic_outputs(stream_plan)
if reorder:
# TODO: this blows up memory wise for long stream plans
stream_plan = reorder_stream_plan(store, stream_plan)
num_optimistic = sum(r.optimistic for r in stream_plan) if stream_plan else 0
action_plan = opt_plan.action_plan if is_plan(opt_plan) else opt_plan
print('Stream plan ({}, {}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
get_length(stream_plan), num_optimistic, compute_plan_effort(stream_plan), stream_plan,
get_length(action_plan), cost, str_from_plan(action_plan)))
if is_plan(stream_plan) and visualize:
log_plans(stream_plan, action_plan, num_iterations)
create_visualizations(evaluations, stream_plan, num_iterations)
################
if (stream_plan is INFEASIBLE) and (not eager_instantiator) and (not skeleton_queue) and (not disabled):
break
if not is_plan(stream_plan):
print('No plan: increasing complexity from {} to {}'.format(complexity_limit, complexity_limit+complexity_step))
complexity_limit += complexity_step
if not eager_disabled:
reenable_disabled(evaluations, domain, disabled)
#print(stream_plan_complexity(evaluations, stream_plan))
if not use_skeletons:
process_stream_plan(store, domain, disabled, stream_plan, opt_plan, cost, bind=bind, max_failures=max_failures)
continue
################
#optimizer_plan = replan_with_optimizers(evaluations, stream_plan, domain, optimizers)
optimizer_plan = None
if optimizer_plan is not None:
# TODO: post process a bound plan
print('Optimizer plan ({}, {:.3f}): {}'.format(
get_length(optimizer_plan), compute_plan_effort(optimizer_plan), optimizer_plan))
skeleton_queue.new_skeleton(optimizer_plan, opt_plan, cost)
allocated_sample_time = (search_sample_ratio * store.search_time) - store.sample_time \
if len(skeleton_queue.skeletons) <= max_skeletons else INF
if skeleton_queue.process(stream_plan, opt_plan, cost, complexity_limit, allocated_sample_time) is INFEASIBLE:
break
################
summary = store.export_summary()
summary.update({
'iterations': num_iterations,
'complexity': complexity_limit,
'skeletons': len(skeleton_queue.skeletons),
})
print('Summary: {}'.format(str_from_object(summary, ndigits=3))) # TODO: return the summary
write_stream_statistics(externals, verbose)
return store.extract_solution()
solve_focused = solve_abstract # TODO: deprecate solve_focused
##################################################
def solve_focused_original(problem, fail_fast=False, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param fail_fast: whether to switch phases as soon as a stream fails
    :param kwargs: keyword args for solve_abstract
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_failures = 0 if fail_fast else INF
return solve_abstract(problem, max_skeletons=None, search_sample_ratio=None,
bind=False, max_failures=max_failures, **kwargs)
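# Illustrative usage sketch (the `problem` object and keyword values are
# assumptions, not part of this module):
#   plan, cost, evaluations = solve_focused_original(problem, fail_fast=True)
# fail_fast=True sets max_failures=0, so the algorithm abandons the current
# stream plan and replans as soon as any stream evaluation fails.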
def solve_binding(problem, fail_fast=False, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param fail_fast: whether to switch phases as soon as a stream fails
    :param kwargs: keyword args for solve_abstract
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_failures = 0 if fail_fast else INF
return solve_abstract(problem, max_skeletons=None, search_sample_ratio=None,
bind=True, max_failures=max_failures, **kwargs)
def solve_adaptive(problem, max_skeletons=INF, search_sample_ratio=1, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param max_skeletons: the maximum number of plan skeletons to consider
:param search_sample_ratio: the desired ratio of search time / sample time
    :param kwargs: keyword args for solve_abstract
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_skeletons = INF if max_skeletons is None else max_skeletons
#search_sample_ratio = clip(search_sample_ratio, lower=0) # + EPSILON
#assert search_sample_ratio > 0
return solve_abstract(problem, max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
bind=None, max_failures=None, **kwargs)
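# Note on search_sample_ratio: solve_abstract allocates the sampling budget as
# (search_sample_ratio * search_time) - sample_time, so despite the name,
# larger values permit more sampling per unit of search time; e.g. with
# search_sample_ratio=2 and 10s of accumulated search, up to 20s (minus time
# already spent sampling) is granted before the next search round. The rename
# TODO in solve_hierarchical below acknowledges the inverted naming.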
def solve_hierarchical(problem, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
    :param kwargs: keyword args for solve_adaptive
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
return solve_adaptive(problem, max_skeletons=1, search_sample_ratio=INF, # TODO: rename to sample_search_ratio
bind=None, max_failures=None, **kwargs)
| 17,028 |
Python
| 54.288961 | 124 | 0.689453 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/search.py
|
from __future__ import print_function
from copy import deepcopy
from time import time
from hsr_tamp.pddlstream.algorithms.downward import run_search, TEMP_DIR, write_pddl
from hsr_tamp.pddlstream.algorithms.instantiate_task import write_sas_task, translate_and_write_pddl
from hsr_tamp.pddlstream.utils import INF, Verbose, safe_rm_dir, elapsed_time
# TODO: manual_patterns
# Specify the discrete variables that are updated via conditional effects
# http://www.fast-downward.org/Doc/PatternCollectionGenerator
# TODO: receding horizon planning
# TODO: allow switch to higher-level in heuristic
# TODO: recursive application of these
# TODO: write the domain and problem PDDL files that are used for debugging purposes
def solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **search_args):
# TODO: can solve using another planner and then still translate using FastDownward
# Can apply plan constraints (skeleton constraints) here as well
start_time = time()
with Verbose(debug):
print('\n' + 50*'-' + '\n')
write_sas_task(sas_task, temp_dir)
solution = run_search(temp_dir, debug=True, **search_args)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
#for axiom in sas_task.axioms:
# # TODO: return the set of axioms here as well
# var, value = axiom.effect
# print(sas_task.variables.value_names[var])
# axiom.dump()
return solution
def solve_from_pddl(domain_pddl, problem_pddl, temp_dir=TEMP_DIR, clean=False, debug=False, **search_kwargs):
# TODO: combine with solve_from_task
#return solve_tfd(domain_pddl, problem_pddl)
start_time = time()
with Verbose(debug):
write_pddl(domain_pddl, problem_pddl, temp_dir)
#run_translate(temp_dir, verbose)
translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, debug)
solution = run_search(temp_dir, debug=debug, **search_kwargs)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return solution
##################################################
def apply_sas_operator(init, op):
for var, pre, post, cond in op.pre_post:
assert (pre == -1) or (init.values[var] == pre)
assert not cond
init.values[var] = post
def name_from_action(action, args):
return '({})'.format(' '.join((action,) + args))
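# e.g. name_from_action('pick', ('block1', 'grasp0')) == '(pick block1 grasp0)'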
def parse_sas_plan(sas_task, plan):
op_from_name = {op.name: op for op in sas_task.operators} # No need to keep repeats
sas_plan = []
for action, args in plan:
name = name_from_action(action, args)
sas_plan.append(op_from_name[name])
return sas_plan
##################################################
SERIALIZE = 'serialize'
def plan_subgoals(sas_task, subgoal_plan, temp_dir, **kwargs):
full_plan = []
full_cost = 0
for subgoal in subgoal_plan:
sas_task.goal.pairs = subgoal
write_sas_task(sas_task, temp_dir)
plan, cost = run_search(temp_dir, debug=True, **kwargs)
if plan is None:
return None, INF
full_plan.extend(plan)
full_cost += cost
for sas_action in parse_sas_plan(sas_task, plan):
apply_sas_operator(sas_task.init, sas_action)
return full_plan, full_cost
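# Illustrative sketch: for a SAS+ goal [(v0, 1), (v1, 2)], serialized_solve_from_task
# (below) builds subgoal_plan = [[(v0, 1)], [(v0, 1), (v1, 2)]], so each search
# iteration only extends the previously achieved state by one more goal pair.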
def serialized_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
# TODO: specify goal grouping / group by predicate & objects
    # TODO: version that solves for all disjunctive subgoals at once
start_time = time()
with Verbose(debug):
print('\n' + 50*'-' + '\n')
subgoal_plan = [sas_task.goal.pairs[:i+1] for i in range(len(sas_task.goal.pairs))]
plan, cost = plan_subgoals(sas_task, subgoal_plan, temp_dir, **kwargs)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return plan, cost
##################################################
class ABSTRIPSLayer(object):
def __init__(self, pos_pre=[], neg_pre=[], pos_eff=[], neg_eff=[], horizon=INF):
self.pos_pre = pos_pre
self.neg_pre = neg_pre
self.pos_eff = pos_eff
self.neg_eff = neg_eff
self.horizon = horizon # TODO: cost units instead?
assert 1 <= self.horizon
if self.pos_eff:
raise NotImplementedError()
if self.neg_eff:
raise NotImplementedError()
##################################################
def prune_hierarchy_pre_eff(sas_task, layers):
positive_template = 'Atom {}('
negated_template = 'NegatedAtom {}('
pruned_pre = set() # TODO: effects
for layer in layers:
pruned_pre.update(positive_template.format(p.lower()) for p in layer.pos_pre)
pruned_pre.update(negated_template.format(p.lower()) for p in layer.neg_pre)
pruned = set()
for var, names in enumerate(sas_task.variables.value_names):
for val, name in enumerate(names):
if any(name.startswith(p) for p in pruned_pre):
pruned.add((var, val))
for op in sas_task.operators:
        for k, pair in reversed(list(enumerate(op.prevail))):
            if pair in pruned:
                op.prevail.pop(k)
    for k, pair in reversed(list(enumerate(sas_task.goal.pairs))):
        if pair in pruned:
            sas_task.goal.pairs.pop(k)
return pruned
def add_subgoals(sas_task, subgoal_plan):
if not subgoal_plan:
return None
subgoal_var = len(sas_task.variables.ranges)
subgoal_range = len(subgoal_plan) + 1
sas_task.variables.ranges.append(subgoal_range)
sas_task.variables.axiom_layers.append(-1)
sas_task.variables.value_names.append(
['subgoal{}'.format(i) for i in range(subgoal_range)])
sas_task.init.values.append(0)
sas_task.goal.pairs.append((subgoal_var, subgoal_range - 1))
# TODO: make this a subroutine that depends on the length
for i, op in enumerate(sas_task.operators):
if op.name not in subgoal_plan:
continue
subgoal = subgoal_plan.index(op.name) + 1
pre_post = (subgoal_var, subgoal - 1, subgoal, [])
op.pre_post.append(pre_post)
# TODO: maybe this should be the resultant state instead?
# TODO: prevail should just be the last prevail
# name = '(subgoal{}_{})'.format(subgoal, i)
# subgoal_cost = 1 # Can strengthen for stronger heuristics
# local_sas_task.operators.append(sas_tasks.SASOperator(
# name, op.prevail, [pre_post], subgoal_cost))
return subgoal_var
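# Illustrative sketch (action names hypothetical): with
# subgoal_plan = ['(pick b1)', '(place b1)'], a fresh counter variable with
# range 3 is appended; '(pick b1)' gains pre_post (subgoal_var, 0, 1, []) and
# '(place b1)' gains (subgoal_var, 1, 2, []), so the added goal pair
# (subgoal_var, 2) forces the abstract plan to be followed in order.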
def abstrips_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
# Like partial order planning in terms of precondition order
# TODO: add achieve subgoal actions
# TODO: most generic would be a heuristic on each state
if hierarchy == SERIALIZE:
return serialized_solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
if not hierarchy:
return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
start_time = time()
plan, cost = None, INF
with Verbose(debug):
print('\n' + 50*'-' + '\n')
last_plan = []
for level in range(len(hierarchy)+1):
local_sas_task = deepcopy(sas_task)
prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:]) # TODO: break if no pruned
add_subgoals(local_sas_task, last_plan)
write_sas_task(local_sas_task, temp_dir)
plan, cost = run_search(temp_dir, debug=True, **kwargs)
if (level == len(hierarchy)) or (plan is None):
# TODO: fall back on standard search
break
last_plan = [name_from_action(action, args) for action, args in plan]
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return plan, cost
##################################################
# TODO: can structure these subproblems prioritizing depth rather than width
# TODO: reconcile shared objects on each level
# Each operator in the hierarchy is a legal "operator" that may need to be refined
def abstrips_solve_from_task_sequential(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False,
hierarchy=[], subgoal_horizon=1, **kwargs):
# TODO: version that plans for each goal individually
# TODO: can reduce to goal serialization if binary flag for each subgoal
if not hierarchy:
return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
start_time = time()
plan, cost = None, INF
with Verbose(debug):
last_plan = None
for level in range(len(hierarchy) + 1):
local_sas_task = deepcopy(sas_task)
prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:]) # TODO: break if no pruned
# The goal itself is effectively a subgoal
# Handle this subgoal horizon
subgoal_plan = [local_sas_task.goal.pairs[:]]
# TODO: do I want to consider the "subgoal action" as a real action?
if last_plan is not None:
subgoal_var = add_subgoals(local_sas_task, last_plan)
subgoal_plan = [[(subgoal_var, val)] for val in range(1,
local_sas_task.variables.ranges[subgoal_var], subgoal_horizon)] + subgoal_plan
hierarchy_horizon = min(hierarchy[level-1].horizon, len(subgoal_plan))
subgoal_plan = subgoal_plan[:hierarchy_horizon]
plan, cost = plan_subgoals(local_sas_task, subgoal_plan, temp_dir, **kwargs)
if (level == len(hierarchy)) or (plan is None):
# TODO: fall back on normal
# TODO: search in space of subgoals
break
last_plan = [name_from_action(action, args) for action, args in plan]
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
# TODO: record which level of abstraction each operator is at when returning
# TODO: return instantiated actions here rather than names (including pruned pre/eff)
return plan, cost
| 10,419 |
Python
| 42.416666 | 110 | 0.621173 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/advanced.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, get_conjunctive_parts, get_disjunctive_parts
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import substitute_expression
from hsr_tamp.pddlstream.language.fluent import get_predicate_map
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE, ConstraintStream
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import find_unique, get_mapping
UNIVERSAL_TO_CONDITIONAL = False
AUTOMATICALLY_NEGATE = True # TODO: fix Yang's bug
# TODO: AUTOMATICALLY_NEGATE = False can omit collisions
def get_predicates(expression):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return set()
if isinstance(expression, pddl.conditions.JunctorCondition) or \
isinstance(expression, pddl.conditions.QuantifiedCondition):
predicates = set()
for part in expression.parts:
predicates.update(get_predicates(part))
return predicates
if isinstance(expression, pddl.conditions.Literal):
return {expression.predicate}
raise ValueError(expression)
def universal_to_conditional(action):
import pddl
new_parts = []
unsatisfiable = fd_from_fact((UNSATISFIABLE,))
for quant in get_conjunctive_parts(action.precondition):
if isinstance(quant, pddl.UniversalCondition):
condition = quant.parts[0]
# TODO: normalize first?
if isinstance(condition, pddl.Disjunction) or isinstance(condition, pddl.Literal):
action.effects.append(pddl.Effect(quant.parameters, condition.negate(), unsatisfiable))
continue
new_parts.append(quant)
action.precondition = pddl.Conjunction(new_parts)
def process_conditional_effect(effect, negative_from_predicate):
import pddl
new_parts = []
stream_facts = []
for disjunctive in get_conjunctive_parts(effect.condition):
for literal in get_disjunctive_parts(disjunctive):
# TODO: assert only one disjunctive part
if isinstance(literal, pddl.Literal) and (literal.predicate in negative_from_predicate):
stream = negative_from_predicate[literal.predicate]
if not isinstance(stream, ConstraintStream):
new_parts.append(literal)
continue
certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
mapping = get_mapping(get_args(certified), literal.args)
stream_facts.append(fd_from_fact(substitute_expression(stream.stream_fact, mapping)))
# TODO: add the negated literal as precondition here?
else:
new_parts.append(literal)
return new_parts, stream_facts
def optimizer_conditional_effects(domain, externals):
import pddl
#from hsr_tamp.pddlstream.algorithms.scheduling.negative import get_negative_predicates
# TODO: extend this to predicates
if UNIVERSAL_TO_CONDITIONAL:
negative_streams = list(filter(lambda e: e.is_negated, externals))
else:
negative_streams = list(filter(lambda e: isinstance(e, ConstraintStream) and e.is_negated, externals))
negative_from_predicate = get_predicate_map(negative_streams)
if not negative_from_predicate:
return
for action in domain.actions:
universal_to_conditional(action)
new_effects = []
for effect in action.effects:
if effect.literal.predicate != UNSATISFIABLE:
new_effects.append(effect)
continue
new_parts, stream_facts = process_conditional_effect(effect, negative_from_predicate)
if not stream_facts:
new_effects.append(effect)
for stream_fact in stream_facts:
new_effects.append(pddl.Effect(effect.parameters, pddl.Conjunction(new_parts), stream_fact))
action.effects = new_effects
def enforce_simultaneous(domain, externals):
optimizer_conditional_effects(domain, externals)
axiom_predicates = set()
for axiom in domain.axioms:
axiom_predicates.update(get_predicates(axiom.condition))
for external in externals:
if isinstance(external, ConstraintStream) and not external.info.simultaneous:
#isinstance(external, ComponentStream) and not external.outputs
# Only need for ConstraintStream because VariableStream used in action args
# TODO: apply recursively to domain conditions?
predicates = {get_prefix(fact) for fact in external.certified}
if predicates & axiom_predicates:
external.info.simultaneous = True
##################################################
def get_domain_predicates(external):
return set(map(get_prefix, external.domain))
def get_certified_predicates(external):
if isinstance(external, Stream):
return set(map(get_prefix, external.certified))
if isinstance(external, Function):
return {get_prefix(external.head)}
raise ValueError(external)
def get_interacting_externals(externals):
external_pairs = set()
for external1 in externals:
for external2 in externals:
# TODO: handle case where no domain conditions
if get_certified_predicates(external1) & get_domain_predicates(external2):
# TODO: count intersection when arity of zero
external_pairs.add((external1, external2))
if external1.is_negated:
raise ValueError('Stream [{}] can certify [{}] and thus cannot be negated'.format(
external1.name, external2.name))
return external_pairs
def get_certifiers(externals):
certifiers = defaultdict(set)
for external in externals:
for predicate in get_certified_predicates(external):
certifiers[predicate].add(external)
return certifiers
def get_negated_predicates(domain):
# TODO: generalize to more complicated formulas and recursive axioms
import pddl
negated_action_preconditions = set()
for action in domain.actions:
for part in get_conjunctive_parts(action.precondition):
# TODO: at least check more complicated parts for usage
if isinstance(part, pddl.NegatedAtom):
negated_action_preconditions.add(part.predicate)
negated_predicates = set()
for axiom in domain.axioms:
if axiom.name not in negated_action_preconditions:
continue
for part in get_conjunctive_parts(axiom.condition):
if isinstance(part, pddl.NegatedAtom):
negated_predicates.add(part.predicate)
return negated_predicates
def automatically_negate_externals(domain, externals):
negated_predicates = get_negated_predicates(domain)
certifiers = get_certifiers(externals)
producers = {e1 for e1, _ in get_interacting_externals(externals)}
non_producers = set(externals) - producers
for external in non_producers:
#if external.is_fluent:
#external.num_opt_fns = 0 # Streams that can be evaluated at the end as tests
if isinstance(external, Stream) and not external.is_negated \
and external.is_test and not external.is_fluent and external.could_succeed() \
and all((predicate in negated_predicates) and (len(certifiers[predicate]) == 1)
for predicate in get_certified_predicates(external)):
# TODO: could instead only negate if in a negative axiom
external.info.negate = True
print('Setting negate={} for stream [{}]'.format(external.is_negated, external.name))
| 7,925 |
Python
| 43.52809 | 110 | 0.679117 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/instantiation.py
|
from collections import defaultdict, namedtuple, Sized
from heapq import heappush, heappop
from itertools import product
from hsr_tamp.pddlstream.algorithms.common import COMPLEXITY_OP
from hsr_tamp.pddlstream.algorithms.relation import compute_order, Relation, solve_satisfaction
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.language.conversion import is_atom, head_from_fact
from hsr_tamp.pddlstream.utils import safe_zip, HeapElement, safe_apply_mapping
USE_RELATION = True
# TODO: maybe store unit complexity here as well as a tiebreaker
Priority = namedtuple('Priority', ['complexity', 'num']) # num ensures FIFO
def is_instance(atom, schema):
return (atom.function == schema.function) and \
all(is_parameter(b) or (a == b)
for a, b in safe_zip(atom.args, schema.args))
def test_mapping(atoms1, atoms2):
mapping = {}
for a1, a2 in safe_zip(atoms1, atoms2):
assert a1.function == a2.function
for arg1, arg2 in safe_zip(a1.args, a2.args):
if mapping.get(arg1, arg2) == arg2:
mapping[arg1] = arg2
else:
return None
return mapping
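# e.g. matching schema atoms [p(?x, a)] against ground atoms [p(b, a)] yields
# {'?x': 'b', 'a': 'a'}, while an inconsistent pairing such as p(?x, ?x)
# against p(a, b) returns None (illustrative predicate names).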
##################################################
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.43.7049&rep=rep1&type=pdf
class Instantiator(Sized): # Dynamic Instantiator
def __init__(self, streams, evaluations={}, verbose=False):
# TODO: lazily instantiate upon demand
self.streams = streams
self.verbose = verbose
#self.streams_from_atom = defaultdict(list)
self.queue = []
self.num_pushes = 0 # shared between the queues
# TODO: rename atom to head in most places
self.complexity_from_atom = {}
self.atoms_from_domain = defaultdict(list)
for stream in self.streams:
if not stream.domain:
assert not stream.inputs
self.push_instance(stream.get_instance([]))
for atom, node in evaluations.items():
self.add_atom(atom, node.complexity)
# TODO: revisit deque and add functions to front
# TODO: record the stream instances or results?
#########################
def __len__(self):
return len(self.queue)
def compute_complexity(self, instance):
domain_complexity = COMPLEXITY_OP([self.complexity_from_atom[head_from_fact(f)]
for f in instance.get_domain()] + [0])
return domain_complexity + instance.external.get_complexity(instance.num_calls)
def push_instance(self, instance):
# TODO: flush stale priorities?
complexity = self.compute_complexity(instance)
priority = Priority(complexity, self.num_pushes)
heappush(self.queue, HeapElement(priority, instance))
self.num_pushes += 1
if self.verbose:
print(self.num_pushes, instance)
def pop_stream(self):
priority, instance = heappop(self.queue)
return instance
def min_complexity(self):
priority, _ = self.queue[0]
return priority.complexity
#########################
def _add_combinations(self, stream, atoms):
if not all(atoms):
return
domain = list(map(head_from_fact, stream.domain))
# Most constrained variable/atom to least constrained
for combo in product(*atoms):
mapping = test_mapping(domain, combo)
if mapping is not None:
input_objects = safe_apply_mapping(stream.inputs, mapping)
self.push_instance(stream.get_instance(input_objects))
def _add_combinations_relation(self, stream, atoms):
if not all(atoms):
return
# TODO: might be a bug here?
domain = list(map(head_from_fact, stream.domain))
# TODO: compute this first?
relations = [Relation(filter(is_parameter, domain[index].args),
[tuple(a for a, b in safe_zip(atom.args, domain[index].args)
if is_parameter(b)) for atom in atoms[index]])
for index in compute_order(domain, atoms)]
solution = solve_satisfaction(relations)
for element in solution.body:
mapping = solution.get_mapping(element)
input_objects = safe_apply_mapping(stream.inputs, mapping)
self.push_instance(stream.get_instance(input_objects))
def _add_new_instances(self, new_atom):
for s_idx, stream in enumerate(self.streams):
for d_idx, domain_fact in enumerate(stream.domain):
domain_atom = head_from_fact(domain_fact)
if is_instance(new_atom, domain_atom):
# TODO: handle domain constants more intelligently
self.atoms_from_domain[s_idx, d_idx].append(new_atom)
atoms = [self.atoms_from_domain[s_idx, d2_idx] if d_idx != d2_idx else [new_atom]
for d2_idx in range(len(stream.domain))]
if USE_RELATION:
self._add_combinations_relation(stream, atoms)
else:
self._add_combinations(stream, atoms)
def add_atom(self, atom, complexity):
if not is_atom(atom):
return False
head = atom.head
if head in self.complexity_from_atom:
assert self.complexity_from_atom[head] <= complexity
return False
self.complexity_from_atom[head] = complexity
self._add_new_instances(head)
return True
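# Usage sketch (streams/evaluations are assumed inputs; this mirrors how
# process_stream_queue drives the class elsewhere in the codebase):
#   instantiator = Instantiator(streams, evaluations)
#   while instantiator and (instantiator.min_complexity() <= complexity_limit):
#       instance = instantiator.pop_stream()
#       # ... evaluate instance, then add_atom() each newly certified fact ...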
| 5,655 |
Python
| 40.588235 | 101 | 0.603537 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/disable_skeleton.py
|
from hsr_tamp.pddlstream.algorithms.downward import make_axiom
from hsr_tamp.pddlstream.algorithms.disabled import get_free_objects
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders, get_stream_plan_components
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
from hsr_tamp.pddlstream.language.conversion import get_args, substitute_expression
from hsr_tamp.pddlstream.language.object import OptimisticObject, UniqueOptValue
from hsr_tamp.pddlstream.utils import grow_component, adjacent_from_edges, incoming_from_edges, get_mapping, user_input, flatten
from collections import Counter
def increase_free_variables(stream_plan):
# TODO: could decrease the number of variables if a cluster is removed
free_objects = Counter(flatten(result.instance.input_objects for result in stream_plan))
for obj, num in free_objects.items():
# TODO: wait until the full plan has failed (accomplished through levels)
if isinstance(obj, OptimisticObject):
assert isinstance(obj.param, UniqueOptValue)
instance = obj.param.instance
instance.num_optimistic = max(instance.num_optimistic, num + 1)
def create_disable_axiom(external_plan, use_parameters=True):
# TODO: express constraint mutexes upfront
# TODO: investigate why use_parameters=True hurts satisfaction
# TODO: better mix optimization and sampling by determining a splitting point
# TODO: be careful about the shared objects as parameters
# TODO: need to block functions & predicates
stream_plan, _ = partition_external_plan(external_plan)
assert stream_plan
#component_plan = stream_plan
[unsatisfiable] = stream_plan[-1].get_unsatisfiable()
component_plan = list(flatten(r.get_components() for r in stream_plan[:-1])) + list(unsatisfiable)
increase_free_variables(component_plan)
#output_objects = get_free_objects(component_plan) if use_parameters else set()
constraints = [result.stream_fact for result in component_plan]
optimistic_objects = {o for f in constraints for o in get_args(f)
if isinstance(o, OptimisticObject)} # TODO: consider case when variables are free
#assert optimistic_objects <= output_objects
#free_objects = list(optimistic_objects & output_objects) # TODO: need to return all variables
free_objects = optimistic_objects
parameters = ['?p{}'.format(i) for i in range(len(free_objects))]
param_from_obj = get_mapping(free_objects, parameters)
preconditions = substitute_expression(constraints, param_from_obj)
effect = (UNSATISFIABLE,)
axiom = make_axiom(parameters, preconditions, effect)
#axiom.dump()
return axiom
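# Illustrative sketch (fact names hypothetical): if a failed cluster's stream
# facts are (_traj ?p0) and (_collision ?p0 b1), the produced axiom is roughly
# (:derived (unsatisfiable) (exists (?p0) (and (_traj ?p0) (_collision ?p0 b1)))),
# blocking any future plan that reuses the same optimistic binding pattern.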
def compute_failed_indices(skeleton):
failed_indices = set()
for binding in skeleton.root.post_order():
result = binding.result
if (result is not None) and result.instance.num_calls and (not result.instance.successful):
failed_indices.add(binding.index)
#assert not binding.children
return sorted(failed_indices)
def current_failed_cluster(binding):
assert 1 <= binding.visits
failed_result = binding.skeleton.stream_plan[binding.index]
successful_results = [result for i, result in enumerate(binding.skeleton.stream_plan)
if i not in binding.stream_indices]
stream_plan = successful_results + [failed_result]
partial_orders = get_partial_orders(stream_plan)
# All connected components
#return get_connected_components(stream_plan, partial_orders)
# Only the failed connected component
return [grow_component([failed_result], adjacent_from_edges(partial_orders))]
def current_failure_contributors(binding):
# Alternatively, find unsuccessful streams in cluster and add ancestors
assert (1 <= binding.visits) or binding.is_dominated()
failed_result = binding.skeleton.stream_plan[binding.index]
failed_indices = compute_failed_indices(binding.skeleton) # Use last index?
partial_orders = get_partial_orders(binding.skeleton.stream_plan)
incoming = incoming_from_edges(partial_orders)
failed_ancestors = grow_component([failed_result], incoming)
for index in reversed(failed_indices):
if index == binding.index:
continue
result = binding.skeleton.stream_plan[index]
ancestors = grow_component([result], incoming)
if ancestors & failed_ancestors:
failed_ancestors.update(ancestors)
return [failed_ancestors]
def extract_disabled_clusters(queue, full_cluster=False):
# TODO: include costs within clustering?
    # What if the goal is to be below a cost threshold?
# In satisfaction, no need because costs are fixed
# Make stream_facts for externals to prevent use of the same ones
# This ordering is why it's better to put likely to fail first
# Branch on the different possible binding outcomes
# TODO: consider a nonlinear version of this that evaluates out of order
# Need extra sampling effort to identify infeasible subsets
# Treat unevaluated optimistically, as in always satisfiable
# Need to keep streams with outputs to connect if downstream is infeasible
# TODO: prune streams that always have at least one success
# TODO: CSP identification of irreducible unsatisfiable subsets
# TODO: take into consideration if a stream is enumerated to mark as a hard failure
#clusters = set()
ordered_clusters = []
for skeleton in queue.skeletons:
# TODO: consider all up to the most progress
#cluster_plans = [skeleton.stream_plan]
cluster_plans = get_stream_plan_components(skeleton.stream_plan)
binding = skeleton.best_binding
if not binding.is_fully_bound:
# TODO: block if cost sensitive to possibly get cheaper solutions
cluster_plans = current_failed_cluster(binding) if full_cluster else current_failure_contributors(binding)
for cluster_plan in cluster_plans:
ordered_clusters.append(cluster_plan)
#clusters.add(frozenset(cluster_plan))
# TODO: could instead prune at this stage
return ordered_clusters
def create_disabled_axioms(queue, last_clusters=None, **kwargs):
clusters = extract_disabled_clusters(queue)
return [create_disable_axiom(cluster, **kwargs) for cluster in clusters]
| 6,468 |
Python
| 50.341269 | 128 | 0.726654 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/relation.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import INF, get_mapping
def compute_order(domain, atoms):
# Most constrained variable/atom to least constrained
# TODO: dynamically select the atom with the fewest options (minimize new additions)
# Operating on dual (select constraints rather than vars) because lower arity
order = []
parameters = set() # Include constants
for _ in range(len(domain)):
min_new = INF
min_index = None
for index in set(range(len(domain))) - set(order):
if set(filter(is_parameter, domain[index].args)) <= parameters:
min_new = 0
min_index = index
if len(atoms[index]) < min_new:
min_new = len(atoms[index])
min_index = index
order.append(min_index)
parameters.update(filter(is_parameter, domain[min_index].args))
return order
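# Illustrative sketch: for domain = [p(?x), q(?x, ?y)] where p has 3 candidate
# atoms and q has 10, p(?x) is ordered first; thereafter any atom whose
# parameters are already all bound is preferred immediately (min_new = 0).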
##################################################
# TODO: all solutions constraint satisfaction point of view: constraint propagation
# https://en.wikipedia.org/wiki/Local_consistency
# Cluster into components and then order?
class Relation(object):
def __init__(self, heading, body):
self.heading = tuple(heading)
self.body = list(body)
def get_mapping(self, element):
return get_mapping(self.heading, element)
def project_element(self, attributes, element):
value_from_attribute = self.get_mapping(element)
assert all(attr in value_from_attribute for attr in attributes)
return tuple(value_from_attribute[attr] for attr in attributes)
def get_conditional(self, inputs):
outputs = [attribute for attribute in self.heading if attribute not in inputs]
two_from_overlap = defaultdict(set)
for element in self.body:
key = self.project_element(inputs, element)
value = self.project_element(outputs, element)
two_from_overlap[key].add(value) # TODO: preserve ordering
# TODO: return a relation object?
return two_from_overlap
def subtract_attributes(self, attributes):
return tuple(attribute for attribute in self.heading if attribute not in attributes)
def dump(self):
print(self.heading)
for element in self.body:
print(element)
def __repr__(self):
return '|{}| x {}'.format(', '.join(map(str, self.heading)), len(self.body))
def overlapping_attributes(relation1, relation2):
return tuple(attribute for attribute in relation2.heading if attribute in relation1.heading)
def join(relation1, relation2):
# Alternatively, Cartesian product then filter
overlap = overlapping_attributes(relation1, relation2)
new_heading = relation1.heading + relation2.subtract_attributes(overlap)
new_body = []
two_from_overlap = relation2.get_conditional(overlap)
for element in relation1.body:
key = relation1.project_element(overlap, element)
for value in two_from_overlap[key]:
new_body.append(element + value)
return Relation(new_heading, new_body)
def solve_satisfaction(relations):
solution = Relation([], [tuple()])
for relation in relations:
solution = join(solution, relation)
return solution
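# Minimal sketch of the relational join (illustrative values; body ordering may
# vary because get_conditional accumulates values into sets):
#   r1 = Relation(['?x'], [('a',), ('b',)])
#   r2 = Relation(['?x', '?y'], [('a', '1'), ('a', '2')])
#   solve_satisfaction([r1, r2]).body  # -> [('a', '1'), ('a', '2')] up to order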
| 3,364 |
Python
| 39.542168 | 96 | 0.662604 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/downward.py
|
from __future__ import print_function
import os
import re
import sys
import subprocess
from collections import namedtuple, defaultdict
from time import time
from hsr_tamp.pddlstream.language.constants import EQ, NOT, Head, Evaluation, get_prefix, get_args, OBJECT, TOTAL_COST, Action, Not
from hsr_tamp.pddlstream.language.conversion import is_atom, is_negated_atom, objects_from_evaluations, pddl_from_object, \
pddl_list_from_expression, obj_from_pddl
from hsr_tamp.pddlstream.utils import read, write, INF, clear_dir, get_file_path, MockSet, find_unique, int_ceil, \
safe_remove, safe_zip, elapsed_time
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
USE_CERBERUS = False
#CERBERUS_PATH = '/home/caelan/Programs/cerberus' # Check if this path exists
CERBERUS_PATH = '/home/caelan/Programs/fd-redblack-ipc2018' # Check if this path exists
# Does not support derived predicates
USE_FORBID = False
FORBID_PATH = '/Users/caelan/Programs/external/ForbidIterative'
# --planner topk,topq,topkq,diverse
FORBID_TEMPLATE = 'plan.py --planner topk --number-of-plans {num} --domain {domain} --problem {problem}'
FORBID_COMMAND = os.path.join(FORBID_PATH, FORBID_TEMPLATE)
assert not USE_CERBERUS or not USE_FORBID
# Does not support derived predicates
##################################################
filepath = os.path.abspath(__file__)
if ' ' in filepath:
raise RuntimeError('The path to pddlstream cannot include spaces')
def find_build(fd_path):
for release in ['release', 'release64', 'release32']: # TODO: list the directory
path = os.path.join(fd_path, 'builds/{}/'.format(release))
if os.path.exists(path):
return path
# TODO: could also just automatically compile
raise RuntimeError('Please compile FastDownward first [.../pddlstream$ ./downward/build.py]')
# TODO: check at runtime so users can use utils without FD
FD_PATH = get_file_path(__file__, '../../downward/')
#FD_PATH = get_file_path(__file__, '../../FastDownward/')
TRANSLATE_PATH = os.path.join(find_build(FD_PATH), 'bin/translate')
FD_BIN = os.path.join(find_build(CERBERUS_PATH if USE_CERBERUS else FD_PATH), 'bin')
DOMAIN_INPUT = 'domain.pddl'
PROBLEM_INPUT = 'problem.pddl'
TRANSLATE_FLAGS = [] #if USE_CERBERUS else ['--negative-axioms']
original_argv = sys.argv[:]
sys.argv = sys.argv[:1] + TRANSLATE_FLAGS + [DOMAIN_INPUT, PROBLEM_INPUT]
sys.path.append(TRANSLATE_PATH)
# TODO: max translate time
import pddl.f_expression
import pddl
import instantiate
import pddl_parser.lisp_parser
import normalize
import pddl_parser
from pddl_parser.parsing_functions import parse_domain_pddl, parse_task_pddl, \
parse_condition, check_for_duplicates
sys.argv = original_argv
TEMP_DIR = 'temp/'
TRANSLATE_OUTPUT = 'output.sas'
SEARCH_OUTPUT = 'sas_plan'
SEARCH_COMMAND = 'downward --internal-plan-file {} {} < {}'
INFINITY = 'infinity'
GOAL_NAME = '@goal' # @goal-reachable
INTERNAL_AXIOM = 'new-axiom' # @0
IDENTICAL = "identical" # lowercase is critical (!= instead?)
INTERNAL_PREDICATES = [EQ, IDENTICAL, INTERNAL_AXIOM]
##################################################
# TODO: cost_type=PLUSONE can lead to suboptimality but often doesn't in practice due to COST_SCALE
# TODO: modify parsing_functions to support multiple costs
# bound (int): exclusive depth bound on g-values. Cutoffs are always performed according to the real cost.
# (i.e. solutions must be strictly better than the bound)
HEURISTICS = ['add', 'blind', 'cea', 'ff', 'goalcount', 'hmax', 'lmcut'] # hm
TRANSFORMS = ['NORMAL', 'ONE', 'PLUSONE']
# TODO: move toward using this style
def Heuristic(heuristic='ff', cost_type='PLUSONE'):
return '--heuristic "h={heuristic}(transform=adapt_costs(cost_type={cost_type}))"'.format(
heuristic=heuristic, cost_type=cost_type)
def EagerWeightedAStar(heuristic='ff', weight=1, cost_type='PLUSONE'):
return '--search eager_wastar(evals=[h()], preferred=[], reopen_closed=true, boost=0, w={weight}, pruning=null(), ' \
'cost_type={cost_type}, bound=infinity, max_time=infinity, verbosity=normal)'.format(
weight=weight, cost_type=cost_type)
SEARCH_OPTIONS = {
# See FastDownward's documentation for more configurations
# http://www.fast-downward.org/Doc/Evaluator
# http://www.fast-downward.org/Doc/SearchEngine
# Optimal (when cost_type=NORMAL)
'dijkstra': '--heuristic "h=blind(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
#'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=PLUSONE))"'
# ' --search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"', # cost_type=NORMAL
'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=PLUSONE))"'
' --search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"', # cost_type=PLUSONE
'lmcut-astar': '--heuristic "h=lmcut(transform=adapt_costs(cost_type=PLUSONE))"'
' --search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
# Suboptimal
'ff-astar': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'ff-eager': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([h],max_time=%s,bound=%s)"',
'ff-eager-pref': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([h],preferred=[h],max_time=%s,bound=%s)"',
'ff-lazy': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_greedy([h],preferred=[h],max_time=%s,bound=%s)"',
'goal-lazy': '--heuristic "h=goalcount(transform=no_transform())" '
'--search "lazy_greedy([h],randomize_successors=True,max_time=%s,bound=%s)"',
'add-random-lazy': '--heuristic "h=add(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_greedy([h],randomize_successors=True,max_time=%s,bound=%s)"',
'ff-eager-tiebreak': '--heuristic "h=ff(transform=no_transform())" '
'--search "eager(tiebreaking([h, g()]),reopen_closed=false,'
'cost_type=PLUSONE,max_time=%s,bound=%s, f_eval=sum([g(), h]))"', # preferred=[h],
'ff-lazy-tiebreak': '--heuristic "h=ff(transform=no_transform())" '
'--search "lazy(tiebreaking([h, g()]),reopen_closed=false,'
'randomize_successors=True,cost_type=PLUSONE,max_time=%s,bound=%s)"', # preferred=[h],
# TODO: eagerly evaluate goal count but lazily compute relaxed plan
'ff-ehc': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "ehc(h,preferred=[h],preferred_usage=RANK_PREFERRED_FIRST,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"',
# The key difference is that ehc resets the open list upon finding an improvement
# TODO: iterated search
}
# TODO: do I want to sort operators in FD hill-climbing search?
# TODO: greedily prioritize operators with less cost. Useful when prioritizing actions that have no stream cost
for w in range(1, 1+5):
SEARCH_OPTIONS.update({
# TODO: specify whether lazy or eager
'ff-wastar{w}'.format(w=w): '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=true,boost=100,w={w},'
'randomize_successors=false,preferred_successors_first=true,random_seed=-1,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
'cea-wastar{w}'.format(w=w): '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w={w},'
'randomize_successors=false,preferred_successors_first=true,random_seed=-1,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
# TODO: eager_wastar
# http://www.fast-downward.org/Doc/SearchEngine#Eager_weighted_A.2A_search
'ff-astar{w}'.format(w=w): '--evaluator "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager(alt([single(sum([g(), weight(h,{w})])),'
'single(sum([g(),weight(h,{w})]),pref_only=true)]),'
'preferred=[h],cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
})
if USE_CERBERUS:
# --internal-previous-portfolio-plans
#import imp
#plan_path = os.path.join(CERBERUS_PATH, 'plan.py')
#plan = imp.load_source('plan', plan_path)
sys.path.append(CERBERUS_PATH)
import importlib
mod = importlib.import_module("plan-agl") # plan | plan-agl | plan-cbo | plan-sat
#SEARCH_OPTIONS['cerberus'] = ' '.join(p.strip() for s in mod.config_string() for p in s.split('\n')) # .replace('\n', ' ')
SEARCH_OPTIONS['cerberus'] = ' '.join(s if s.startswith('--') else '"{}"'.format(s)
for s in mod.config_string())
# TODO: throw a warning if max_planner_time is met
DEFAULT_MAX_TIME = 30 # INF
DEFAULT_CONSERVATIVE_PLANNER = 'ff-astar'
DEFAULT_GREEDY_PLANNER = 'ff-astar2'
DEFAULT_PLANNER = DEFAULT_GREEDY_PLANNER
def print_search_options():
for i, (name, command) in enumerate(sorted(SEARCH_OPTIONS.items())):
print('\n{}) {}: {}'.format(i, name, command))
##################################################
# WARNING: overflow on h^add! Costs clamped to 100000000
MAX_FD_COST = 1e8
def round_cost(cost):
cost_scale = get_cost_scale()
return int(cost_scale * cost) / cost_scale
def get_cost_scale():
return pddl.f_expression.COST_SCALE
def set_cost_scale(cost_scale):
pddl.f_expression.COST_SCALE = cost_scale
def convert_value(value):
if value == INF:
return INFINITY
return int_ceil(value)
def scale_cost(cost):
if cost == INF:
return INF
return int_ceil(get_cost_scale() * float(cost))
def get_min_unit():
return 1. / get_cost_scale()
set_cost_scale(cost_scale=1e3) # TODO: make unit costs be equivalent to cost scale = 0
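# e.g. with the default cost_scale=1e3: scale_cost(1.2345) == 1235 (the integer
# ceiling FastDownward receives), while round_cost(1.2345) == 1.234 matches the
# resolution recovered when parse_solution divides costs by the scale.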
##################################################
def parse_lisp(lisp):
return pddl_parser.lisp_parser.parse_nested_list(lisp.splitlines())
# TODO: dynamically generate type_dict and predicate_dict
Domain = namedtuple('Domain', ['name', 'requirements', 'types', 'type_dict', 'constants',
'predicates', 'predicate_dict', 'functions', 'actions', 'axioms', 'pddl'])
def parse_sequential_domain(domain_pddl):
if isinstance(domain_pddl, Domain):
return domain_pddl
args = list(parse_domain_pddl(parse_lisp(domain_pddl))) + [domain_pddl]
domain = Domain(*args)
# for action in domain.actions:
# if (action.cost is not None) and isinstance(action.cost, pddl.Increase) and isinstance(action.cost.expression, pddl.NumericConstant):
# action.cost.expression.value = scale_cost(action.cost.expression.value)
return domain
Problem = namedtuple('Problem', ['task_name', 'task_domain_name', 'task_requirements',
'objects', 'init', 'goal', 'use_metric', 'pddl'])
def parse_problem(domain, problem_pddl):
if isinstance(problem_pddl, Problem):
return problem_pddl
args = list(parse_task_pddl(parse_lisp(problem_pddl), domain.type_dict, domain.predicate_dict)) + [problem_pddl]
return Problem(*args)
#def parse_action(lisp_list):
# action = [':action', 'test'
# ':parameters', [],
# ':precondition', [],
# ':effect', []]
# parse_action(action)
# pddl_parser.parsing_functions.parse_action(lisp_list, [], {})
# return pddl.Action
##################################################
# fact -> evaluation -> fd
def fd_from_fact(fact):
# TODO: convert to evaluation?
prefix = get_prefix(fact)
if prefix == NOT:
return fd_from_fact(fact[1]).negate()
#if prefix == EQ:
# _, head, value = fact
# predicate = get_prefix(head)
# args = list(map(pddl_from_object, get_args(head)))
# fluent = pddl.f_expression.PrimitiveNumericExpression(symbol=predicate, args=args)
# expression = pddl.f_expression.NumericConstant(value)
# return pddl.f_expression.Assign(fluent, expression)
args = list(map(pddl_from_object, get_args(fact)))
return pddl.Atom(prefix, args)
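# e.g. fd_from_fact(('on', b1, b2)) -> pddl.Atom('on', ('b1', 'b2')), and
# fd_from_fact((NOT, ('on', b1, b2))) returns the corresponding negated atom;
# b1/b2 stand for pddlstream Objects serialized by pddl_from_object
# (illustrative names).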
def fact_from_fd(fd):
assert(isinstance(fd, pddl.Literal))
atom = (fd.predicate,) + tuple(map(obj_from_pddl, fd.args))
return Not(atom) if fd.negated else atom
def evaluation_from_fd(fd):
if isinstance(fd, pddl.Literal):
head = Head(fd.predicate, tuple(map(obj_from_pddl, fd.args)))
return Evaluation(head, not fd.negated)
if isinstance(fd, pddl.f_expression.Assign):
head = Head(fd.fluent.symbol, tuple(map(obj_from_pddl, fd.fluent.args)))
return Evaluation(head, float(fd.expression.value) / get_cost_scale()) # Need to be careful due to rounding
raise ValueError(fd)
def fd_from_evaluation(evaluation):
name = evaluation.head.function
args = tuple(map(pddl_from_object, evaluation.head.args))
if is_atom(evaluation):
return pddl.Atom(name, args)
elif is_negated_atom(evaluation):
return pddl.NegatedAtom(name, args)
fluent = pddl.f_expression.PrimitiveNumericExpression(symbol=name, args=args)
expression = pddl.f_expression.NumericConstant(evaluation.value)
return pddl.f_expression.Assign(fluent, expression)
def fd_from_evaluations(evaluations):
return [fd_from_evaluation(e) for e in evaluations if not is_negated_atom(e)]
##################################################
def parse_goal(goal_exp, domain):
#try:
# pass
#except SystemExit as e:
# return False
return parse_condition(pddl_list_from_expression(goal_exp),
domain.type_dict, domain.predicate_dict).simplified()
def get_problem(evaluations, goal_exp, domain, unit_costs=False):
objects = objects_from_evaluations(evaluations)
typed_objects = list({make_object(pddl_from_object(obj)) for obj in objects} - set(domain.constants))
# TODO: this doesn't include =
init = fd_from_evaluations(evaluations)
goal = pddl.Truth() if goal_exp is None else parse_goal(goal_exp, domain)
#print('{} objects and {} atoms'.format(len(objects), len(init)))
problem_pddl = None
if USE_FORBID:
problem_pddl = get_problem_pddl(evaluations, goal_exp, domain.pddl, temporal=False)
write_pddl(domain.pddl, problem_pddl, temp_dir=TEMP_DIR)
return Problem(task_name=domain.name, task_domain_name=domain.name,
objects=sorted(typed_objects, key=lambda o: o.name),
task_requirements=pddl.tasks.Requirements([]), init=init, goal=goal,
use_metric=not unit_costs, pddl=problem_pddl)
def get_identical_atoms(objects):
# TODO: optimistically evaluate (not (= ?o1 ?o2))
init = []
for fd_obj in objects:
obj = obj_from_pddl(fd_obj.name)
if obj.is_unique():
init.append(pddl.Atom(IDENTICAL, (fd_obj.name, fd_obj.name)))
else:
assert obj.is_shared()
return init
def task_from_domain_problem(domain, problem, add_identical=True):
    # TODO: prune evaluations that aren't needed in actions
#domain_name, domain_requirements, types, type_dict, constants, \
# predicates, predicate_dict, functions, actions, axioms = domain
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric, problem_pddl = problem
assert domain.name == task_domain_name
requirements = pddl.Requirements(sorted(set(domain.requirements.requirements +
task_requirements.requirements)))
objects = domain.constants + objects
check_for_duplicates([o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
init.extend(pddl.Atom(EQ, (obj.name, obj.name)) for obj in objects)
if add_identical:
init.extend(get_identical_atoms(objects))
#print('{} objects and {} atoms'.format(len(objects), len(init)))
task = pddl.Task(domain.name, task_name, requirements, domain.types, objects,
domain.predicates, domain.functions, init, goal,
domain.actions, domain.axioms, use_metric)
normalize.normalize(task)
# task.add_axiom
return task
##################################################
def get_derived_predicates(axioms):
axioms_from_name = defaultdict(list)
for axiom in axioms:
axioms_from_name[axiom.name].append(axiom)
return axioms_from_name
def get_fluents(domain):
fluent_predicates = set(get_derived_predicates(domain.axioms))
for action in domain.actions:
for effect in action.effects:
fluent_predicates.add(effect.literal.predicate)
return fluent_predicates
def is_literal(condition):
return isinstance(condition, pddl.Literal)
def get_literals(condition):
if is_literal(condition):
return [condition]
if isinstance(condition, pddl.Truth):
return []
if isinstance(condition, pddl.Conjunction):
literals = []
for c in condition.parts:
literals.extend(get_literals(c))
return literals
raise ValueError(condition)
def get_conjunctive_parts(condition):
# TODO: apply recursively
return condition.parts if isinstance(condition, pddl.Conjunction) else [condition]
def get_disjunctive_parts(condition):
return condition.parts if isinstance(condition, pddl.Disjunction) else [condition]
##################################################
def normalize_domain_goal(domain, goal_exp):
evaluations = []
problem = get_problem(evaluations, goal_exp, domain, unit_costs=False)
task = task_from_domain_problem(domain, problem)
normalize.normalize(task)
return task
def run_search(temp_dir, planner=DEFAULT_PLANNER, max_planner_time=DEFAULT_MAX_TIME,
max_cost=INF, debug=False):
"""
Runs FastDownward's search phase on translated SAS+ problem TRANSLATE_OUTPUT
:param temp_dir: the directory for temporary FastDownward input and output files
:param planner: a keyword for the FastDownward search configuration in SEARCH_OPTIONS
:param max_planner_time: the maximum runtime of FastDownward
:param max_cost: the maximum FastDownward plan cost
:param debug: If True, print the FastDownward search output
:return: a tuple (plan, cost) where plan is a sequence of PDDL actions
(or None) and cost is the cost of the plan (INF if no plan)
"""
max_time = convert_value(max_planner_time)
max_cost = convert_value(scale_cost(max_cost))
start_time = time()
search = os.path.abspath(os.path.join(FD_BIN, SEARCH_COMMAND))
if planner == 'cerberus':
planner_config = SEARCH_OPTIONS[planner] # Check if max_time, max_cost exist
else:
planner_config = SEARCH_OPTIONS[planner] % (max_time, max_cost)
temp_dir = os.path.abspath(temp_dir)
command = search.format(os.path.join(temp_dir, SEARCH_OUTPUT), planner_config,
os.path.join(temp_dir, TRANSLATE_OUTPUT))
domain_path = os.path.abspath(os.path.join(temp_dir, DOMAIN_INPUT))
problem_path = os.path.abspath(os.path.join(temp_dir, PROBLEM_INPUT))
if USE_FORBID:
command = FORBID_COMMAND.format(num=2, domain=domain_path, problem=problem_path)
if debug:
print('Search command:', command)
# os.popen is deprecated
# run, call, check_call, check_output
#with subprocess.Popen(command.split(), stdout=subprocess.PIPE, shell=True, cwd=None) as proc:
# output = proc.stdout.read()
# CalledProcessError
#try:
# output = subprocess.check_output(command, shell=True, cwd=None) #, timeout=None)
#except subprocess.CalledProcessError as e:
# print(e)
#temp_path = temp_dir
temp_path = os.path.join(os.getcwd(), TEMP_DIR) # TODO: temp dir?
for filename in os.listdir(temp_path):
if filename.startswith(SEARCH_OUTPUT):
safe_remove(os.path.join(temp_path, filename))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, cwd=None, close_fds=True)
output, error = proc.communicate()
#if proc.returncode not in [0, 12]: # Good: [0, 12] | Bad: [127]
# raise RuntimeError(proc.returncode)
if USE_FORBID:
for filename in os.listdir(FORBID_PATH):
if filename.startswith(SEARCH_OUTPUT):
os.rename(os.path.join(FORBID_PATH, filename), os.path.join(temp_path, filename))
if debug:
print(output.decode(encoding='UTF-8')[:-1])
print('Search runtime: {:.3f}'.format(elapsed_time(start_time)))
plan_files = sorted(f for f in os.listdir(temp_path) if f.startswith(SEARCH_OUTPUT))
print('Plans:', plan_files)
return parse_solutions(temp_path, plan_files)
##################################################
def parse_action(line):
entries = line.strip('( )').split(' ')
name = entries[0]
args = tuple(entries[1:])
return Action(name, args)
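# e.g. parse_action('(pick block1 grasp0)') == Action('pick', ('block1', 'grasp0'))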
def parse_solution(solution):
#action_regex = r'\((\w+(\s+\w+)\)' # TODO: regex
cost = INF
if solution is None:
return None, cost
cost_regex = r'cost\s*=\s*(\d+)'
matches = re.findall(cost_regex, solution)
if matches:
cost = float(matches[0]) / get_cost_scale()
# TODO: recover the actual cost of the plan from the evaluations
lines = solution.split('\n')[:-2] # Last line is newline, second to last is cost
plan = list(map(parse_action, lines))
return plan, cost
def parse_solutions(temp_path, plan_files):
# TODO: return multiple solutions for focused
best_plan, best_cost = None, INF
for plan_file in plan_files:
solution = read(os.path.join(temp_path, plan_file))
plan, cost = parse_solution(solution)
if cost < best_cost:
best_plan, best_cost = plan, cost
return best_plan, best_cost
def write_pddl(domain_pddl=None, problem_pddl=None, temp_dir=TEMP_DIR):
clear_dir(temp_dir)
domain_path = os.path.join(temp_dir, DOMAIN_INPUT)
if domain_pddl is not None:
write(domain_path, domain_pddl)
problem_path = os.path.join(temp_dir, PROBLEM_INPUT)
if problem_pddl is not None:
write(problem_path, problem_pddl)
return domain_path, problem_path
##################################################
def literal_holds(state, literal):
#return (literal in state) != literal.negated
return (literal.positive() in state) != literal.negated
def conditions_hold(state, conditions):
return all(literal_holds(state, cond) for cond in conditions)
def get_precondition(operator):
if isinstance(operator, pddl.Action) or isinstance(operator, pddl.PropositionalAction):
return operator.precondition
elif isinstance(operator, pddl.Axiom) or isinstance(operator, pddl.PropositionalAxiom):
return operator.condition
raise ValueError(operator)
def get_conditional_effects(operator):
if isinstance(operator, pddl.PropositionalAction):
return [(cond, effect.negate()) for cond, effect in operator.del_effects] + \
[(cond, effect) for cond, effect in operator.add_effects]
elif isinstance(operator, pddl.PropositionalAxiom):
return [([], operator.effect)]
raise ValueError(operator)
def get_effects(operator):
# TODO: conditional effects
return [effect for _, effect in get_conditional_effects(operator)]
def is_applicable(state, action):
return conditions_hold(state, get_precondition(action))
def apply_action(state, action):
assert(isinstance(action, pddl.PropositionalAction))
# TODO: signed literals
# TODO: relaxed_apply_action
for conditions, effect in action.del_effects:
if conditions_hold(state, conditions):
state.discard(effect)
for conditions, effect in action.add_effects:
if conditions_hold(state, conditions):
state.add(effect)
def apply_axiom(state, axiom):
    assert(isinstance(axiom, pddl.PropositionalAxiom))
state.add(axiom.effect)
def is_valid_plan(initial_state, plan): #, goal):
state = set(initial_state)
for action in plan:
if not is_applicable(state, action):
return False
apply_action(state, action)
return True
#def apply_lifted_action(state, action):
# assert(isinstance(state, pddl.Action))
# assert(not action.parameters)
# for effect in state.effects:
# assert(not effect.parameters)
def plan_cost(plan):
cost = 0
for action in plan:
cost += action.cost
return cost
def substitute_derived(axiom_plan, action_instance):
# TODO: what if the propositional axiom has conditional derived
axiom_pre = {p for ax in axiom_plan for p in ax.condition}
axiom_eff = {ax.effect for ax in axiom_plan}
action_instance.precondition = list((set(action_instance.precondition) | axiom_pre) - axiom_eff)
##################################################
def get_function_assignments(task):
return {f.fluent: f.expression for f in task.init
if isinstance(f, pddl.f_expression.FunctionAssignment)}
def get_action_instances(task, action_plan):
type_to_objects = instantiate.get_objects_by_type(task.objects, task.types)
function_assignments = get_function_assignments(task)
predicate_to_atoms = instantiate.get_atoms_by_predicate(task.init)
fluent_facts = MockSet()
init_facts = set()
action_instances = []
for name, objects in action_plan:
# TODO: what if more than one action of the same name due to normalization?
        # Normalized actions have the same effects, so I just have to pick one
# TODO: conditional effects and internal parameters
action = find_unique(lambda a: a.name == name, task.actions)
args = list(map(pddl_from_object, objects))
variable_mapping = {p.name: a for p, a in safe_zip(action.parameters, args)}
instance = action.instantiate(variable_mapping, init_facts, fluent_facts, type_to_objects,
task.use_min_cost_metric, function_assignments, predicate_to_atoms)
assert (instance is not None)
action_instances.append(instance)
return action_instances
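# Example usage (sketch): re-grounding a (name, objects) plan returned by the
# planner against the translated task; the plan below is hypothetical, with
# b1 and b2 standing in for pddlstream Objects.
#   action_plan = [('pick', (b1,)), ('stack', (b1, b2))]
#   instances = get_action_instances(task, action_plan)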
##################################################
def add_preimage_condition(condition, preimage, i):
for literal in condition:
#preimage[literal] = preimage.get(literal, set()) | {i}
preimage.setdefault(literal, set()).add(i)
#preimage.update(condition)
def add_preimage_effect(effect, preimage):
preimage.pop(effect, None)
#if effect in preimage:
# # Fluent effects kept, static dropped
# preimage.remove(effect)
def has_conditional_effects(action_instance):
for conditions, effect in (action_instance.add_effects + action_instance.del_effects):
if conditions:
return True
return False
def action_preimage(action, preimage, i):
for conditions, effect in (action.add_effects + action.del_effects):
assert(not conditions)
# TODO: can later select which conditional effects are used
    # TODO: might need to truly decide whether one should hold or not for a preimage
# Maybe I should do that here
add_preimage_effect(effect, preimage)
add_preimage_condition(action.precondition, preimage, i)
def axiom_preimage(axiom, preimage, i):
add_preimage_effect(axiom.effect, preimage)
add_preimage_condition(axiom.condition, preimage, i)
def plan_preimage(combined_plan, goal=[]):
#preimage = set(goal)
action_plan = [action for action in combined_plan if isinstance(action, pddl.PropositionalAction)]
step = len(action_plan)
preimage = {condition: {step} for condition in goal}
for operator in reversed(combined_plan):
if isinstance(operator, pddl.PropositionalAction):
step -= 1
action_preimage(operator, preimage, step)
elif isinstance(operator, pddl.PropositionalAxiom):
axiom_preimage(operator, preimage, step)
else:
raise ValueError(operator)
return preimage
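# Example (sketch): regressing a combined action/axiom plan to the facts it
# requires from the initial state. The result maps each literal to the set of
# step indices that rely on it (the goal counts as the final step); literals
# achieved internally by an earlier operator are popped along the way.
#   preimage = plan_preimage(combined_plan, goal=goal_literals)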
##################################################
def add_predicate(domain, predicate):
if predicate.name in domain.predicate_dict:
return False
domain.predicates.append(predicate)
domain.predicate_dict[predicate.name] = predicate
return True
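# Example usage (sketch): registering a fresh predicate on a parsed domain;
# the predicate name and arity are hypothetical.
#   added = add_predicate(domain, make_predicate('_reachable', ['?q1', '?q2']))
#   # added is False if the domain already declares '_reachable'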
def make_object(obj, type=OBJECT):
return pddl.TypedObject(obj, type)
def make_parameters(parameters, **kwargs):
return tuple(make_object(p, **kwargs) for p in parameters)
def make_predicate(name, parameters):
return pddl.Predicate(name, make_parameters(parameters))
def make_preconditions(preconditions):
return pddl.Conjunction(list(map(fd_from_fact, preconditions)))
def make_effects(effects):
return [pddl.Effect(parameters=[], condition=pddl.Truth(),
literal=fd_from_fact(fact)) for fact in effects]
def make_cost(cost):
if cost is None:
return cost
fluent = pddl.PrimitiveNumericExpression(symbol=TOTAL_COST, args=[])
try:
expression = pddl.NumericConstant(cost)
except TypeError:
expression = pddl.PrimitiveNumericExpression(
symbol=get_prefix(cost), args=list(map(pddl_from_object, get_args(cost))))
return pddl.Increase(fluent=fluent, expression=expression)
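# Example (sketch): a numeric cost becomes a constant (increase (total-cost) c)
# effect, while a fact-like cost is treated as a fluent expression. The
# 'distance' function below is hypothetical.
#   make_cost(5)                     # constant increase of 5
#   make_cost(('distance', q1, q2))  # fluent (distance q1 q2) increase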
def has_costs(domain):
    for action in domain.actions:
        if action.cost is not None: # None means no cost was specified
            return True
    return False
def set_unit_costs(domain):
# Cost of None becomes zero if metric = True
#set_cost_scale(1)
for action in domain.actions:
action.cost = make_cost(1)
def make_action(name, parameters, preconditions, effects, cost=None):
# Usually all parameters are external
return pddl.Action(name=name,
parameters=make_parameters(parameters),
num_external_parameters=len(parameters),
precondition=make_preconditions(preconditions),
effects=make_effects(effects),
cost=make_cost(cost))
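# Example usage (sketch): assembling a simple unit-cost action; all predicate
# and parameter names are hypothetical.
#   action = make_action(
#       name='move',
#       parameters=['?q1', '?q2'],
#       preconditions=[('motion', '?q1', '?q2'), ('at-conf', '?q1')],
#       effects=[('at-conf', '?q2')],
#       cost=1)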
def make_axiom(parameters, preconditions, derived):
predicate = get_prefix(derived)
external_parameters = list(get_args(derived))
internal_parameters = [p for p in parameters if p not in external_parameters]
parameters = external_parameters + internal_parameters
return pddl.Axiom(name=predicate,
parameters=make_parameters(parameters),
num_external_parameters=len(external_parameters),
condition=make_preconditions(preconditions))
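# Example usage (sketch): external parameters come from the derived fact, and
# any remaining parameters become internal (existential) ones; names are
# hypothetical.
#   axiom = make_axiom(
#       parameters=['?b1', '?b2'],
#       preconditions=[('on', '?b2', '?b1')],
#       derived=('unsafe', '?b1'))  # '?b2' ends up internal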
def make_domain(constants=[], predicates=[], functions=[], actions=[], axioms=[]):
types = [pddl.Type(OBJECT)]
pddl_parser.parsing_functions.set_supertypes(types)
return Domain(name='', requirements=pddl.Requirements([]),
types=types, type_dict={ty.name: ty for ty in types}, constants=constants,
predicates=predicates, predicate_dict={p.name: p for p in predicates},
functions=functions, actions=actions, axioms=axioms, pddl=None)
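# Example usage (sketch): building a minimal single-type domain from the
# helpers above; everything shown is hypothetical.
#   predicates = [make_predicate('at-conf', ['?q']),
#                 make_predicate('motion', ['?q1', '?q2'])]
#   actions = [make_action('move', ['?q1', '?q2'],
#                          [('motion', '?q1', '?q2'), ('at-conf', '?q1')],
#                          [('at-conf', '?q2')])]
#   domain = make_domain(predicates=predicates, actions=actions)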
def pddl_from_instance(instance):
action = instance.action
args = [instance.var_mapping[p.name]
for p in action.parameters[:action.num_external_parameters]]
return Action(action.name, args)
| 31,750 |
Python
| 40.887863 | 142 | 0.652409 |