file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1004/landmark_parser.py
|
#! /usr/bin/env python
import re
from lab.parser import Parser
parser = Parser()
parser.add_pattern(
"lmgraph_generation_time",
r"Landmark graph generation time: (.+)s",
type=float)
parser.add_pattern(
"landmarks",
r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.",
type=int)
parser.add_pattern(
"landmarks_disjunctive",
r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.",
type=int)
parser.add_pattern(
"landmarks_conjunctive",
r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.",
type=int)
parser.add_pattern(
"orderings",
r"Landmark graph contains (\d+) orderings.",
type=int)
parser.parse()
| 802 |
Python
| 25.766666 | 102 | 0.678304 |
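As a reference for how these patterns behave, here is a minimal standalone sketch using plain `re` on a hypothetical log line (the wording mirrors the regexes above; actual planner output may differ):

```python
import re

# Hypothetical run.log line, worded to match the patterns registered above.
line = "Landmark graph contains 42 landmarks, of which 3 are disjunctive and 1 are conjunctive."
match = re.search(
    r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.",
    line)
if match:
    # lab would store this value under the "landmarks" attribute of the run.
    print(int(match.group(1)))  # -> 42
```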
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1004/v3-v4-optimal.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
from common_setup import IssueConfig, IssueExperiment
import os
from lab.reports import Attribute
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
def make_comparison_table():
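    # Note: ATTRIBUTES and exp are defined at module level further down; this works
    # because the function only runs later, when the "comparison table" step executes.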
report = common_setup.ComparativeReport(
algorithm_pairs=[
("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v3-seq-opt-bjolp"),
("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"),
("issue1004-v3-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"),
("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v3-seq-opt-bjolp-opt"),
("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"),
("issue1004-v3-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"),
], attributes=ATTRIBUTES,
)
outfile = os.path.join(
exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format)
)
report(exp.eval_dir, outfile)
exp.add_report(report)
REVISIONS = [
"issue1004-base-with-stats",
"issue1004-v3",
"issue1004-v4",
]
CONFIGS = [
common_setup.IssueConfig("seq-opt-bjolp", [],
driver_options=["--alias", "seq-opt-bjolp"]),
common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]),
]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REPO = os.environ["DOWNWARD_REPO"]
if common_setup.is_running_on_cluster():
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"],
)
else:
SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=2)
exp = common_setup.IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser("landmark_parser.py")
ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
Attribute("landmarks", min_wins=False),
Attribute("landmarks_disjunctive", min_wins=False),
Attribute("landmarks_conjunctive", min_wins=False),
Attribute("orderings", min_wins=False),
Attribute("lmgraph_generation_time"),
]
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_step("comparison table", make_comparison_table)
exp.add_parse_again_step()
exp.run_steps()
| 2,750 |
Python
| 30.988372 | 191 | 0.679636 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue736/v1.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab import tools
from downward.reports.compare import ComparativeReport
from downward.reports import PlanningReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue736-base", "issue736-v1"]
CONFIGS = [
IssueConfig(
"translate-only",
[],
driver_options=["--translate"])
]
SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE)
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
del exp.commands["parse-search"]
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource("translator_additional_parser", "translator_additional_parser.py", dest="translator_additional_parser.py")
exp.add_command("translator_additional_parser", ["{translator_additional_parser}"])
class TranslatorDiffReport(PlanningReport):
def get_cell(self, run):
return ";".join(run.get(attr) for attr in self.attributes)
def get_text(self):
lines = []
for runs in self.problem_runs.values():
hashes = set([r.get("translator_output_sas_xz_hash") for r in runs])
if len(hashes) > 1 or None in hashes:
lines.append(";".join([self.get_cell(r) for r in runs]))
return "\n".join(lines)
exp.add_report(TranslatorDiffReport(
attributes=["domain", "problem", "algorithm", "run_dir"]
), outfile="different_output_sas.csv"
)
exp.run_steps()
| 1,887 |
Python
| 29.451612 | 123 | 0.703763 |
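The essential logic of `TranslatorDiffReport` is the per-problem hash comparison; a rough standalone sketch of that check on hypothetical run data (attribute names as in the script above):

```python
# Two runs of the same problem under different revisions (hypothetical values).
runs = [
    {"algorithm": "issue736-base-translate-only", "translator_output_sas_xz_hash": "abc123"},
    {"algorithm": "issue736-v1-translate-only", "translator_output_sas_xz_hash": "def456"},
]
hashes = set(run.get("translator_output_sas_xz_hash") for run in runs)
# More than one distinct hash (or a missing hash) means the revisions produced
# different translator output for this problem, so the row ends up in the CSV.
if len(hashes) > 1 or None in hashes:
    print("translator output differs for this problem")
```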
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue633/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
configs = [
IssueConfig(
"cegar-10K-original",
["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]),
IssueConfig(
"cegar-10K-landmarks-goals",
["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=10000,max_time=infinity))"]),
IssueConfig(
"cegar-900s-landmarks-goals",
["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=infinity,max_time=900))"]),
]
exp = IssueExperiment(
revisions=["issue633-base", "issue633-v1"],
configs=configs,
suite=suites.suite_optimal_with_ipc11(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_comparison_table_step()
exp()
| 862 |
Python
| 25.968749 | 106 | 0.647332 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue638/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
self.add_pattern(
"num_sga_patterns",
"Found (\d+) SGA patterns.",
required=False,
type=int)
self.add_pattern(
"num_interesting_patterns",
"Found (\d+) interesting patterns.",
required=False,
type=int)
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
| 562 |
Python
| 21.519999 | 48 | 0.52847 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue638/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue638-base", "issue638-v1"]
CONFIGS = [
IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)])
for heuristic in [
"cpdbs(patterns=systematic(3), dominance_pruning=true)",
"cpdbs(patterns=systematic(4), dominance_pruning=true)",
"operatorcounting([pho_constraints(patterns=systematic(3))])",
"operatorcounting([pho_constraints(patterns=systematic(4))])",
]
]
sys.path.append(BENCHMARKS_DIR)
import suites
SUITE = suites.suite_optimal_strips()
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command("parser", ["custom-parser.py"])
exp.add_comparison_table_step(
attributes=exp.DEFAULT_TABLE_ATTRIBUTES +
["num_sga_patterns", "num_interesting_patterns"])
exp.add_scatter_plot_step(attributes=["total_time"])
exp()
| 1,355 |
Python
| 27.249999 | 71 | 0.709963 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue739/v2-search.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-base", "issue739-v2"]
CONFIGS = [
IssueConfig('search-time-limit', ['--search', 'astar(blind())'], driver_options=['--search-time-limit', '20s']),
IssueConfig('search-memory-limit', ['--search', 'astar(blind())'], driver_options=['--search-memory-limit', '100M']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ['gripper:prob10.pddl','mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('exit_code_converter_parser', 'exit-code-converter-parser.py', dest='exit-code-converter-parser.py')
exp.add_command('exit-code-converter-parser', ['{exit_code_converter_parser}'])
exp.add_comparison_table_step()
exp.run_steps()
| 1,332 |
Python
| 35.027026 | 121 | 0.733483 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue739/exit-code-converter-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
LEGACY_TO_NEW_EXIT_CODES = {
'critical-error': 'search-critical-error',
'input-error': 'search-input-error',
'unsupported-feature-requested': 'search-unsupported',
'unsolvable': 'search-unsolvable',
'incomplete-search-found-no-plan': 'search-unsolvable-incomplete',
'out-of-memory': 'search-out-of-memory',
'timeout': 'search-out-of-time',
'timeout-and-out-of-memory': 'search-out-of-memory-and-time',
}
def convert_legacy_to_new_exit_codes(content, props):
error = props['error']
if error in LEGACY_TO_NEW_EXIT_CODES:
props['error'] = LEGACY_TO_NEW_EXIT_CODES[error]
parser.add_function(convert_legacy_to_new_exit_codes)
parser.parse()
| 762 |
Python
| 28.346153 | 70 | 0.687664 |
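A minimal sketch of what the conversion does to a run's properties; the mapping excerpt and the props dict are illustrative:

```python
# Excerpt of the mapping defined above.
LEGACY_TO_NEW_EXIT_CODES = {"timeout": "search-out-of-time"}

def convert_legacy_to_new_exit_codes(content, props):
    error = props["error"]
    if error in LEGACY_TO_NEW_EXIT_CODES:
        props["error"] = LEGACY_TO_NEW_EXIT_CODES[error]

props = {"error": "timeout"}            # hypothetical run properties
convert_legacy_to_new_exit_codes("", props)
print(props["error"])                   # -> "search-out-of-time"
```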
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue667/v1-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('sccs-top-dfp-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['{ms_parser}'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_absolute_report_step(name='issue667-v1-abp',filter_config=[
'%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v1-pba',filter_config=[
'%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v2-abp',filter_config=[
'%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2',
])
exp.add_absolute_report_step(name='issue667-v2-pba',filter_config=[
'%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2',
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-abp',compared_configs=[
('%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2'),
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-pba',compared_configs=[
('%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2'),
])
exp.add_absolute_report_step(name='issue667-v1-paper',filter_config=[
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v2-paper',filter_config=[
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2',
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-paper',compared_configs=[
('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1', '%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2'),
])
#if matplotlib:
#for attribute in ["memory", "total_time"]:
#for config in configs:
#exp.add_report(
#RelativeScatterPlotReport(
#attributes=[attribute],
#filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
#get_category=lambda run1, run2: run1.get("domain"),
#),
#outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
#)
exp()
main(revisions=['issue667-v1', 'issue667-v2'])
| 18,318 |
Python
| 93.917098 | 481 | 0.694563 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue995/landmark_parser.py
|
import logging
import re
from lab.parser import Parser
class BottomUpParser(Parser):
def __init__(self):
super().__init__()
def add_bottom_up_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""):
def search_from_bottom(content, props):
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(regex, reversed_content)
if required and not match:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
if match:
props[name] = type(match.group(1))
self.add_function(search_from_bottom, file=file)
parser = BottomUpParser()
parser.add_bottom_up_pattern("landmarks", r"Discovered (\d+) landmarks")
parser.add_bottom_up_pattern("conj_landmarks", r"(\d+) are conjunctive")
parser.add_bottom_up_pattern("disj_landmarks", r"(\d+) are disjunctive")
parser.add_bottom_up_pattern("edges", r"(\d+) edges")
parser.add_bottom_up_pattern("landmark_generation_time", r"Landmarks generation time: (.+)s",type=float)
parser.parse()
| 1,086 |
Python
| 35.233332 | 108 | 0.64825 |
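The bottom-up pattern is simply a reversed search, so the value reported last in the log wins; a small standalone sketch on hypothetical log content:

```python
import re

# Hypothetical run.log content: the landmark count is reported twice,
# and we want the number from the final report.
content = "Discovered 10 landmarks\nDiscovered 25 landmarks\n"
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(r"Discovered (\d+) landmarks", reversed_content)
print(int(match.group(1)))  # -> 25, the last occurrence in the original log
```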
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue995/v1-satisficing.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue995-base", "issue995-v1"]
CONFIGS = [
IssueConfig("lama-first", [],
driver_options=["--alias", "lama-first"]),
IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=['export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib', "DOWNWARD_BENCHMARKS"],
)
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=2)
exp = common_setup.IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(os.path.join(DIR, "landmark_parser.py"))
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
"landmarks",
"edges"
]
exp.add_comparison_table_step(attributes=ATTRIBUTES)
exp.add_parse_again_step()
exp.run_steps()
| 2,341 |
Python
| 27.560975 | 440 | 0.692866 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue995/v2-optimal.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue995-base", "issue995-v2"]
CONFIGS = [
IssueConfig("seq-opt-bjolp", [],
driver_options=["--alias", "seq-opt-bjolp"]),
IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
IssueConfig("seq-opt-bjolp-optimal", ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"],
)
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=2)
exp = common_setup.IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(os.path.join(DIR, "landmark_parser.py"))
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()
ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
"landmarks",
"edges",
"landmark_generation_time"
]
exp.add_comparison_table_step(attributes=ATTRIBUTES)
exp.add_scatter_plot_step(attributes=['search_time'])
exp.add_scatter_plot_step(attributes=['search_time'], relative=True)
exp.run_steps()
| 2,479 |
Python
| 26.555555 | 90 | 0.667608 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue732/v7.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, finite_sum
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = [
"issue732-{rev}".format(**locals())
for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6", "v7"]]
BUILDS = ["release32"]
SEARCHES = [
("astar-inf", ["--search", "astar(const(infinity))"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
config,
build_options=[build],
driver_options=["--build", build])
for nick, config in SEARCHES
for build in BUILDS
]
SUITE = set(
common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE)
ENVIRONMENT = BaselSlurmEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
]
# Instead of comparing all revision pairs in separate reports, create a
# single report comparing neighboring revisions.
# exp.add_comparison_table_step(attributes=attributes)
compared_configs = []
for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]):
for config in CONFIGS:
config_nick = config.nick
compared_configs.append(
("{rev1}-{config_nick}".format(**locals()),
"{rev2}-{config_nick}".format(**locals()),
"Diff ({config_nick})".format(**locals())))
exp.add_report(
ComparativeReport(compared_configs, attributes=attributes),
name="compare-all-tags")
exp.run_steps()
| 2,261 |
Python
| 30.416666 | 80 | 0.682884 |
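The `zip` over `REVISIONS` pairs each revision with its successor, so the single report compares neighbors instead of all revision pairs; a small sketch of the names this produces (shortened revision list, config nick as above):

```python
REVISIONS = ["issue732-base", "issue732-v1", "issue732-v2"]  # shortened for illustration
config_nick = "astar-inf-release32"
compared_configs = [
    ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)
    for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:])
]
# -> [('issue732-base-astar-inf-release32', 'issue732-v1-astar-inf-release32', 'Diff (astar-inf-release32)'),
#     ('issue732-v1-astar-inf-release32', 'issue732-v2-astar-inf-release32', 'Diff (astar-inf-release32)')]
print(compared_configs)
```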
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue732/sg-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('sg_construction_time', r'time for root successor generation creation: (.+)s', type=float)
parser.add_pattern('sg_peak_mem_diff', r'peak memory difference for root successor generator creation: (\d+) KB', type=int)
parser.parse()
| 322 |
Python
| 28.363634 | 122 | 0.736025 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue732/v7-debug.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, finite_sum
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue732-base", "issue732-v7"]
BUILDS = ["debug32", "release32"]
CONFIGS = [
IssueConfig(
"lama-first-{build}".format(**locals()),
[],
build_options=[build],
driver_options=["--alias", "lama-first", "--build", build])
for build in BUILDS
]
SUITE = set(
common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE)
ENVIRONMENT = BaselSlurmEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
]
exp.add_comparison_table_step(attributes=attributes)
exp.run_steps()
| 1,487 |
Python
| 28.17647 | 80 | 0.710155 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue732/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, arithmetic_mean, finite_sum, geometric_mean
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue732-base", "issue732-v1"]
CONFIGS = [
IssueConfig(
'astar-inf',
['--search', 'astar(const(infinity))'],
),
IssueConfig(
'astar-blind',
['--search', 'astar(blind())'],
),
IssueConfig(
'debug-astar-inf',
['--search', 'astar(const(infinity))'],
build_options=["debug32"],
driver_options=["--build=debug32"],
),
IssueConfig(
'debug-astar-blind',
['--search', 'astar(blind())'],
build_options=["debug32"],
driver_options=["--build=debug32"],
),
]
SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) |
set(common_setup.DEFAULT_SATISFICING_SUITE)))
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
"error",
"run_dir",
]
exp.add_absolute_report_step(attributes=attributes)
exp.run_steps()
| 1,864 |
Python
| 27.257575 | 78 | 0.656116 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue555/issue555-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue555-base", "issue555-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_h2': [
'--search',
'astar(hm(2))'],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 467 |
Python
| 16.333333 | 41 | 0.608137 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue585/v3-new-configs.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.experiment import FastDownwardExperiment
from downward.reports.compare import CompareConfigsReport
import common_setup
REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V3 = 'issue585-v3'
SUITE = suites.suite_optimal_with_ipc11()
ALGORITHMS = {
'astar_cpdbs_genetic': (REV_V3, ['--search', 'astar(cpdbs(patterns=genetic()))']),
'astar_zopdbs_systematic': (REV_V3, ['--search', 'astar(zopdbs(patterns=systematic()))']),
'astar_zopdbs_hillclimbing': (REV_V3, ['--search', 'astar(zopdbs(patterns=hillclimbing()))']),
'astar_pho_genetic': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=genetic())]))']),
'astar_pho_combo': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=combo())]))']),
}
exp = common_setup.IssueExperiment(
revisions=[],
configs={},
suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
exp.add_algorithm(nick, REPO, rev, cmd)
exp.add_absolute_report_step()
exp()
| 1,081 |
Python
| 30.823529 | 114 | 0.682701 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue585/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.experiment import FastDownwardExperiment
from downward.reports.compare import CompareConfigsReport
import common_setup
REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V1 = 'issue585-v1'
SUITE = ['gripper:prob01.pddl'] # suites.suite_optimal_with_ipc11()
ALGORITHMS = {
'astar_pdb_base': (REV_BASE, ['--search', 'astar(pdb())']),
'astar_pdb_v1': (REV_V1, ['--search', 'astar(pdb())']),
'astar_cpdbs_base': (REV_BASE, ['--search', 'astar(cpdbs())']),
'astar_cpdbs_v1': (REV_V1, ['--search', 'astar(cpdbs())']),
'astar_cpdbs_systematic_base': (REV_BASE, ['--search', 'astar(cpdbs_systematic())']),
'astar_cpdbs_systematic_v1': (REV_V1, ['--search', 'astar(cpdbs(patterns=systematic()))']),
'astar_zopdbs_base': (REV_BASE, ['--search', 'astar(zopdbs())']),
'astar_zopdbs_v1': (REV_V1, ['--search', 'astar(zopdbs())']),
'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']),
'astar_ipdb_v1': (REV_V1, ['--search', 'astar(ipdb())']),
'astar_ipdb_alias': (REV_V1, ['--search', 'astar(cpdbs(patterns=hillclimbing()))']),
'astar_gapdb_base': (REV_BASE, ['--search', 'astar(gapdb())']),
'astar_gapdb_v1': (REV_V1, ['--search', 'astar(zopdbs(patterns=genetic()))']),
'astar_pho_systematic_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_systematic()]))']),
'astar_pho_systematic_v1': (REV_V1, ['--search', 'astar(operatorcounting([pho_constraints(patterns=systematic())]))']),
'astar_pho_hillclimbing_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_ipdb()]))']),
'astar_pho_hillclimbing_v1': (REV_V1, ['--search', 'astar(operatorcounting([pho_constraints(patterns=hillclimbing())]))']),
}
COMPARED_ALGORITHMS = [
('astar_pdb_base', 'astar_pdb_v1', 'Diff (pdb)'),
('astar_cpdbs_base', 'astar_cpdbs_v1', 'Diff (cpdbs)'),
('astar_cpdbs_systematic_base', 'astar_cpdbs_systematic_v1', 'Diff (cpdbs_systematic)'),
('astar_zopdbs_base', 'astar_zopdbs_v1', 'Diff (zopdbs)'),
('astar_ipdb_base', 'astar_ipdb_v1', 'Diff (ipdb)'),
('astar_ipdb_v1', 'astar_ipdb_alias', 'Diff (ipdb_alias)'),
('astar_gapdb_base', 'astar_gapdb_v1', 'Diff (gapdb)'),
('astar_pho_systematic_base', 'astar_pho_systematic_v1', 'Diff (pho_systematic)'),
('astar_pho_hillclimbing_base', 'astar_pho_hillclimbing_v1', 'Diff (pho_hillclimbing)'),
]
exp = common_setup.IssueExperiment(
revisions=[],
configs={},
suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
exp.add_algorithm(nick, REPO, rev, cmd)
exp.add_report(CompareConfigsReport(
COMPARED_ALGORITHMS,
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
))
exp()
| 2,701 |
Python
| 39.328358 | 124 | 0.660866 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue585/v3-rest.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.experiment import FastDownwardExperiment
from downward.reports.compare import CompareConfigsReport
from relativescatter import RelativeScatterPlotReport
import common_setup
REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V3 = 'issue585-v3'
SUITE = suites.suite_optimal_with_ipc11()
ALGORITHMS = {
'astar_pdb_base': (REV_BASE, ['--search', 'astar(pdb())']),
'astar_pdb_v3': (REV_V3, ['--search', 'astar(pdb())']),
'astar_cpdbs_base': (REV_BASE, ['--search', 'astar(cpdbs())']),
'astar_cpdbs_v3': (REV_V3, ['--search', 'astar(cpdbs())']),
'astar_cpdbs_systematic_base': (REV_BASE, ['--search', 'astar(cpdbs_systematic())']),
'astar_cpdbs_systematic_v3': (REV_V3, ['--search', 'astar(cpdbs(patterns=systematic()))']),
'astar_zopdbs_base': (REV_BASE, ['--search', 'astar(zopdbs())']),
'astar_zopdbs_v3': (REV_V3, ['--search', 'astar(zopdbs())']),
'astar_pho_systematic_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_systematic()]))']),
'astar_pho_systematic_v3': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=systematic())]))']),
}
COMPARED_ALGORITHMS = [
('astar_pdb_base', 'astar_pdb_v3', 'Diff (pdb)'),
('astar_cpdbs_base', 'astar_cpdbs_v3', 'Diff (cpdbs)'),
('astar_cpdbs_systematic_base', 'astar_cpdbs_systematic_v3', 'Diff (cpdbs_systematic)'),
('astar_zopdbs_base', 'astar_zopdbs_v3', 'Diff (zopdbs)'),
('astar_pho_systematic_base', 'astar_pho_systematic_v3', 'Diff (pho_systematic)'),
]
exp = common_setup.IssueExperiment(
revisions=[],
configs={},
suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
exp.add_algorithm(nick, REPO, rev, cmd)
exp.add_report(CompareConfigsReport(
COMPARED_ALGORITHMS,
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
))
for c1, c2, _ in COMPARED_ALGORITHMS:
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=[c1, c2],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue585_%s_v3_total_time.png' % c1
)
exp()
| 2,241 |
Python
| 34.031249 | 123 | 0.645248 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue585/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.experiment import FastDownwardExperiment
from downward.reports.compare import CompareConfigsReport
from relativescatter import RelativeScatterPlotReport
import common_setup
REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V1 = 'issue585-v3'
SUITE = suites.suite_optimal_with_ipc11()
ALGORITHMS = {
'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']),
'astar_ipdb_v3': (REV_V1, ['--search', 'astar(ipdb())']),
'astar_gapdb_base': (REV_BASE, ['--search', 'astar(gapdb())']),
'astar_gapdb_v3': (REV_V1, ['--search', 'astar(zopdbs(patterns=genetic()))']),
}
COMPARED_ALGORITHMS = [
('astar_ipdb_base', 'astar_ipdb_v3', 'Diff (ipdb)'),
('astar_gapdb_base', 'astar_gapdb_v3', 'Diff (gapdb)'),
]
exp = common_setup.IssueExperiment(
revisions=[],
configs={},
suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
exp.add_algorithm(nick, REPO, rev, cmd)
exp.add_report(CompareConfigsReport(
COMPARED_ALGORITHMS,
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
))
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["astar_ipdb_base", "astar_ipdb_v3"],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue585_ipdb_base_v3_total_time.png'
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["astar_gapdb_base", "astar_gapdb_v3"],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue585_gapdb_base_v3_total_time.png'
)
exp()
| 1,683 |
Python
| 27.066666 | 82 | 0.664884 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue585/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.experiment import FastDownwardExperiment
from downward.reports.compare import CompareConfigsReport
import common_setup
REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V1 = 'issue585-v2'
SUITE = suites.suite_optimal_with_ipc11()
ALGORITHMS = {
'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']),
'astar_ipdb_v2': (REV_V1, ['--search', 'astar(ipdb())']),
}
COMPARED_ALGORITHMS = [
('astar_ipdb_base', 'astar_ipdb_v2', 'Diff (ipdb)'),
]
exp = common_setup.IssueExperiment(
revisions=[],
configs={},
suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
exp.add_algorithm(nick, REPO, rev, cmd)
exp.add_report(CompareConfigsReport(
COMPARED_ALGORITHMS,
attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
))
exp()
| 888 |
Python
| 23.027026 | 68 | 0.688063 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue726/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue726-base", "issue726-v1"]
CONFIGS = [
IssueConfig("blind", ["--search", "astar(blind())"]),
IssueConfig("lmcut", ["--search", "astar(lmcut())"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("base", "v1")]:
for config_nick in ["blind", "lmcut"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue726-%s-%s" % (rev1, config_nick),
"issue726-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue726-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,537 |
Python
| 29.759999 | 81 | 0.650618 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue578/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue578-v1"]
CONFIGS = [
IssueConfig('cpdbs-hc900', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900)))']),
IssueConfig('cpdbs-hc900-dp30', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=30))']),
IssueConfig('cpdbs-hc900-dp60', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=60))']),
IssueConfig('cpdbs-hc900-dp300', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=300))']),
IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(patterns=systematic(2)))']),
IssueConfig('cpdbs-sys2-dp30', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=30))']),
IssueConfig('cpdbs-sys2-dp60', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=60))']),
IssueConfig('cpdbs-sys2-dp300', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=300))']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_resource("custom_parser", "custom-parser.py")
exp.add_command("run-custom-parser", ["{custom_parser}"])
exp.add_suite(BENCHMARKS_DIR, SUITE)
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend([
"dominance_pruning_failed",
"dominance_pruning_time",
"dominance_pruning_pruned_subsets",
"dominance_pruning_pruned_pdbs",
"pdb_collection_construction_time",
])
exp.add_fetcher('data/issue578-v1-more-configs-eval')
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
exp.run_steps()
| 2,298 |
Python
| 39.333333 | 135 | 0.735857 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue578/v1-more-configs.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue578-v1"]
CONFIGS = [
IssueConfig('cpdbs-sys2-dp500', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=500))']),
IssueConfig('cpdbs-sys2-dp700', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=700))']),
IssueConfig('cpdbs-sys2-dp900', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=900))']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_resource("custom_parser", "custom-parser.py")
exp.add_command("run-custom-parser", ["{custom_parser}"])
exp.add_suite(BENCHMARKS_DIR, SUITE)
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend([
"dominance_pruning_failed",
"dominance_pruning_time",
"dominance_pruning_pruned_subsets",
"dominance_pruning_pruned_pdbs",
"pdb_collection_construction_time",
])
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
exp.run_steps()
| 1,659 |
Python
| 32.199999 | 121 | 0.748041 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue694/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue694-v1-base", "issue694-v1"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
("lmcut", "astar(lmcut())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
ATTRIBUTES = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time",
"int_hash_set_load_factor", "int_hash_set_resizes"]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')])
exp.add_comparison_table_step(attributes=ATTRIBUTES)
for relative in [False, True]:
exp.add_scatter_plot_step(relative=relative, attributes=["memory", "total_time"])
exp.run_steps()
| 1,644 |
Python
| 28.374999 | 85 | 0.698905 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue694/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
def main():
parser = Parser()
parser.add_pattern(
"int_hash_set_load_factor",
"Int hash set load factor: \d+/\d+ = (.+)",
required=False,
type=float)
parser.add_pattern(
"int_hash_set_resizes",
"Int hash set resizes: (\d+)",
required=False,
type=int)
print "Running custom parser"
parser.parse()
main()
| 444 |
Python
| 19.227272 | 51 | 0.560811 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue591/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue591-base", "issue591-v1"]
CONFIGS = [
IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)])
for heuristic in [
"blind()", "cegar(max_states=10000)", "hm()", "lmcut()", "hmax()"]
]
SUITE = [
'barman-opt14-strips', 'cavediving-14-adl', 'childsnack-opt14-strips',
'citycar-opt14-adl', 'floortile-opt14-strips', 'ged-opt14-strips',
'hiking-opt14-strips', 'maintenance-opt14-adl',
'openstacks-opt14-strips', 'parking-opt14-strips',
'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips',
'visitall-opt14-strips']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time"])
exp()
| 1,297 |
Python
| 28.499999 | 76 | 0.70239 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue591/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue591-base", "issue591-v1"]
CONFIGS = [
IssueConfig(
"lazy_greedy_{}".format(heuristic),
["--heuristic", "h={}()".format(heuristic),
"--search", "lazy_greedy(h, preferred=h)"])
for heuristic in ["add", "cea", "cg", "ff"]
]
SUITE = [
'barman-sat14-strips', 'cavediving-14-adl', 'childsnack-sat14-strips',
'citycar-sat14-adl', 'floortile-sat14-strips', 'ged-sat14-strips',
'hiking-sat14-strips', 'maintenance-sat14-adl',
'openstacks-sat14-strips', 'parking-sat14-strips',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'transport-sat14-strips', 'visitall-sat14-strips']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time"])
exp()
| 1,344 |
Python
| 28.23913 | 74 | 0.692708 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue707/v2-compare.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.compare import ComparativeReport
from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run
BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks')
REVISIONS = []
CONFIGS = []
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email='[email protected]')
if is_test_run():
SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['{ms_parser}'])
exp.add_suite(BENCHMARKS_DIR, SUITE)
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
ms_construction_time,
ms_atomic_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_fetcher('data/issue707-v1-eval')
exp.add_fetcher('data/issue707-v2-pruning-variants-eval')
outfile = os.path.join(
exp.eval_dir,
"issue707-v1-v2-dfp-compare.html")
exp.add_report(ComparativeReport(algorithm_pairs=[
('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-nopruneunreachable' % 'issue707-v2'),
('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-nopruneirrelevant' % 'issue707-v2'),
('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-noprune' % 'issue707-v2'),
#('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneunreachable' % 'issue707-v2'),
#('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneirrelevant' % 'issue707-v2'),
#('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-noprune' % 'issue707-v2'),
#('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneunreachable' % 'issue707-v2'),
#('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneirrelevant' % 'issue707-v2'),
#('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-noprune' % 'issue707-v2'),
],attributes=attributes),outfile=outfile)
exp.add_step('publish-issue707-v1-v2-dfp-compare.html', subprocess.call, ['publish', outfile])
exp.run_steps()
| 3,396 |
Python
| 38.964705 | 145 | 0.71172 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue604/v7-base.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from downward.reports.compare import CompareConfigsReport
from common_setup import IssueConfig, IssueExperiment
import os
def main(revisions=[]):
suite = suites.suite_optimal_with_ipc11()
configs = {
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True)
extra_attributes = [
search_out_of_memory,
search_out_of_time,
perfect_heuristic,
proved_unsolvability,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
ms_memory_delta,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
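# No new runs are started here (the configs dict above is empty); the
# script only fetches previously generated data for the base and v7
# revisions and builds the comparison report below.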
exp.add_fetcher('data/issue604-v1-eval',filter_config=[
'issue604-base-rl-b50k',
'issue604-base-cggl-b50k',
'issue604-base-dfp-b50k',
'issue604-base-rl-ginf',
'issue604-base-cggl-ginf',
'issue604-base-dfp-ginf',
'issue604-base-rl-f50k',
'issue604-base-cggl-f50k',
'issue604-base-dfp-f50k',
])
exp.add_fetcher('data/issue604-v7-eval',filter_config=[
'issue604-v7-rl-b50k',
'issue604-v7-cggl-b50k',
'issue604-v7-dfp-b50k',
'issue604-v7-rl-ginf',
'issue604-v7-cggl-ginf',
'issue604-v7-dfp-ginf',
'issue604-v7-rl-f50k',
'issue604-v7-cggl-f50k',
'issue604-v7-dfp-f50k',
])
exp.add_fetcher('data/issue604-v7-rest-eval',filter_config=[
'issue604-v7-rl-b50k',
'issue604-v7-cggl-b50k',
'issue604-v7-dfp-b50k',
'issue604-v7-rl-ginf',
'issue604-v7-cggl-ginf',
'issue604-v7-dfp-ginf',
'issue604-v7-rl-f50k',
'issue604-v7-cggl-f50k',
'issue604-v7-dfp-f50k',
])
exp.add_report(CompareConfigsReport(compared_configs=[
('issue604-base-rl-b50k', 'issue604-v7-rl-b50k'),
('issue604-base-cggl-b50k', 'issue604-v7-cggl-b50k'),
('issue604-base-dfp-b50k', 'issue604-v7-dfp-b50k'),
('issue604-base-rl-ginf', 'issue604-v7-rl-ginf'),
('issue604-base-cggl-ginf', 'issue604-v7-cggl-ginf'),
('issue604-base-dfp-ginf', 'issue604-v7-dfp-ginf'),
('issue604-base-rl-f50k', 'issue604-v7-rl-f50k'),
('issue604-base-cggl-f50k', 'issue604-v7-cggl-f50k'),
('issue604-base-dfp-f50k', 'issue604-v7-dfp-f50k'),
],attributes=attributes),outfile=os.path.join(
exp.eval_dir, 'issue604-base-v7-comparison.html'))
exp()
main()
| 3,824 |
Python
| 33.459459 | 107 | 0.633107 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue481/v1-sat-test.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue481-base", "issue481-v1"]
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = [
# Greedy (tests single and alternating open lists)
IssueConfig("eager_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"
]),
IssueConfig("lazy_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"
]),
# Epsilon Greedy
IssueConfig("lazy_epsilon_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy(epsilon_greedy(h))"
]),
# Pareto
IssueConfig("lazy_pareto_ff_cea", [
"--heuristic",
"h1=ff()",
"--heuristic",
"h2=cea()",
"--search",
"lazy(pareto([h1, h2]))"
]),
# Single Buckets
IssueConfig("lazy_single_buckets_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy(single_buckets(h))"
]),
# Type based (from issue455)
IssueConfig("ff-type-const", [
"--heuristic",
"hff=ff(cost_type=one)",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])]),"
"preferred=[hff],cost_type=one)"
]),
IssueConfig("lama-first", [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true)]),"
"preferred=[hff,hlm],cost_type=one)"
]),
IssueConfig("lama-first-types-ff-g", [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true), type_based([hff, g()])]),"
"preferred=[hff,hlm],cost_type=one)"
]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
# Absolute report commented out because a comparison table is more useful for this issue.
# (It's still in this file because someone might want to use it as a basis.)
# Scatter plots commented out for now because I have no usable matplotlib available.
# exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 2,665 |
Python
| 28.955056 | 132 | 0.55122 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue481/v1-opt-test.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue481-base", "issue481-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = [
# Greedy (tests single and alternating open lists)
IssueConfig("astar_lmcut", [
"--search",
"astar(lmcut())"
]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 539 |
Python
| 16.999999 | 54 | 0.641929 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue627/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']),
IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']),
}
exp = IssueExperiment(
benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-v3-base-%s" % config.nick,
"issue627-v4-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v4_memory_%s.png' % config.nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-v3-base-%s" % config.nick,
"issue627-v4-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v4_total_time_%s.png' % config.nick
)
exp()
main(revisions=['issue627-v3-base', 'issue627-v4'])
| 1,988 |
Python
| 34.517857 | 143 | 0.57998 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue627/merge-v3-v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir
from relativescatter import RelativeScatterPlotReport
import os
def main(revisions=None):
exp = IssueExperiment(benchmarks_dir=".", suite=[])
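# This script starts no new runs: it merges the previously generated
# v3 and v4 result data (excluding the "base" configurations) and
# produces the relative scatter plots below.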
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v3-eval"),
filter=lambda run: "base" not in run["config"],
)
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v4-eval"),
filter=lambda run: "base" not in run["config"],
)
for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v4-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v4_memory_%s.png' % config_nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v4-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v4_total_time_%s.png' % config_nick
)
exp()
main(revisions=['issue627-v3', 'issue627-v4'])
| 1,574 |
Python
| 32.510638 | 116 | 0.568615 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue627/v5-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_satisficing_with_ipc11()
configs = {
IssueConfig('lazy-greedy-ff', [
'--heuristic',
'h=ff()',
'--search',
'lazy_greedy(h, preferred=h)'
]),
IssueConfig('lama-first', [],
driver_options=['--alias', 'lama-first']
),
IssueConfig('eager_greedy_cg', [
'--heuristic',
'h=cg()',
'--search',
'eager_greedy(h, preferred=h)'
]),
IssueConfig('eager_greedy_cea', [
'--heuristic',
'h=cea()',
'--search',
'eager_greedy(h, preferred=h)'
]),
}
exp = IssueExperiment(
benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-v3-base-%s" % config.nick,
"issue627-v5-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v5_sat_memory_%s.png' % config.nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-v3-base-%s" % config.nick,
"issue627-v5-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v5_sat_total_time_%s.png' % config.nick
)
exp()
main(revisions=['issue627-v3-base', 'issue627-v5'])
| 2,128 |
Python
| 28.569444 | 74 | 0.520677 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue714/v1-portfolios.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue714-base", "issue714-v1"]
CONFIGS = [
IssueConfig(alias, [], driver_options=["--alias", alias])
for alias in [
"seq-sat-fdss-1", "seq-sat-fdss-2", "seq-sat-fdss-2014",
"seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES)
exp.add_comparison_table_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES)
exp.run_steps()
| 1,128 |
Python
| 27.948717 | 78 | 0.73227 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue714/v1-configs.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports import compare
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=[],
configs=[],
environment=ENVIRONMENT,
)
compared_algorithms = []
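# For every search/heuristic combination two algorithms are added: the
# issue714-base revision uses the old two-argument call syntax
# (e.g. eager_greedy(h1, h2, ...)), while the issue714-v1 revision uses
# the new list syntax (e.g. eager_greedy([h1, h2], ...)); the
# comparison report below pairs them up.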
for search in ["eager_greedy", "lazy_greedy"]:
for h1, h2 in itertools.permutations(["cea", "cg", "ff"], 2):
rev = "issue714-base"
config_nick = "-".join([search, h1, h2])
algo1 = common_setup.get_algo_nick(rev, config_nick)
exp.add_algorithm(
algo1,
common_setup.get_repo_base(),
rev,
[
"--heuristic", "h{h1}={h1}".format(**locals()),
"--heuristic", "h{h2}={h2}".format(**locals()),
"--search", "{search}(h{h1}, h{h2}, preferred=[h{h1},h{h2}])".format(**locals())],
driver_options=["--search-time-limit", "1m"])
rev = "issue714-v1"
config_nick = "-".join([search, h1, h2])
algo2 = common_setup.get_algo_nick(rev, config_nick)
exp.add_algorithm(
algo2,
common_setup.get_repo_base(),
rev,
[
"--heuristic", "h{h1}={h1}".format(**locals()),
"--heuristic", "h{h2}={h2}".format(**locals()),
"--search", "{search}([h{h1},h{h2}], preferred=[h{h1},h{h2}])".format(**locals())],
driver_options=["--search-time-limit", "1m"])
compared_algorithms.append([algo1, algo2, "Diff ({config_nick})".format(**locals())])
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_report(compare.ComparativeReport(
compared_algorithms,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name=common_setup.get_experiment_name() + "-comparison")
exp.run_steps()
| 2,299 |
Python
| 31.857142 | 99 | 0.599391 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue680/v1-potential.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal()
configs = []
for osi in ['103', '107']:
for cplex in ['1251', '1263']:
if osi == '107' and cplex == '1251':
# incompatible versions
continue
configs += [
IssueConfig(
'astar_initial_state_potential_OSI%s_CPLEX%s' % (osi, cplex),
['--search', 'astar(initial_state_potential())'],
build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)],
driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)]
),
IssueConfig(
'astar_sample_based_potentials_OSI%s_CPLEX%s' % (osi, cplex),
['--search', 'astar(sample_based_potentials())'],
build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)],
driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)]
),
IssueConfig(
'astar_all_states_potential_OSI%s_CPLEX%s' % (osi, cplex),
['--search', 'astar(all_states_potential())'],
build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)],
driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)]
),
]
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
processes=4,
email='[email protected]',
)
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
domains = suites.suite_optimal_strips()
exp.add_absolute_report_step(filter_domain=domains)
for attribute in ["memory", "total_time"]:
for config in ['astar_initial_state_potential', 'astar_sample_based_potentials', 'astar_all_states_potential']:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi) for osi in ['103', '107']],
filter_domain=domains,
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}_CPLEX1263.png".format(exp.name, attribute, config)
)
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex) for cplex in ['1251', '1263']],
filter_domain=domains,
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}_OSI103.png".format(exp.name, attribute, config)
)
exp()
main(revisions=['issue680-v1'])
| 3,252 |
Python
| 38.192771 | 125 | 0.531673 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue528/issue528.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
SEARCH_REVS = ["issue528-base", "issue528-v1", "issue528-v2"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"astar_lmcut": ["--search", "astar(lmcut())"]
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ("memory", "total_time"):
exp.add_report(
ScatterPlotReport(
attributes=[attr],
filter_config=[
"issue528-base-astar_lmcut",
"issue528-v2-astar_lmcut",
],
),
outfile='issue528_base_v2_%s.png' % attr
)
exp()
| 825 |
Python
| 20.179487 | 61 | 0.609697 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue851/generalscatter.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import logging
import math
import os
from lab import tools
from downward.reports.plot import MatplotlibPlot, Matplotlib, PgfPlots, \
PlotReport, MIN_AXIS
class ScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
# TODO: assert that both are linear or log
plot_size = max(report.x_missing_val * 1.01, report.y_missing_val * 1.01)
else:
plot_size = max(report.x_missing_val * 1.5, report.y_missing_val * 1.5)
# Plot a diagonal black line. Starting at (0,0) often raises errors.
axes.plot([0.001, plot_size], [0.001, plot_size], 'k')
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
# axes.set_xlim(report.xlim_left, report.xlim_right)
# axes.set_ylim(report.ylim_bottom, report.ylim_top)
MatplotlibPlot.change_axis_formatter(
axes.xaxis, report.x_missing_val if report.show_missing else None)
MatplotlibPlot.change_axis_formatter(
axes.yaxis, report.y_missing_val if report.show_missing else None)
return has_points
class ScatterPgfPlots(PgfPlots):
@classmethod
def _format_coord(cls, coord):
def format_value(v):
return str(v) if isinstance(v, int) else '%f' % v
return '(%s, %s)' % (format_value(coord[0]), format_value(coord[1]))
@classmethod
def _get_plot(cls, report):
lines = []
options = cls._get_axis_options(report)
lines.append('\\begin{axis}[%s]' % cls._format_options(options))
for category, coords in sorted(report.categories.items()):
plot = {'only marks': True}
lines.append(
'\\addplot+[%s] coordinates {\n%s\n};' % (
cls._format_options(plot),
' '.join(cls._format_coord(c) for c in coords)))
if category:
lines.append('\\addlegendentry{%s}' % category)
elif report.has_multiple_categories:
# None is treated as the default category if using multiple
# categories. Add a corresponding entry to the legend.
lines.append('\\addlegendentry{default}')
# Add black line.
start = min(report.min_x, report.min_y)
if report.xlim_left is not None:
start = min(start, report.xlim_left)
if report.ylim_bottom is not None:
start = min(start, report.ylim_bottom)
end = max(report.max_x, report.max_y)
if report.xlim_right:
end = max(end, report.xlim_right)
if report.ylim_top:
end = max(end, report.ylim_top)
if report.show_missing:
end = max(end, report.missing_val)
lines.append(
'\\addplot[color=black] coordinates {(%f, %f) (%d, %d)};' %
(start, start, end, end))
lines.append('\\end{axis}')
return lines
@classmethod
def _get_axis_options(cls, report):
opts = PgfPlots._get_axis_options(report)
# Add line for missing values.
for axis in ['x', 'y']:
opts['extra %s ticks' % axis] = report.missing_val
opts['extra %s tick style' % axis] = 'grid=major'
return opts
class GeneralScatterPlotReport(PlotReport):
"""
Generate a scatter plot for a pair of attributes, optionally using
different attributes (and algorithms) for the x- and y-axis.
"""
def __init__(self, x_algo, y_algo, x_attribute, y_attribute, show_missing=True, get_category=None, **kwargs):
"""
See :class:`.PlotReport` for inherited arguments.
The keyword argument *attributes* must contain exactly one
attribute.
Use the *filter_algorithm* keyword argument to select exactly
two algorithms.
If only one of the two algorithms has a value for a run, only
add a coordinate if *show_missing* is True.
*get_category* can be a function that takes **two** runs
(dictionaries of properties) and returns a category name. This
name is used to group the points in the plot. If there is more
than one group, a legend is automatically added. Runs for which
this function returns None are shown in a default category and
are not contained in the legend. For example, to group by
domain:
>>> def domain_as_category(run1, run2):
... # run2['domain'] has the same value, because we always
... # compare two runs of the same problem.
... return run1['domain']
Example grouping by difficulty:
>>> def improvement(run1, run2):
... time1 = run1.get('search_time', 1800)
... time2 = run2.get('search_time', 1800)
... if time1 > time2:
... return 'better'
... if time1 == time2:
... return 'equal'
... return 'worse'
>>> from downward.experiment import FastDownwardExperiment
>>> exp = FastDownwardExperiment()
>>> exp.add_report(ScatterPlotReport(
... attributes=['search_time'],
... get_category=improvement))
Example comparing the number of expanded states for two
algorithms:
>>> exp.add_report(ScatterPlotReport(
... attributes=["expansions_until_last_jump"],
... filter_algorithm=["algorithm-1", "algorithm-2"],
... get_category=domain_as_category,
... format="png", # Use "tex" for pgfplots output.
... ),
... name="scatterplot-expansions")
"""
# If the size has not been set explicitly, make it a square.
matplotlib_options = kwargs.get('matplotlib_options', {})
matplotlib_options.setdefault('figure.figsize', [8, 8])
kwargs['matplotlib_options'] = matplotlib_options
PlotReport.__init__(self, **kwargs)
if not self.attribute:
logging.critical('ScatterPlotReport needs exactly one attribute')
# By default all values are in the same category.
self.get_category = get_category or (lambda run1, run2: None)
self.show_missing = show_missing
self.xlim_left = self.xlim_left or MIN_AXIS
self.ylim_bottom = self.ylim_bottom or MIN_AXIS
if self.output_format == 'tex':
self.writer = ScatterPgfPlots
else:
self.writer = ScatterMatplotlib
self.x_algo = x_algo
self.y_algo = y_algo
self.x_attribute = x_attribute
self.y_attribute = y_attribute
def _set_scales(self, xscale, yscale):
PlotReport._set_scales(self, xscale or self.attribute.scale or 'log', yscale)
if self.xscale != self.yscale:
logging.critical('Scatterplots must use the same scale on both axes.')
def _get_missing_val(self, max_value, scale):
"""
Separate the missing values from the real data points: on a linear
scale they are plotted at max_value * 1.1, on a log scale at max_value
rounded up to the next power of 10.
"""
assert max_value is not None
# HACK!
max_value = 1800
if scale == 'linear':
return max_value * 1.1
return int(10 ** math.ceil(math.log10(max_value)))
def _handle_none_values(self, X, Y, replacement_x, replacement_y):
assert len(X) == len(Y), (X, Y)
if self.show_missing:
return ([x if x is not None else replacement_x for x in X],
[y if y is not None else replacement_y for y in Y])
return zip(*[(x, y) for x, y in zip(X, Y) if x is not None and y is not None])
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
x_count = 0
y_count = 0
x_none_count = 0
y_none_count = 0
for (domain, problem), runs in self.problem_runs.items():
run1 = next((run for run in runs if run['algorithm'] == self.x_algo), None)
run2 = next((run for run in runs if run['algorithm'] == self.y_algo), None)
if run1 is None or run2 is None:
continue
assert (run1['algorithm'] == self.x_algo and
run2['algorithm'] == self.y_algo)
val1 = run1.get(self.x_attribute)
val2 = run2.get(self.y_attribute)
x_count += 1
y_count += 1
if val1 is None:
x_none_count += 1
if val2 is None:
y_none_count += 1
# print val1, val2
if val1 is None and val2 is None:
continue
category = self.get_category(run1, run2)
categories[category].append((val1, val2))
# print x_count, y_count
# print x_none_count, y_none_count
# print len(categories[None])
# print categories[None]
return categories
def _get_limit(self, varlist, limit_type):
assert limit_type == 'max' or limit_type == 'min'
varlist = [x for x in varlist if x is not None]
if limit_type == 'max':
return max(varlist)
else:
return min(varlist)
def _get_plot_size(self, missing_val, scale):
if scale == 'linear':
return missing_val * 1.01
else:
return missing_val * 1.25
def _prepare_categories(self, categories):
categories = PlotReport._prepare_categories(self, categories)
# Find max-value to fit plot and to draw missing values.
# self.missing_val = self._get_missing_val(max(self.max_x, self.max_y))
self.x_missing_val = self._get_missing_val(self.max_x, self.xscale)
self.y_missing_val = self._get_missing_val(self.max_y, self.yscale)
# print self.x_missing_val, self.y_missing_val
# set minima
self.xlim_left = self._get_limit([self.xlim_left, self.min_x],'min')
self.ylim_bottom = self._get_limit([self.ylim_bottom, self.min_y],'min')
# set maxima
x_plot_size = y_plot_size = None
if self.show_missing:
x_plot_size = self._get_plot_size(self.x_missing_val, self.xscale)
y_plot_size = self._get_plot_size(self.y_missing_val, self.yscale)
self.xlim_right = self._get_limit([self.xlim_right, self.max_x, x_plot_size], 'max')
self.ylim_top = self._get_limit([self.ylim_top, self.max_y, y_plot_size], 'max')
# self.diagonal_start = self.diagonal_end = None
# if self.show_diagonal:
# self.diagonal_start = max(self.xlim_left, self.ylim_bottom)
# self.diagonal_end = min(self.xlim_right, self.ylim_top)
new_categories = {}
for category, coords in categories.items():
X, Y = zip(*coords)
# X, Y = self._handle_none_values(X, Y, self.missing_val)
X, Y = self._handle_none_values(X, Y, self.x_missing_val, self.y_missing_val)
coords = zip(X, Y)
new_categories[category] = coords
# print len(new_categories[None])
# print new_categories[None]
return new_categories
def write(self):
if not (len(self.algorithms) == 1 and self.x_algo == self.algorithms[0] and self.y_algo == self.algorithms[0]):
logging.critical(
'Scatter plots need exactly 1 algorithm that must match x_algo and y_algo: %s, %s, %s' % (self.algorithms, self.x_algo, self.y_algo))
self.xlabel = self.xlabel or self.x_algo + ": " + self.x_attribute
self.ylabel = self.ylabel or self.y_algo + ": " + self.y_attribute
suffix = '.' + self.output_format
if not self.outfile.endswith(suffix):
self.outfile += suffix
tools.makedirs(os.path.dirname(self.outfile))
self._write_plot(self.runs.values(), self.outfile)
| 12,617 |
Python
| 40.235294 | 149 | 0.580407 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue311/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script to check correctness of eager_wastar.
It compares eager_wastar([h], w=W) with the equivalent formulation
eager(single(sum([g(), weight(h, W)])), reopen_closed=true).
Results should be identical for the same weight W.
'''
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue311"]
CONFIGS = [
IssueConfig('eager_wastar_w1', ['--search', 'eager_wastar([lmcut], w=1)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_wastar_w2', ['--search', 'eager_wastar([lmcut], w=2)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_wastar_w5', ['--search', 'eager_wastar([lmcut], w=5)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_wastar_w100', ['--search', 'eager_wastar([lmcut], w=100)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_single_openlist_w1', ['--search', 'eager(single(sum([g(), weight(lmcut, 1)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_single_openlist_w2', ['--search', 'eager(single(sum([g(), weight(lmcut, 2)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_single_openlist_w5', ['--search', 'eager(single(sum([g(), weight(lmcut, 5)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']),
IssueConfig('eager_single_openlist_w100', ['--search', 'eager(single(sum([g(), weight(lmcut, 100)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,597 |
Python
| 38.96923 | 180 | 0.691952 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue311/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script to test a possible eager version of LAMA.
'''
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue311"]
CONFIGS = [
IssueConfig('lama', [], driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig('eager_lama', [
"--if-unit-cost",
"--evaluator",
"hlm=lama_synergy(lm_rhw(reasonable_orders=true))",
"--evaluator", "hff=ff_synergy(hlm)",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
eager_wastar([hff,hlm],preferred=[hff,hlm],w=5),
eager_wastar([hff,hlm],preferred=[hff,hlm],w=3),
eager_wastar([hff,hlm],preferred=[hff,hlm],w=2),
eager_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lama_synergy(lm_rhw(reasonable_orders=true,"
" lm_cost_type=one),transform=adapt_costs(one))",
"--evaluator", "hff1=ff_synergy(hlm1)",
"--evaluator",
"hlm2=lama_synergy(lm_rhw(reasonable_orders=true,"
" lm_cost_type=plusone),transform=adapt_costs(plusone))",
"--evaluator", "hff2=ff_synergy(hlm2)",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--always"
])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,846 |
Python
| 32.104651 | 91 | 0.643008 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue548/mas-refetch.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import common_setup
REVS = ["issue548-base", "issue548-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
B_CONFIGS = {
'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
G_CONFIGS = {
'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
F_CONFIGS = {
'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
}
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_fetcher('data/issue548-mas', parsers='ms-parser.py')
exp.add_comparison_table_step(attributes=attributes)
exp()
| 4,244 |
Python
| 53.423076 | 273 | 0.753299 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue548/mas.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue548-base", "issue548-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
B_CONFIGS = {
'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
G_CONFIGS = {
'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
F_CONFIGS = {
'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
}
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
exp()
| 2,834 |
Python
| 63.431817 | 273 | 0.755116 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue547/issue547-v2-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
'astar_blind': [
'--search',
'astar(blind())'],
'lazy_greedy_cg': [
'--heuristic',
'h=cg()',
'--search',
'lazy_greedy(h, preferred=h)'],
'lazy_greedy_cg_randomized': [
'--heuristic',
'h=cg()',
'--search',
'lazy_greedy(h, preferred=h, randomize_successors=true)'],
'eager_greedy_ff': [
'--heuristic',
'h=ff()',
'--search',
'eager_greedy(h, preferred=h)'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf]
),
outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr)
)
exp()
| 1,529 |
Python
| 25.842105 | 111 | 0.59189 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue547/issue547.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport
# Cactus plots are experimental in lab, and require some changes to
# classes in lab, so we cannot add them as external files here.
try:
from downward.reports.cactus import CactusPlotReport
has_cactus_plot = True
except ImportError:
has_cactus_plot = False
from lab.experiment import Step
from lab.fetcher import Fetcher
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_blind': [
'--search',
'astar(blind())'],
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_lmcut': [
'--search',
'astar(lmcut())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
exp.add_step(Step('refetch', Fetcher(), exp.path, parsers=['custom-parser.py']))
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v1-%s" % conf]
),
outfile='issue547_base_v1_%s_%s.png' % (conf, attr)
)
if has_cactus_plot:
exp.add_report(CactusPlotReport(attributes=['successor_generator_time'],
filter_config_nick="astar_blind",
ylabel='successor_generator_time',
get_category=lambda run: run['config_nick'],
category_styles={'astar_blind': {'linestyle': '-', 'c':'red'}}
))
exp()
| 2,079 |
Python
| 28.714285 | 111 | 0.620972 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue547/issue547-v2-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf]
),
outfile='issue547_base_v2_%s_%s.png' % (conf, attr)
)
exp()
| 1,104 |
Python
| 25.309523 | 111 | 0.641304 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue547/issue547-v2-lama.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
'lama-2011-first': [
"--if-unit-cost",
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
"--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm])",
"--if-non-unit-cost",
"--heuristic",
"hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,"
" lm_cost_type=one,cost_type=one))",
"--heuristic",
"hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,"
" lm_cost_type=plusone,cost_type=plusone))",
"--search",
"lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], cost_type=one,reopen_closed=false)",
],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf]
),
outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr)
)
exp()
| 1,723 |
Python
| 30.345454 | 111 | 0.600116 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
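# This parser only records the length of each run log (in characters)
# as the "log_size" attribute; the experiment scripts use it to sort
# runs by how much output they produced.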
def compute_log_size(content, props):
props["log_size"] = len(content)
def main():
parser = Parser()
parser.add_function(compute_log_size)
parser.parse()
main()
| 234 |
Python
| 15.785713 | 41 | 0.666667 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/base-sat-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-base"]
CONFIG_DICT = {
"eager_greedy_ff": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h])"],
"eager_greedy_cea": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h])"],
"lazy_greedy_add": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lazy_greedy_cg": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h])"],
"lama-first": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""],
"lama-first-typed": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false)"],
}
CONFIGS = [
IssueConfig(config_nick, config,
driver_options=["--overall-time-limit", "30m"])
for rev in REVISIONS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec)
exp.run_steps()
| 3,211 |
Python
| 30.80198 | 103 | 0.644348 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/base-opt-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-base"]
SEARCHES = [
("bjolp", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc)"]),
("blind", ["--search", "astar(blind())"]),
("cegar", ["--search", "astar(cegar())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("lmcut", ["--search", "astar(lmcut())"]),
("mas", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1))"]),
("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
("h2", ["--search", "astar(hm(m=2))"]),
("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
IssueConfig(search_nick, search,
driver_options=["--overall-time-limit", "30m"])
for rev in REVISIONS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec)
exp.run_steps()
| 2,858 |
Python
| 33.035714 | 112 | 0.688943 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/v1-opt-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]
SEARCHES = [
("bjolp-silent", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc, verbosity=silent)"]),
("blind-silent", ["--search", "astar(blind(), verbosity=silent)"]),
("cegar-silent", ["--search", "astar(cegar(), verbosity=silent)"]),
# ("divpot", ["--search", "astar(diverse_potentials(), verbosity=silent)"]),
("ipdb-silent", ["--search", "astar(ipdb(), verbosity=silent)"]),
("lmcut-silent", ["--search", "astar(lmcut(), verbosity=silent)"]),
("mas-silent", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=silent)"]),
# ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=silent)"]),
("h2-silent", ["--search", "astar(hm(m=2), verbosity=silent)"]),
("hmax-silent", ["--search", "astar(hmax(), verbosity=silent)"]),
("bjolp-normal", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc, verbosity=normal)"]),
("blind-normal", ["--search", "astar(blind(), verbosity=normal)"]),
("cegar-normal", ["--search", "astar(cegar(), verbosity=normal)"]),
# ("divpot", ["--search", "astar(diverse_potentials(), verbosity=normal)"]),
("ipdb-normal", ["--search", "astar(ipdb(), verbosity=normal)"]),
("lmcut-normal", ["--search", "astar(lmcut(), verbosity=normal)"]),
("mas-normal", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=normal)"]),
# ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=normal)"]),
("h2-normal", ["--search", "astar(hm(m=2), verbosity=normal)"]),
("hmax-normal", ["--search", "astar(hmax(), verbosity=normal)"]),
]
CONFIGS = [
IssueConfig(search_nick, search,
driver_options=["--overall-time-limit", "30m"])
for rev in REVISIONS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()
log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
"{}-bjolp-silent".format(REVISIONS[0]),
"{}-blind-silent".format(REVISIONS[0]),
"{}-cegar-silent".format(REVISIONS[0]),
"{}-ipdb-silent".format(REVISIONS[0]),
"{}-lmcut-silent".format(REVISIONS[0]),
"{}-mas-silent".format(REVISIONS[0]),
"{}-h2-silent".format(REVISIONS[0]),
"{}-hmax-silent".format(REVISIONS[0]),
],name="silent")
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
"{}-bjolp-normal".format(REVISIONS[0]),
"{}-blind-normal".format(REVISIONS[0]),
"{}-cegar-normal".format(REVISIONS[0]),
"{}-ipdb-normal".format(REVISIONS[0]),
"{}-lmcut-normal".format(REVISIONS[0]),
"{}-mas-normal".format(REVISIONS[0]),
"{}-h2-normal".format(REVISIONS[0]),
"{}-hmax-normal".format(REVISIONS[0]),
],name="normal")
exp.run_steps()
| 5,254 |
Python
| 42.07377 | 132 | 0.67244 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/sortedreport.py
|
# -*- coding: utf-8 -*-
#
# Downward Lab uses the Lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from operator import itemgetter
import logging
from lab.reports import Table, DynamicDataModule
from downward.reports import PlanningReport
class SortedReport(PlanningReport):
def __init__(self, sort_spec, **kwargs):
PlanningReport.__init__(self, **kwargs)
self._sort_spec = sort_spec
def get_markup(self):
"""
Return `txt2tags <http://txt2tags.org/>`_ markup for the report.
"""
table = Table()
row_sort_module = RowSortModule(self._sort_spec)
table.dynamic_data_modules.append(row_sort_module)
for run_id, run in self.props.items():
row = {}
for key, value in run.items():
if key not in self.attributes:
continue
if isinstance(value, (list, tuple)):
key = '-'.join([str(item) for item in value])
row[key] = value
table.add_row(run_id, row)
return str(table)
class RowSortModule(DynamicDataModule):
def __init__(self, sort_spec):
self._sort_spec = sort_spec
def modify_printable_row_order(self, table, row_order):
col_names = [None] + table.col_names
entries = []
for row_name in row_order:
if row_name == 'column names (never printed)':
continue
entry = [row_name] + table.get_row(row_name)
entries.append(tuple(entry))
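        # Sort by the requested keys, applying them in reverse order: since
        # list.sort() is stable, the first entry of sort_spec ends up as the
        # primary sort key.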
for attribute, desc in reversed(self._sort_spec):
index = col_names.index(attribute)
reverse = desc == 'desc'
entries.sort(key=itemgetter(index), reverse=reverse)
new_row_order = ['column names (never printed)'] + [i[0] for i in entries]
return new_row_order
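# Illustrative usage (not part of the original module): a SortedReport can be
# attached to a lab experiment like any other report; the attribute names and
# output file below are assumptions chosen for this sketch.
#
#     report = SortedReport(
#         sort_spec=[("log_size", "desc")],
#         attributes=["run_dir", "log_size"])
#     exp.add_report(report, outfile="sorted-by-log-size.html")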
| 2,524 |
Python
| 32.223684 | 82 | 0.62916 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue744/v1-sat-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]
CONFIG_DICT = {
"eager-greedy-ff-silent": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h], verbosity=silent)"],
"eager-greedy-cea-silent": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h], verbosity=silent)"],
"lazy-greedy-add-silent": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h], verbosity=silent)"],
"lazy-greedy-cg-silent": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h], verbosity=silent)"],
"lama-first-silent": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false, verbosity=silent)"""],
"lama-first-typed-silent": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false, verbosity=silent)"],
"eager-greedy-ff-normal": [
"--evaluator",
"h=ff()",
"--search",
"eager_greedy([h], preferred=[h], verbosity=normal)"],
"eager-greedy-cea-normal": [
"--evaluator",
"h=cea()",
"--search",
"eager_greedy([h], preferred=[h], verbosity=normal)"],
"lazy-greedy-add-normal": [
"--evaluator",
"h=add()",
"--search",
"lazy_greedy([h], preferred=[h], verbosity=normal)"],
"lazy-greedy-cg-normal": [
"--evaluator",
"h=cg()",
"--search",
"lazy_greedy([h], preferred=[h], verbosity=normal)"],
"lama-first-normal": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false, verbosity=normal)"""],
"lama-first-typed-normal": [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff), single(hff, pref_only=true),"
"single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
"preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
"preferred_successors_first=false, verbosity=normal)"],
}
CONFIGS = [
IssueConfig(config_nick, config,
driver_options=["--overall-time-limit", "30m"])
for rev in REVISIONS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()
log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
"{}-eager-greedy-ff-silent".format(REVISIONS[0]),
"{}-eager-greedy-cea-silent".format(REVISIONS[0]),
"{}-lazy-greedy-add-silent".format(REVISIONS[0]),
"{}-lazy-greedy-cg-silent".format(REVISIONS[0]),
"{}-lama-first-silent".format(REVISIONS[0]),
"{}-lama-first-typed-silent".format(REVISIONS[0]),
],name="silent")
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
"{}-eager-greedy-ff-normal".format(REVISIONS[0]),
"{}-eager-greedy-cea-normal".format(REVISIONS[0]),
"{}-lazy-greedy-add-normal".format(REVISIONS[0]),
"{}-lazy-greedy-cg-normal".format(REVISIONS[0]),
"{}-lama-first-normal".format(REVISIONS[0]),
"{}-lama-first-typed-normal".format(REVISIONS[0]),
],name="normal")
exp.run_steps()
| 5,733 |
Python
| 36.477124 | 103 | 0.626374 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue422/issue422.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
exp = common_setup.IssueExperiment(
search_revisions=["issue422-base", "issue422-v1"],
configs={"lmcut": ["--search", "astar(lmcut())"]},
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 394 |
Python
| 19.789473 | 54 | 0.672589 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue899/v1-opt.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue899-base", "issue899-v1"]
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"])
exp.run_steps()
| 1,444 |
Python
| 26.788461 | 82 | 0.750693 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue794/axiom_time_parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
print 'Running axiom evaluation time parser'
parser = Parser()
parser.add_pattern('axiom_time_inner', r'AxiomEvaluator time in inner evaluate: (.+)', type=float)
parser.add_pattern('axiom_time_outer', r'AxiomEvaluator time in outer evaluate: (.+)', type=float)
parser.parse()
| 332 |
Python
| 29.272725 | 98 | 0.740964 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue470/issue470.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_merge_and_shrink_bisim': [
'--search',
'astar(merge_and_shrink('
+ 'merge_strategy=merge_linear(variable_order=reverse_level),'
+ 'shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false,'
+ 'group_by_h=true)))'],
'astar_merge_and_shrink_greedy_bisim': [
'--search',
'astar(merge_and_shrink('
+ 'merge_strategy=merge_linear(variable_order=reverse_level),'
+ 'shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,'
+ 'greedy=true,group_by_h=false)))'],
'astar_merge_and_shrink_dfp_bisim': [
'--search',
'astar(merge_and_shrink(merge_strategy=merge_dfp,'
+ 'shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,'
+ 'greedy=false,group_by_h=true)))'],
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue470-base", "issue470-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 1,436 |
Python
| 30.23913 | 84 | 0.550836 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue470/issue470-cg.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
"cg-lazy-nopref": [
"--heuristic", "h=cg()",
"--search", "lazy_greedy(h)"
],
"cg-lazy-pref": [
"--heuristic", "h=cg()",
"--search", "lazy_greedy(h, preferred=[h])"
],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue470-base", "issue470-v1"],
configs=CONFIGS,
suite=suites.suite_satisficing_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 547 |
Python
| 18.571428 | 54 | 0.561243 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue546/v1-limits.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue546-v1"]
LIMITS = {"search_time": 300, "search_memory": 1024}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"blind-fd-limits": ["--search", "astar(blind())"],
"blind-lab-limits": ["--search", "astar(blind())"],
}
class FastDownwardLimits(common_setup.IssueExperiment):
def _make_search_runs(self):
common_setup.IssueExperiment._make_search_runs(self)
for run in self.runs:
if "fd-limits" in run.properties["config_nick"]:
# Move limits to fast-downward.py
search_args, search_kwargs = run.commands["search"]
time_limit = search_kwargs["time_limit"]
mem_limit = search_kwargs["mem_limit"]
del search_kwargs["time_limit"]
del search_kwargs["mem_limit"]
search_args.insert(1, "--search-timeout")
search_args.insert(2, str(time_limit))
search_args.insert(3, "--search-memory")
search_args.insert(4, str(mem_limit))
exp = FastDownwardLimits(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp()
| 1,289 |
Python
| 28.999999 | 67 | 0.595035 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue546/v1-opt-fdss.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue546-base", "issue546-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"seq_opt_fdss_1": ["--alias", "seq-opt-fdss-1"],
"seq_opt_fdss_2": ["--alias", "seq-opt-fdss-2"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step(
attributes=common_setup.IssueExperiment.PORTFOLIO_ATTRIBUTES)
exp()
| 564 |
Python
| 19.178571 | 65 | 0.659574 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue532/timers-microbenchmark/main.cc
|
#include <ctime>
#include <functional>
#include <iostream>
#include <string>
#include <sys/times.h>
#include <unistd.h>
using namespace std;
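// Micro-benchmark comparing the per-call overhead of times() and
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID) against an empty function call.
// Build sketch (assumed, not part of the original file):
//     g++ -std=c++11 -O2 main.cc -o timer-benchmark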
void benchmark(const string &desc, int num_calls,
const function<void()> &func) {
cout << "Running " << desc << " " << num_calls << " times:" << flush;
clock_t start = clock();
for (int i = 0; i < num_calls; ++i)
func();
clock_t end = clock();
double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC;
cout << " " << duration << " seconds" << endl;
}
double get_time_with_times() {
struct tms the_tms;
times(&the_tms);
clock_t clocks = the_tms.tms_utime + the_tms.tms_stime;
return double(clocks) / sysconf(_SC_CLK_TCK);
}
double get_time_with_clock_gettime() {
timespec tp;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);
return tp.tv_sec + tp.tv_nsec / 1e9;
}
int main(int, char **) {
const int NUM_ITERATIONS = 10000000;
benchmark("nothing", NUM_ITERATIONS, [] () {});
cout << endl;
benchmark("times()",
NUM_ITERATIONS,
[&]() {get_time_with_times();});
benchmark("clock_gettime()",
NUM_ITERATIONS,
[&]() {get_time_with_clock_gettime();});
return 0;
}
| 1,269 |
C++
| 23.90196 | 73 | 0.57368 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue776/v1-lama-second.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue776-base", "issue776-v1"]
CONFIGS = [
IssueConfig('lama-second', [
"--heuristic",
"hlm2=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=plusone),transform=adapt_costs(plusone))",
"--heuristic",
"hff2=ff_synergy(hlm2)",
"--search",
"lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],reopen_closed=false)"
]),
]
SUITE = [
'barman-opt11-strips', 'barman-sat11-strips', 'citycar-opt14-adl',
'citycar-sat14-adl', 'elevators-opt08-strips', 'elevators-opt11-strips',
'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'openstacks-opt08-adl',
'openstacks-sat08-adl', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips',
'parking-sat11-strips', 'parking-sat14-strips', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pegsol-sat11-strips', 'scanalyzer-08-strips',
'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'tetris-opt14-strips', 'tetris-sat14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips'
]
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", partition="infai_1")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parse_again_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,196 |
Python
| 33.873015 | 112 | 0.715392 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue558/v1-ext.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue558-base", "issue558-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"lazy_wa3_ff": [
"--heuristic", "h=ff()",
"--search", "lazy_wastar(h,w=3,preferred=h)"],
"lazy_wa1000_ff": [
"--heuristic", "h=ff()",
"--search", "lazy_wastar(h,w=1000,preferred=h)"],
"lazy_greedy_ff": [
"--heuristic", "h=ff()",
"--search", "lazy_greedy(h,preferred=h,reopen_closed=true)"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 747 |
Python
| 20.999999 | 69 | 0.587684 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue558/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue558-base", "issue558-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"lazy_wa3_ff": [
"--heuristic",
"h=ff()",
"--search",
"lazy_wastar(h,w=3,preferred=h)"],
"lama-w5": [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,"
" lm_cost_type=plusone,cost_type=plusone))",
"--search", "lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)"]
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 767 |
Python
| 20.942857 | 76 | 0.573664 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue596/issue596-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue596-base", "issue596-v1"])
| 120 |
Python
| 16.285712 | 48 | 0.65 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue508/mas.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
import configs
REVS = ["issue508-base", "issue508-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
configs_optimal_core = configs.default_configs_optimal(ipc=False)
CONFIGS = {}
for nick in ["astar_merge_and_shrink_bisim", "astar_merge_and_shrink_greedy_bisim"]:
CONFIGS[nick] = configs_optimal_core[nick]
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 605 |
Python
| 20.642856 | 84 | 0.704132 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue583/issue583-v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue583-base-v2", "issue583-v3"])
| 123 |
Python
| 16.714283 | 51 | 0.650406 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue583/issue583-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue583-base", "issue583-v1"])
| 120 |
Python
| 16.285712 | 48 | 0.65 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue414/opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from downward import suites
import common_setup
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
REVS = ["issue414-base", "issue414"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
# The aliases are adjusted for the respective driver scripts by lab.
CONFIGS = {
"ipdb": ["--search", "astar(ipdb())"],
}
for alias in ["seq-opt-bjolp", "seq-opt-fdss-1", "seq-opt-fdss-2",
"seq-opt-lmcut", "seq-opt-merge-and-shrink"]:
CONFIGS[alias] = ["--alias", alias]
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 770 |
Python
| 19.837837 | 68 | 0.648052 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue414/sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from downward import suites
import common_setup
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
REVS = ["issue414-base", "issue414"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
# The aliases are adjusted for the respective driver scripts by lab.
CONFIGS = {
"seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"],
"seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"],
"seq_sat_fdss_2": ["--alias", "seq-sat-fdss-2"],
"lazy_greedy_ff": [
"--heuristic", "h=ff()",
"--search", "lazy_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 830 |
Python
| 20.307692 | 68 | 0.628916 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue469/raw_memory_parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class RawMemoryParser(Parser):
def __init__(self):
Parser.__init__(self)
self.add_pattern('raw_memory', r'Peak memory: (.+) KB', type=int, required=False)
if __name__ == '__main__':
parser = RawMemoryParser()
print 'Running RawMemoryParser parser'
parser.parse()
| 353 |
Python
| 21.124999 | 89 | 0.628895 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue469/issue469.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute
from lab.suites import suite_all
import common_setup
import os
exp = common_setup.IssueExperiment(
search_revisions=["issue469-base", "issue469-v1"],
configs={"astar_blind": ["--search", "astar(blind())"]},
suite=suite_all(),
)
parser = os.path.join(common_setup.get_script_dir(),
'raw_memory_parser.py')
exp.add_search_parser(parser)
def add_unexplained_errors_as_int(run):
if run.get('error').startswith('unexplained'):
run['unexplained_errors'] = 1
else:
run['unexplained_errors'] = 0
return run
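# The filter above turns the error string into a 0/1 attribute so the report
# can aggregate unexplained errors numerically.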
exp.add_absolute_report_step(
attributes=['raw_memory', Attribute('unexplained_errors', absolute=True)],
filter=add_unexplained_errors_as_int
)
exp()
| 841 |
Python
| 22.388888 | 78 | 0.662307 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue660/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots will not be generated'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_satisficing()
configs = {
IssueConfig('lazy-greedy-ff', [
'--heuristic',
'h=ff()',
'--search',
'lazy_greedy(h, preferred=h)'
]),
IssueConfig('lama-first', [],
driver_options=['--alias', 'lama-first']
),
IssueConfig('eager_greedy_cg', [
'--heuristic',
'h=cg()',
'--search',
'eager_greedy(h, preferred=h)'
]),
IssueConfig('eager_greedy_cea', [
'--heuristic',
'h=cea()',
'--search',
'eager_greedy(h, preferred=h)'
]),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step(
filter_domain=[
'assembly',
'miconic-fulladl',
'openstacks',
'openstacks-sat08-adl',
'optical-telegraphs',
'philosophers',
'psr-large',
'psr-middle',
'trucks',
],
)
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue660-base', 'issue660-v1'])
| 2,301 |
Python
| 26.734939 | 94 | 0.516732 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue595/issue595-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue595-v1", "issue595-v2"])
| 118 |
Python
| 15.999998 | 46 | 0.644068 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v7-sat-eager.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
NICKS = [
'eager_greedy_alt_ff_cg', 'eager_greedy_ff', 'eager_greedy_ff_no_pref',
'eager_pareto_ff', 'eager_wa3_cg'
]
CONFIGS = {}
for nick in NICKS:
CONFIGS[nick] = configs.default_configs_satisficing(ipc=False, extended=True)[nick]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v7-base", "issue77-v7"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 697 |
Python
| 20.812499 | 87 | 0.695839 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-sat-eager.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False)
# The following lines remove some configs that we don't currently
# support.
DISABLED = [
]
for key, value in list(CONFIGS.items()):
if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")):
del CONFIGS[key]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v3", "issue77-v4"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 750 |
Python
| 20.457142 | 72 | 0.706667 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/relative_scatter.py
|
from collections import defaultdict
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport
EPSILON = 0.01
def get_relative_change(val1, val2):
"""
>>> get_relative_change(10, 0)
-999.0
>>> get_relative_change(10, 1)
-9.0
>>> get_relative_change(10, 5)
-1.0
>>> get_relative_change(10, 10)
0.0
>>> get_relative_change(10, 15)
0.5
>>> get_relative_change(10, 20)
1.0
>>> get_relative_change(10, 100)
9.0
>>> get_relative_change(0, 10)
999.0
>>> get_relative_change(0, 0)
0.0
"""
assert val1 >= 0, val1
assert val2 >= 0, val2
if val1 == 0:
val1 = EPSILON
if val2 == 0:
val2 = EPSILON
if val1 > val2:
return 1 - val1 / float(val2)
return val2 / float(val1) - 1
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a specific attribute in two
configurations. The attribute value in config 1 is shown on the
x-axis and the relation to the value in config 2 on the y-axis.
If the value for config 1 is v1 and the value for config 2 is v2,
the plot contains the point (v1, 1 - v1/v2) if v1 > v2 and the
point (v1, v2/v1 - 1) otherwise.
"""
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples.
categories = defaultdict(list)
self.ylim_bottom = 0
self.ylim_top = 0
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 >= 0, (domain, problem, self.configs[0], val1)
assert val2 >= 0, (domain, problem, self.configs[1], val2)
x = val1
y = get_relative_change(val1, val2)
categories[category].append((x, y))
self.ylim_bottom = min(self.ylim_bottom, y)
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom *= 1.1
self.ylim_top *= 1.1
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'linear')
| 2,744 |
Python
| 30.918604 | 72 | 0.582362 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-sat2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
import downward.suites
# This experiment only tests the Lama-FF synergy, which sat1 did not
# test because it did not work in the issue77 branch.
CONFIGS = {
"synergy":
["--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
"--search", "eager_greedy([hff,hlm],preferred=[hff,hlm])"],
}
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v3-base", "issue77-v3"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 684 |
Python
| 24.370369 | 76 | 0.676901 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-sat1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.configs
import downward.suites
CONFIGS = downward.configs.default_configs_satisficing(extended=True)
# The following lines remove some configs that we don't currently
# support because the respective configurations are commented out
DISABLED = [
"seq_sat_fdss_1",
"seq_sat_fdss_2",
"seq_sat_lama_2011",
]
for key, value in list(CONFIGS.items()):
if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")):
del CONFIGS[key]
else:
for pos, arg in enumerate(value):
if ", pathmax=false" in arg:
# pathmax is gone in this branch
value[pos] = arg.replace(", pathmax=false", "")
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_satisficing_with_ipc11()
import common_setup
exp = common_setup.IssueExperiment(
search_revisions=["issue77-base", "issue77-v1"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 1,088 |
Python
| 24.325581 | 70 | 0.668199 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-opt1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.configs
import downward.suites
# "ipc=False" skips portfolio configurations which we don't need to
# test here.
CONFIGS = downward.configs.default_configs_optimal(ipc=False, extended=True)
# pathmax is gone in this branch, remove it:
for key, value in list(CONFIGS.items()):
for pos, arg in enumerate(value):
if ", pathmax=false" in arg:
value[pos] = arg.replace(", pathmax=false", "")
# selmax is currently disabled
del CONFIGS["astar_selmax_lmcut_lmcount"]
SUITE = downward.suites.suite_optimal_with_ipc11()
import common_setup
exp = common_setup.IssueExperiment(
search_revisions=["issue77-base", "issue77-v2"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 856 |
Python
| 24.205882 | 76 | 0.703271 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v7-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
CONFIGS = configs.default_configs_optimal(ipc=False, extended=False)
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_optimal_with_ipc11()
SCATTER_ATTRIBUTES = ["total_time"]
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v7-base", "issue77-v7"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=SCATTER_ATTRIBUTES, relative=True)
exp()
| 594 |
Python
| 21.037036 | 71 | 0.734007 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v6-sat-ehc.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
CONFIGS = {
"ehc_ff": [
"--search", "ehc(ff())"],
"ehc_add_pref": [
"--heuristic", "hadd=add()", "--search", "ehc(hadd, preferred=[hadd])"],
#"ehc_add_ff_pref": [
# "--search", "ehc(add(), preferred=[ff()],preferred_usage=RANK_PREFERRED_FIRST)"],
}
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v6-base", "issue77-v6"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 669 |
Python
| 22.103448 | 90 | 0.61435 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
CONFIGS = configs.default_configs_optimal(ipc=False, extended=False)
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_optimal_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v3", "issue77-v4"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 512 |
Python
| 17.999999 | 68 | 0.716797 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v5-sat-lazy.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
CONFIGS = {}
INCLUDE = ("lazy", "lama")
EXCLUDE = ("lazy_greedy_add", "lazy_greedy_cea", "lazy_greedy_cg")
for key, value in configs.default_configs_satisficing(ipc=False, extended=True).items():
if any(x in key for x in INCLUDE) and not any(x in key for x in EXCLUDE):
CONFIGS[key] = value
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-v5-base", "issue77-v5"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 753 |
Python
| 24.133333 | 88 | 0.694555 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-sat-lazy.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.suites
import common_setup
import configs
CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False)
DISABLED = [
]
for key, value in list(CONFIGS.items()):
if not key.startswith("lazy"):
del CONFIGS[key]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))
SUITE = downward.suites.suite_satisficing_with_ipc11()
exp = common_setup.IssueExperiment(
search_revisions=["issue77-base", "issue77-v4"],
configs=CONFIGS,
suite=SUITE
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 638 |
Python
| 18.968749 | 72 | 0.703762 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue602/v1-mco.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
SUITE_MCO14 = [
'barman-mco14-strips',
'cavediving-mco14-adl',
'childsnack-mco14-strips',
'citycar-mco14-adl',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'maintenance-mco14-adl',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def main(revisions=None):
suite = SUITE_MCO14
configs = [
IssueConfig("astar_goalcount", [
"--search",
"astar(goalcount)"]),
IssueConfig("eager_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("eager_greedy_cea", [
"--heuristic",
"h=cea()",
"--search",
"eager_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_ff", [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_add", [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("lazy_greedy_cg", [
"--heuristic",
"h=cg()",
"--search",
"lazy_greedy(h, preferred=h)"]),
IssueConfig("seq_sat_lama_2011", [], driver_options=[
"--alias", "seq-sat-lama-2011"]),
IssueConfig("seq_sat_fdss_1", [], driver_options=[
"--alias", "seq-sat-fdss-1"]),
IssueConfig("seq_sat_fdss_2", [], driver_options=[
"--alias", "seq-sat-fdss-2"]),
]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=[
#'cavediving-sat14-adl:testing01_easy.pddl',
#'childsnack-sat14-strips:child-snack_pfile05.pddl',
#'citycar-sat14-adl:p3-2-2-0-1.pddl',
#'ged-sat14-strips:d-3-6.pddl',
'hiking-sat14-strips:ptesting-1-2-7.pddl',
#'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl',
#'tetris-sat14-strips:p020.pddl',
#'thoughtful-sat14-strips:bootstrap-typed-01.pddl',
#'transport-sat14-strips:p01.pddl',
],
processes=4,
email='[email protected]',
)
exp.add_absolute_report_step()
exp()
main(revisions=['issue602-v1'])
| 2,976 |
Python
| 28.77 | 72 | 0.516465 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v7.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v6"), ("issue571-base", "issue571-base", "issue571-v7")])
| 193 |
Python
| 26.714282 | 121 | 0.658031 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v2-vs-base.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = []
CONFIGS = []
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_algorithm(
"base-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-base",
['--search', 'astar(ipdb(max_time=900))'])
exp.add_algorithm(
"v2-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-v2",
['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables=false))'])
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,218 |
Python
| 28.731707 | 82 | 0.7422 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue743-v1"]
CONFIGS = [
IssueConfig(
'ipdb-goal-vars-{goal_vars}'.format(**locals()),
['--search', 'astar(ipdb(max_time=900, consider_co_effect_vars={goal_vars}))'.format(**locals())])
for goal_vars in [False, True]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,130 |
Python
| 27.274999 | 106 | 0.738938 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue743-v2", "issue743-v3"]
CONFIGS = [
IssueConfig('ipdb-900s', ['--search', 'astar(ipdb(max_time=900))'])
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,000 |
Python
| 26.054053 | 71 | 0.759 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue733/v1.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab import tools
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue733-base", "issue733-v1"]
PYTHONS = ["python2.7", "python3.5"]
CONFIGS = [
IssueConfig(
"{python}".format(**locals()),
[],
driver_options=["--translate"])
for python in PYTHONS
]
SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE)
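# Override lab's default Slurm setup so that Python 3.5 (plus a matching
# matplotlib) is available on the grid nodes and lab stays on the PYTHONPATH.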
BaselSlurmEnvironment.ENVIRONMENT_SETUP = (
'module purge\n'
'module load Python/3.5.2-goolf-1.7.20\n'
'module load matplotlib/1.5.1-goolf-1.7.20-Python-3.5.2\n'
'PYTHONPATH="%s:$PYTHONPATH"' % tools.get_lab_path())
ENVIRONMENT = BaselSlurmEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
class PythonVersionExperiment(IssueExperiment):
def _add_runs(self):
IssueExperiment._add_runs(self)
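        # Prefix each fast-downward call with the interpreter encoded in the
        # algorithm name (e.g. "...-python2.7" or "...-python3.5").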
for run in self.runs:
python = run.algo.name.split("-")[-1]
command, kwargs = run.commands["fast-downward"]
command = [python] + command
run.commands["fast-downward"] = (command, kwargs)
exp = PythonVersionExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
del exp.commands["parse-search"]
exp.add_suite(BENCHMARKS_DIR, SUITE)
attributes = ["translator_time_done", "translator_peak_memory"]
exp.add_comparison_table_step(attributes=attributes)
compared_configs = [
("issue733-v1-python2.7", "issue733-v1-python3.5", "Diff")]
exp.add_report(
ComparativeReport(compared_configs, attributes=attributes),
name="compare-python-versions")
exp.run_steps()
| 2,020 |
Python
| 30.092307 | 88 | 0.700495 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/regression-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Before you can run the experiment you need to create duplicates of the
two tasks we want to test:
cd ../benchmarks/trucks-strips
for i in {00..19}; do cp p16.pddl p16-$i.pddl; done
for i in {00..19}; do cp domain_p16.pddl domain_p16-$i.pddl; done
cd ../freecell
for i in {00..19}; do cp probfreecell-11-5.pddl probfreecell-11-5-$i.pddl; done
Don't forget to remove the duplicate tasks afterwards. Otherwise they
will be included in subsequent experiments.
"""
import common_setup
REVS = ["issue544-base", "issue544-v1"]
LIMITS = {"search_time": 1800}
CONFIGS = {
"eager_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"],
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"],
"lazy_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = (["trucks-strips:p16-%02d.pddl" % i for i in range(20)] +
["freecell:probfreecell-11-5-%02d.pddl" % i for i in range(20)])
PRIORITY = 0 # number means maia experiment
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 1,616 |
Python
| 23.134328 | 79 | 0.592203 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue344/common_setup.py
|
# -*- coding: utf-8 -*-
import os.path
import platform
from lab.environments import MaiaEnvironment
from lab.steps import Step
from downward.checkouts import Translator, Preprocessor, Planner
from downward.experiments import DownwardExperiment
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the filename of the main script, e.g.
"/ham/spam/eggs.py" => "eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Found by searching upwards in the directory tree from the main
script until a directory with a subdirectory named ".hg" is found."""
path = os.path.abspath(get_script_dir())
while True:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
def build_combos_with_names(repo, combinations, revisions, search_revisions):
"""Build (combos, combo_names) lists for the given planner revisions.
combos and combo_names are parallel lists, where combos contains
(Translator, Preprocessor, Search) triples and combo_names are the names
for the respective combinations that lab uses internally.
See MyExperiment.__init__ for documentation of the parameters
combinations, revisions and search_revisions."""
combos = []
names = []
def build(*rev_triple):
combo, name = build_combo_with_name(repo, *rev_triple)
combos.append(combo)
names.append(name)
for triple in combinations or []:
build(triple)
for rev in revisions or []:
build(rev, rev, rev)
for rev in search_revisions or []:
build(search_revisions[0], search_revisions[0], rev)
return combos, names
def build_combo_with_name(repo, trans_rev, preprocess_rev, search_rev):
"""Generate a tuple (combination, name) for the given revisions.
combination is a (Translator, Preprocessor, Search) tuple
and name is the name that lab uses to refer to it."""
# TODO: In the future, it would be nice if we didn't need the name
# information any more, as it is somewhat of an implementation
# detail.
combo = (Translator(repo, trans_rev),
Preprocessor(repo, preprocess_rev),
Planner(repo, search_rev))
if trans_rev == preprocess_rev == search_rev:
name = str(search_rev)
else:
name = "%s-%s-%s" % (trans_rev, preprocess_rev, search_rev)
return combo, name
def is_on_grid():
"""Returns True if the current machine is on the maia grid.
Implemented by checking if host name ends with ".cluster".
"""
return platform.node().endswith(".cluster")
class MyExperiment(DownwardExperiment):
DEFAULT_TEST_SUITE = [
"zenotravel:pfile1",
"zenotravel:pfile2",
]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"total_time",
"search_time",
"memory",
"expansions_until_last_jump",
]
"""Wrapper for DownwardExperiment with a few convenience features."""
def __init__(self, configs=None, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
combinations=None, suite=None, do_test_run="auto",
test_suite=DEFAULT_TEST_SUITE, **kwargs):
"""Create a DownwardExperiment with some convenience features.
If "configs" is specified, it should be a dict of {nick:
cmdline} pairs that sets the planner configurations to test.
If "grid_priority" is specified and no environment is
specifically requested in **kwargs, use the maia environment
with the specified priority.
If "path" is not specified, the experiment data path is
derived automatically from the main script's filename.
If "repo" is not specified, the repository base is derived
automatically from the main script's path.
If "combinations" is specified, it should be a non-empty list
of revision triples of the form (translator_rev,
preprocessor_rev, search_rev).
If "revisions" is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search.
If "search_revisions" is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All experiments use the
translator and preprocessor component of the first
revision.
        It is possible to specify a mixture of "combinations",
"revisions" and "search_revisions".
If "suite" is specified, it should specify a problem suite.
If "do_test_run" is true, the "grid_priority" and
"environment" (from the base class) arguments are ignored and
a local experiment with default arguments is run instead. In
this case, the "suite" argument is replaced by the "test_suite"
argument.
If "do_test_run" is the string "auto" (the default), then
do_test_run is set to False when run on a grid machine and
to True otherwise. A grid machine is identified as one whose
node name ends with ".cluster".
"""
if do_test_run == "auto":
do_test_run = not is_on_grid()
if do_test_run:
# In a test run, overwrite certain arguments.
grid_priority = None
kwargs.pop("environment", None)
suite = test_suite
if grid_priority is not None and "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
combinations, self._combination_names = build_combos_with_names(
repo=repo,
combinations=combinations,
revisions=revisions,
search_revisions=search_revisions)
kwargs["combinations"] = combinations
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
if configs is not None:
for nick, config in configs.items():
self.add_config(nick, config)
if suite is not None:
self.add_suite(suite)
self._report_prefix = get_experiment_name()
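    # A minimal usage sketch (the revision names, config and suite below are
    # hypothetical and only meant to illustrate the constructor arguments):
    #
    #     exp = MyExperiment(
    #         search_revisions=["issueX-base", "issueX-v1"],
    #         configs={"astar_blind": ["--search", "astar(blind())"]},
    #         suite=["gripper:prob01.pddl"],
    #         grid_priority=0)
    #     exp.add_comparison_table_step()
    #     exp()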
def add_comparison_table_step(self, attributes=None):
revisions = self._combination_names
if len(revisions) != 2:
# TODO: Should generalize this by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
if attributes is None:
attributes = self.DEFAULT_TABLE_ATTRIBUTES
report = CompareRevisionsReport(*revisions, attributes=attributes)
self.add_report(report, outfile="%s-compare.html" % self._report_prefix)
def add_scatter_plot_step(self, attributes=None):
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
revisions = self._combination_names
if len(revisions) != 2:
# TODO: Should generalize this by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plots():
configs = [conf[0] for conf in self.configs]
for nick in configs:
config_before = "%s-%s" % (revisions[0], nick)
config_after = "%s-%s" % (revisions[1], nick)
for attribute in attributes:
name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
report = ScatterPlotReport(
filter_config=[config_before, config_after],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
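    # Note on the scatter plots generated above (names are illustrative): for
    # revisions ["base", "v1"] and a config nick "astar_blind", each plot
    # compares the runs "base-astar_blind" against "v1-astar_blind" and is
    # written to <eval-dir>/scatter/<experiment-name>-<attribute>-astar_blind.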
| 9,626 |
Python
| 34.788104 | 80 | 0.628922 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue344/exp14.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.configs
import downward.suites
import common_setup
exp = common_setup.MyExperiment(
grid_priority=0,
search_revisions=["issue344-base", "issue344-v5"],
configs=downward.configs.default_configs_optimal(),
suite=downward.suites.suite_optimal_with_ipc11(),
do_test_run="auto"
)
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 434 |
Python
| 18.772726 | 55 | 0.700461 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue908-base", "issue908-v4"]
CONFIGS = [
IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']),
IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('parser.py')
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + [
    Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]),
]
#exp.add_absolute_report_step()
exp.add_comparison_table_step(attributes=attributes)
exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"])
exp.run_steps()
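# The three custom attributes added above (generator_computation_time,
# cpdbs_computation_time, dominance_pruning_time) are extracted from the
# planner logs by the accompanying parser.py, registered via
# exp.add_parser('parser.py').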
| 2,121 |
Python
| 32.156249 | 164 | 0.751061 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('generator_computation_time', 'generator computation time: (.+)s', required=False, type=float)
parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float)
parser.add_pattern('dominance_pruning_time', 'Dominance pruning took (.+)s', required=False, type=float)
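# Example log lines these patterns are meant to match (timings are
# illustrative, not taken from a real run):
#   generator computation time: 0.42s
#   Canonical PDB heuristic computation time: 1.7s
#   Dominance pruning took 0.05s
# required=False means runs whose logs lack one of these lines are not
# reported as parsing errors.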
parser.parse()
| 432 |
Python
| 38.363633 | 123 | 0.75 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/issue439.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
import common_setup
SEARCH_REVS = ["issue439-base", "issue439-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
"lazy_greedy_add", "lazy_greedy_ff"]:
CONFIGS[name] = configs_satisficing_core[name]
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(common_setup.RegressionReport(
revision_nicks=exp.revision_nicks,
config_nicks=CONFIGS.keys()))
exp()
| 779 |
Python
| 21.941176 | 61 | 0.695764 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/regressions.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs
import common_setup
SEARCH_REVS = ["issue439-base", "issue439-v1"]
LIMITS = {"search_time": 1800}
SUITE = [
"airport:p45-airport5MUC-p6.pddl",
"elevators-sat08-strips:p22.pddl",
"parking-sat11-strips:pfile09-033.pddl",
"scanalyzer-08-strips:p30.pddl",
"transport-sat11-strips:p14.pddl",
"transport-sat11-strips:p16.pddl",
"trucks:p19.pddl",
"trucks-strips:p23.pddl",
]
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
"lazy_greedy_add", "lazy_greedy_ff"]:
CONFIGS[name] = configs_satisficing_core[name]
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"]
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.add_report(common_setup.RegressionReport(
revision_nicks=exp.revision_nicks,
config_nicks=CONFIGS.keys(),
attributes=attributes))
exp()
| 1,209 |
Python
| 25.304347 | 68 | 0.701406 |