file_path
stringlengths 20
202
| content
stringlengths 9
3.85M
| size
int64 9
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 8
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue781/v2-v3-combined.py
|
#! /usr/bin/env python
from collections import defaultdict
import os.path
import sys
import common_setup
# Paths derived from this script's location: experiment data lives in
# data/<script-name> next to this file.
FILE = os.path.abspath(__file__)
DIR = os.path.dirname(FILE)
FILENAME = os.path.splitext(os.path.basename(__file__))[0]
EXPS = os.path.join(DIR, "data")
EXPPATH = os.path.join(EXPS, FILENAME)
def remove_file(filename):
    """Delete *filename*; silently ignore OSError (e.g. a missing file)."""
    try:
        os.remove(filename)
    except OSError:
        # Best-effort cleanup: nothing to do if the file is already gone.
        pass
# Report-only experiment: clear the default steps, merge results fetched
# from the v2 and v3 evaluation directories, then emit one absolute report.
exp = common_setup.IssueExperiment()
exp.steps = []
# Delete the combined properties file first so data from a previous fetch
# cannot leak into this report.
exp.add_step(
    'remove-combined-properties',
    remove_file,
    os.path.join(exp.eval_dir, "properties"))
exp.add_fetcher(os.path.join(EXPS, "issue781-v2-eval"), merge=True)
exp.add_fetcher(os.path.join(EXPS, "issue781-v3-queue-ratio-eval"), merge=True)
# NOTE(review): ATTRIBUTES is never used below — the report passes
# DEFAULT_TABLE_ATTRIBUTES instead. Presumably leftover; confirm.
ATTRIBUTES = [
    "cost", "error", "run_dir", "search_start_time",
    "search_start_memory", "coverage", "expansions_until_last_jump",
    "total_time", "initial_h_value", "search_time", "abstractions",
    "stored_heuristics", "stored_values", "stored_lookup_tables",
]
# Compare the v2 algorithms against the v3 "queue" pruning variants.
exp.add_absolute_report_step(
    filter_algorithm=[
        "issue781-v2-blind-ec-min-0.0",
        "issue781-v2-blind-ec-min-0.2",
        "issue781-v2-blind-queue-min-0.0",
        "issue781-v3-blind-queue-min-0.2",
        "issue781-v2-blind-simple-min-0.0",
        "issue781-v2-blind-simple-min-0.2",
        "issue781-v2-lmcut-ec-min-0.0",
        "issue781-v2-lmcut-ec-min-0.2",
        "issue781-v2-lmcut-queue-min-0.0",
        "issue781-v3-lmcut-queue-min-0.2",
        "issue781-v2-lmcut-simple-min-0.0",
        "issue781-v2-lmcut-simple-min-0.2"],
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])
exp.run_steps()
| 1,668 |
Python
| 29.345454 | 102 | 0.657074 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue781/parser.py
|
#! /usr/binv/env python
"""Lab parser extracting the time spent on pruning operators."""
from lab.parser import Parser


def main():
    # print() call instead of the original Python 2 print statement, so the
    # parser also runs under Python 3.
    print('Running custom parser')
    parser = Parser()
    # flags="M": multi-line mode so ^/$ anchor to individual log lines.
    parser.add_pattern('time_for_pruning_operators', r'^Time for pruning operators: (.+)s$', type=float, flags="M")
    parser.parse()


main()
| 268 |
Python
| 18.214284 | 115 | 0.660448 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue781/v3-queue-ratio.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue781-v3"]
# One A* config per (heuristic, pruning strategy, minimum pruning ratio).
CONFIGS = [
    IssueConfig(
        "{heuristic}-{pruning}-min-{min_ratio}".format(**locals()),
        ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}(min_required_pruning_ratio={min_ratio}))".format(**locals())])
    for heuristic in ["blind", "lmcut"]
    for pruning in ["queue"]
    for min_ratio in [0.2]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])
# Test runs use a small suite and a single local process instead of Slurm.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
# Custom parser adding the time_for_pruning_operators attribute.
exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py"))
exp.add_absolute_report_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])
#exp.add_comparison_table_step()
exp.run_steps()
| 1,672 |
Python
| 31.803921 | 137 | 0.727273 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue792/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# Compare the base revision against v1 with a single blind-search config.
REVISIONS = ["issue792-base", "issue792-v1"]
CONFIGS = [
    IssueConfig('blind', ['--search', 'astar(blind())']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])
# Test runs use a small suite and a single local process instead of Slurm.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_comparison_table_step()
exp.run_steps()
| 1,235 |
Python
| 25.297872 | 68 | 0.74332 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue914/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue914-base", "issue914-v4"]
BUILDS = ["release"]
# Three merge-and-shrink strategies (DFP, reverse-level linear, SCC-DFP),
# each with bisimulation shrinking, 50k state limit and 900s loop limit.
CONFIG_NICKS = [
    ('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
    ('rl-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
    ('sccs-dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']),
]
# Cross product of builds and config nicks.
CONFIGS = [
    IssueConfig(
        config_nick,
        config,
        build_options=[build],
        driver_options=["--build", build])
    for build in BUILDS
    for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])
# Test runs use a small suite and 4 local processes instead of Slurm.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
# Custom parser producing the m&s attributes declared below.
exp.add_parser('ms-parser.py')
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True)
ms_num_remaining_factors = Attribute('ms_num_remaining_factors', absolute=False, min_wins=False)
ms_num_factors_kept = Attribute('ms_num_factors_kept', absolute=False, min_wins=False)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
    perfect_heuristic,
    ms_construction_time,
    ms_atomic_construction_time,
    ms_abstraction_constructed,
    ms_atomic_fts_constructed,
    ms_out_of_memory,
    ms_out_of_time,
    ms_memory_delta,
    ms_num_remaining_factors,
    ms_num_factors_kept,
    search_out_of_memory,
    search_out_of_time,
]
# Build the attribute list from a copy: the original code extended
# exp.DEFAULT_TABLE_ATTRIBUTES in place, mutating the shared class-level
# default list for every other experiment in the same process.
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) + extra_attributes
exp.add_comparison_table_step(attributes=attributes)
exp.run_steps()
| 4,396 |
Python
| 42.97 | 479 | 0.755687 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue914/ms-parser.py
|
#! /usr/bin/env python
"""Lab parser extracting merge-and-shrink statistics from planner logs."""
from lab.parser import Parser

parser = Parser()
# Raw string literals: the patterns contain regex escapes such as \( and \d,
# which are invalid escape sequences in plain string literals (they emit
# DeprecationWarnings on Python >= 3.6).
parser.add_pattern('ms_construction_time', r'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float)
parser.add_pattern('ms_atomic_construction_time', r'M&S algorithm timer: (.+)s \(after computation of atomic factors\)', required=False, type=float)
parser.add_pattern('ms_memory_delta', r'Final peak memory increase of merge-and-shrink algorithm: (\d+) KB', required=False, type=int)
parser.add_pattern('ms_num_remaining_factors', r'Number of remaining factors: (\d+)', required=False, type=int)
parser.add_pattern('ms_num_factors_kept', r'Number of factors kept: (\d+)', required=False, type=int)
def check_ms_constructed(content, props):
    """Record whether the m&s abstraction finished: true iff a construction
    time was parsed from the log."""
    props['ms_abstraction_constructed'] = (
        props.get('ms_construction_time') is not None)
# Derive ms_abstraction_constructed after the patterns above have run.
parser.add_function(check_ms_constructed)
def check_atomic_fts_constructed(content, props):
    """Record whether the atomic factored transition system was built:
    true iff an atomic construction time was parsed from the log."""
    props['ms_atomic_fts_constructed'] = (
        props.get('ms_atomic_construction_time') is not None)
# Derive ms_atomic_fts_constructed after the patterns above have run.
parser.add_function(check_atomic_fts_constructed)
def check_planner_exit_reason(content, props):
    """Classify timeout/out-of-memory failures by experiment phase.

    Sets the four boolean props ms_out_of_time, ms_out_of_memory,
    search_out_of_time and search_out_of_memory, attributing the failure
    to the merge-and-shrink construction when the abstraction was not
    finished, and to the search otherwise. Runs with an unexpected
    'error' value are reported and left unclassified.
    """
    ms_abstraction_constructed = props.get('ms_abstraction_constructed')
    error = props.get('error')
    if error not in ('success', 'timeout', 'out-of-memory'):
        # print() call instead of the original Python 2 print statement,
        # so the parser also runs under Python 3.
        print('error: %s' % error)
        return
    # Check whether merge-and-shrink computation or search ran out of
    # time or memory.
    ms_out_of_time = False
    ms_out_of_memory = False
    search_out_of_time = False
    search_out_of_memory = False
    if ms_abstraction_constructed is False:
        # Failure happened while the abstraction was still being built.
        if error == 'timeout':
            ms_out_of_time = True
        elif error == 'out-of-memory':
            ms_out_of_memory = True
    elif ms_abstraction_constructed is True:
        # Abstraction finished, so the failure happened during search.
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True
    props['ms_out_of_time'] = ms_out_of_time
    props['ms_out_of_memory'] = ms_out_of_memory
    props['search_out_of_time'] = search_out_of_time
    props['search_out_of_memory'] = search_out_of_memory
# Classify timeout/memory failures by phase (m&s construction vs. search).
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
    """Mark a solved run's heuristic as perfect when exactly
    plan_length + 1 states were expanded; unsolved runs are skipped."""
    plan_length = props.get('plan_length')
    if plan_length is None:
        return
    props['perfect_heuristic'] = (plan_length + 1 == props.get('expansions'))
parser.add_function(check_perfect_heuristic)
# Run all registered patterns and functions over the run's log files.
parser.parse()
| 2,893 |
Python
| 39.194444 | 147 | 0.683374 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v7.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v6", "issue814-v7"]
# Test runs only build release32; full runs cover all three builds.
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
# Lazy greedy search with add/ff, 5 minute limit, per build.
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "5m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
# Custom parser adding the simplify_* attributes used in the table below.
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])
# One relative scatter plot per (attribute, config) pair comparing the
# two revisions.
for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,252 |
Python
| 31.185714 | 108 | 0.678508 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/parser.py
|
#! /usr/bin/env python
"""
========================================================================
Simplifying 29928 unary operators... done! [17292 unary operators]
time to simplify: 0.022623s
========================================================================
=> Here we want to extract 29928 (simplify_before), 17292 (simplify_after) and
0.022623s (simplify_time).
"""
import re
from lab.parser import Parser

# print() call instead of the original Python 2 print statement, so the
# parser also runs under Python 3.
print('Running custom parser')
parser = Parser()
parser.add_pattern('simplify_before', r'^Simplifying (\d+) unary operators\.\.\. done! \[\d+ unary operators\]$', type=int)
parser.add_pattern('simplify_after', r'^Simplifying \d+ unary operators\.\.\. done! \[(\d+) unary operators\]$', type=int)
parser.add_pattern('simplify_time', r'^time to simplify: (.+)s$', type=float)
parser.parse()
| 812 |
Python
| 32.874999 | 123 | 0.575123 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/v1-satisficing.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
REVISIONS = [
    "issue990-base",
    "issue990-v1",
]
# Two satisficing configs: the lama-first alias and eager greedy with the
# Zhu/Givan landmark count heuristic.
CONFIGS = [
    common_setup.IssueConfig("lama-first", [],
                             driver_options=["--alias", "lama-first"]),
    common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]),
]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REPO = os.environ["DOWNWARD_REPO"]
# Full suite on the cluster; small suite with 2 local processes otherwise.
if common_setup.is_running_on_cluster():
    SUITE = common_setup.DEFAULT_SATISFICING_SUITE
    ENVIRONMENT = BaselSlurmEnvironment(
        partition="infai_2",
        email="[email protected]",
        export=["PATH", "DOWNWARD_BENCHMARKS"],
    )
else:
    SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=2)
exp = common_setup.IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_absolute_report_step()
exp.add_parse_again_step()
exp.run_steps()
| 1,393 |
Python
| 23.892857 | 111 | 0.693467 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/sat-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue456-base", "issue456-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
# Two satisficing configs: eager greedy with add(), lazy greedy with ff().
CONFIGS = {
    "eager_greedy_add": [
        "--heuristic",
        "h=add()",
        "--search",
        "eager_greedy(h, preferred=h)"],
    "lazy_greedy_ff": [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step()
# Calling the experiment object runs all of its steps.
exp()
| 650 |
Python
| 18.147058 | 45 | 0.573846 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/common.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# Pinned revision hash shared by the issue740 experiment scripts.
REVISIONS = ["8e0b8e1f6edc"]
EXPORTS = ["PYTHONPATH", "PATH", "DOWNWARD_BENCHMARKS"]
def generate_configs(sas_filenames):
    """Return IssueConfigs exercising the driver's --sas-file option.

    sas_filenames: iterable of SAS file names; None means "do not pass
    --sas-file" so the driver uses its default. For each name, several
    driver setups are generated: plain search, two aliases, a portfolio
    and translate-only.
    """
    configs = []
    for sas_file in sas_filenames:
        common_driver_options = [] if sas_file is None else ["--sas-file", sas_file]
        configs += [
            IssueConfig('lazy-greedy-blind-{}'.format(sas_file), ['--search', 'lazy_greedy([blind()])'],
                        driver_options=common_driver_options + []),
            IssueConfig('lama-first-{}'.format(sas_file), [],
                        driver_options=common_driver_options + ["--alias", "lama-first"]),
            IssueConfig("seq_sat_fdss_1-{}".format(sas_file), [],
                        driver_options=common_driver_options + ["--alias", "seq-sat-fdss-1"]),
            # Fixed config nick: this entry runs the fdss_2 portfolio but
            # was labelled "seq_sat_fdss_-..." (the trailing 2 was missing).
            IssueConfig("seq_sat_fdss_2-{}".format(sas_file), [],
                        driver_options=common_driver_options + ["--portfolio", "driver/portfolios/seq_sat_fdss_2.py",
                                                                "--overall-time-limit", "20s"]),
            IssueConfig('translate-only-{}'.format(sas_file), [],
                        driver_options=['--translate'] + common_driver_options),
        ]
    return configs
def generate_experiments(configs):
    """Build and run an IssueExperiment with the given configs.

    Uses a small fixed benchmark suite on the Basel Slurm grid, or the
    default test suite with 2 local processes for test runs, and finishes
    with an absolute report.
    """
    SUITE = ["gripper:prob01.pddl",
             "blocks:probBLOCKS-5-0.pddl",
             "visitall-sat11-strips:problem12.pddl",
             "airport:p01-airport1-p1.pddl"]
    ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=EXPORTS)
    if common_setup.is_test_run():
        SUITE = IssueExperiment.DEFAULT_TEST_SUITE
        ENVIRONMENT = LocalEnvironment(processes=2)
    exp = IssueExperiment(
        revisions=REVISIONS,
        configs=configs,
        environment=ENVIRONMENT,
    )
    exp.add_suite(BENCHMARKS_DIR, SUITE)
    exp.add_absolute_report_step()
    exp.run_steps()
| 2,196 |
Python
| 33.328124 | 117 | 0.59745 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/out_sas.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common
# We want to test both NOT specifying the -sas-file option AND specifying the default "output.sas" value.
# The result should be the same in both cases
# (generate_configs accepts any iterable, so a tuple works here).
common.generate_experiments(common.generate_configs((None, "output.sas")))
| 289 |
Python
| 35.249996 | 105 | 0.737024 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/foobar_sas.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common
# Run the shared issue740 experiment with a non-default SAS file name.
common.generate_experiments(common.generate_configs(["foobar.sas"]))
| 132 |
Python
| 17.999997 | 68 | 0.69697 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/base.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue939-base"]
# Single configuration: run only the translator, no search.
CONFIGS = [
    IssueConfig(
        "translate-only",
        [],
        driver_options=["--translate"])
]
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]")
# This was generated by running "./suites.py all" in the benchmarks
# repository.
SUITE = [
'agricola-opt18-strips',
'agricola-sat18-strips',
'airport',
'airport-adl',
'assembly',
'barman-mco14-strips',
'barman-opt11-strips',
'barman-opt14-strips',
'barman-sat11-strips',
'barman-sat14-strips',
'blocks',
'caldera-opt18-adl',
'caldera-sat18-adl',
'caldera-split-opt18-adl',
'caldera-split-sat18-adl',
'cavediving-14-adl',
'childsnack-opt14-strips',
'childsnack-sat14-strips',
'citycar-opt14-adl',
'citycar-sat14-adl',
'data-network-opt18-strips',
'data-network-sat18-strips',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'elevators-sat08-strips',
'elevators-sat11-strips',
'flashfill-sat18-adl',
'floortile-opt11-strips',
'floortile-opt14-strips',
'floortile-sat11-strips',
'floortile-sat14-strips',
'freecell',
'ged-opt14-strips',
'ged-sat14-strips',
'grid',
'gripper',
'hiking-agl14-strips',
'hiking-opt14-strips',
'hiking-sat14-strips',
'logistics00',
'logistics98',
'maintenance-opt14-adl',
'maintenance-sat14-adl',
'miconic',
'miconic-fulladl',
'miconic-simpleadl',
'movie',
'mprime',
'mystery',
'no-mprime',
'no-mystery',
'nomystery-opt11-strips',
'nomystery-sat11-strips',
'nurikabe-opt18-adl',
'nurikabe-sat18-adl',
'openstacks',
'openstacks-agl14-strips',
'openstacks-opt08-adl',
'openstacks-opt08-strips',
'openstacks-opt11-strips',
'openstacks-opt14-strips',
'openstacks-sat08-adl',
'openstacks-sat08-strips',
'openstacks-sat11-strips',
'openstacks-sat14-strips',
'openstacks-strips',
'optical-telegraphs',
'organic-synthesis-opt18-strips',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-opt18-strips',
'organic-synthesis-split-sat18-strips',
'parcprinter-08-strips',
'parcprinter-opt11-strips',
'parcprinter-sat11-strips',
'parking-opt11-strips',
'parking-opt14-strips',
'parking-sat11-strips',
'parking-sat14-strips',
'pathways',
'pathways-noneg',
'pegsol-08-strips',
'pegsol-opt11-strips',
'pegsol-sat11-strips',
'petri-net-alignment-opt18-strips',
'philosophers',
'pipesworld-notankage',
'pipesworld-tankage',
'psr-large',
'psr-middle',
'psr-small',
'rovers',
'satellite',
'scanalyzer-08-strips',
'scanalyzer-opt11-strips',
'scanalyzer-sat11-strips',
'schedule',
'settlers-opt18-adl',
'settlers-sat18-adl',
'snake-opt18-strips',
'snake-sat18-strips',
'sokoban-opt08-strips',
'sokoban-opt11-strips',
'sokoban-sat08-strips',
'sokoban-sat11-strips',
'spider-opt18-strips',
'spider-sat18-strips',
'storage',
'termes-opt18-strips',
'termes-sat18-strips',
'tetris-opt14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'thoughtful-sat14-strips',
'tidybot-opt11-strips',
'tidybot-opt14-strips',
'tidybot-sat11-strips',
'tpp',
'transport-opt08-strips',
'transport-opt11-strips',
'transport-opt14-strips',
'transport-sat08-strips',
'transport-sat11-strips',
'transport-sat14-strips',
'trucks',
'trucks-strips',
'visitall-opt11-strips',
'visitall-opt14-strips',
'visitall-sat11-strips',
'visitall-sat14-strips',
'woodworking-opt08-strips',
'woodworking-opt11-strips',
'woodworking-sat08-strips',
'woodworking-sat11-strips',
'zenotravel',
]
# Test runs use a small suite and 4 local processes instead of Slurm.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("translator_additional_parser.py")
# Keep output.sas after each run — presumably so the additional parser can
# hash it; confirm against translator_additional_parser.py.
del exp.commands['remove-output-sas']
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_parse_again_step()
exp.add_fetcher(name='fetch')
exp.run_steps()
| 4,843 |
Python
| 24.361256 | 68 | 0.655585 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/fetch.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.experiment import Experiment
from downward.reports import PlanningReport
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
import common_setup
DIR = os.path.dirname(os.path.abspath(__file__))
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]")
if common_setup.is_test_run():
    ENVIRONMENT = LocalEnvironment(processes=4)
# Fetch/report-only experiment: a bare lab Experiment with no runs of its own.
exp = Experiment()
class TranslatorDiffReport(PlanningReport):
    """Semicolon-separated listing of runs whose translator output hashes
    disagree (or are missing) across algorithms for the same task."""

    def get_cell(self, run):
        """Join this run's report attributes into one semicolon-separated cell."""
        cells = (run.get(attr) for attr in self.attributes)
        return ";".join(cells)

    def get_text(self):
        """Return one line per task with diverging or missing hashes."""
        rows = []
        for runs in self.problem_runs.values():
            hashes = {r.get("translator_output_sas_hash") for r in runs}
            # A task is interesting if the hash differs between algorithms
            # or is missing for at least one of them.
            if len(hashes) > 1 or None in hashes:
                rows.append(";".join(self.get_cell(r) for r in runs))
        return "\n".join(rows)
class SameValueFilters(object):
    """Ignore runs for a task where all algorithms have the same value.

    Intended as a two-pass lab filter: pass store_values first to collect
    each task's values, then filter_tasks_with_equal_values to drop tasks
    whose runs all agree on the watched attribute.
    """

    def __init__(self, attribute):
        self._attribute = attribute
        self._tasks_to_values = defaultdict(list)

    def _get_task(self, run):
        # A task is identified by its (domain, problem) pair.
        return (run['domain'], run['problem'])

    def store_values(self, run):
        # First pass: remember the watched value per task; never filter here.
        task = self._get_task(run)
        self._tasks_to_values[task].append(run.get(self._attribute))
        return True

    def filter_tasks_with_equal_values(self, run):
        # Second pass: keep the run only if its task saw >1 distinct value.
        distinct = set(self._tasks_to_values[self._get_task(run)])
        return len(distinct) != 1
# Merge the base and v1 evaluation data into one set of properties.
exp.add_fetcher(src='data/issue939-base-eval')
exp.add_fetcher(src='data/issue939-v1-eval', merge=True)
ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
#exp.add_comparison_table_step(attributes=ATTRIBUTES)
same_value_filters = SameValueFilters("translator_output_sas_hash")
# exp.add_comparison_table_step(
#     name="filtered",
#     attributes=ATTRIBUTES,
#     filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])
# CSV of all runs whose translator output hash differs between algorithms.
exp.add_report(TranslatorDiffReport(
    attributes=["domain", "problem", "algorithm", "run_dir"]
    ), outfile="different_output_sas.csv"
)
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES))
exp.add_report(ComparativeReport([
    ('issue939-base-translate-only', 'issue939-v1-translate-only')
    ], attributes=ATTRIBUTES))
exp.run_steps()
| 2,598 |
Python
| 29.57647 | 98 | 0.687452 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/translator_additional_parser.py
|
#!/usr/bin/env python
# Lab parser: hashes the translator's output.sas so different revisions'
# outputs can be compared for equality.
import hashlib
from lab.parser import Parser
def add_hash_value(content, props):
    """Store the SHA-512 hex digest of the file content under
    'translator_output_sas_hash'."""
    digest = hashlib.sha512(str(content).encode('utf-8'))
    props['translator_output_sas_hash'] = digest.hexdigest()
parser = Parser()
# Run add_hash_value on the raw contents of each run's output.sas file.
parser.add_function(add_hash_value, file="output.sas")
parser.parse()
| 294 |
Python
| 21.692306 | 98 | 0.731293 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/v2-v4-compare.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run
BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks')
# Report-only script: no revisions/configs are run here; the data is
# fetched from earlier v2 and v4 evaluations below.
REVISIONS = []
CONFIGS = []
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email='[email protected]')
if is_test_run():
    SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
    perfect_heuristic,
    ms_construction_time,
    ms_atomic_construction_time,
    ms_abstraction_constructed,
    ms_final_size,
    ms_out_of_memory,
    ms_out_of_time,
    search_out_of_memory,
    search_out_of_time,
]
# Build the attribute list from a copy: the original code extended
# exp.DEFAULT_TABLE_ATTRIBUTES in place, mutating the shared class-level
# default list for every other experiment in the same process.
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) + extra_attributes
# Merge the previously computed v2 and v4 evaluation data.
exp.add_fetcher('data/issue668-v2-eval')
exp.add_fetcher('data/issue668-v4-eval')
# Pairwise v2-vs-v4 comparison for the abp (atomic-before-product) configs.
# NOTE(review): this report uses outfile= while the two below use name= —
# presumably equivalent here; confirm against the ComparativeReport API.
exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[
    ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v4'),
    ]),outfile='issue668-v2-v4-compare-abp.html')
# Same comparison for the pba (product-before-atomic) configs.
exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[
    ('%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v4'),
    ]),name='issue668-v2-v4-compare-pba.html')
# Reduced selection of pairs used for the paper.
exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[
    ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'),
    ('%s-sbf-miasm-allrnd-b50k' % 'issue668-v2','%s-sbf-miasm-allrnd-b50k' % 'issue668-v4'),
    ]),name='issue668-v2-v4-compare-paper.html')
exp.run_steps()
| 5,070 |
Python
| 49.20792 | 145 | 0.686982 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/v5-paper.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue668 (v5-hack): run the seven "sbf-miasm" merge-and-shrink
# configurations reported in the paper. The six main configurations differ
# only in the total_order tie-breaking options (atomic_ts_order in
# {reverse_level, level, random} x atomic_before_product in {true, false});
# "allrnd" breaks ties fully at random (single_random).

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean

from downward.reports.absolute import AbsoluteReport

from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run

BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks')
REVISIONS = ["issue668-v5-hack"]
CONFIGS = [
    IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
    IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']),
]
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH"], partition='infai_1')

if is_test_run():
    SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
exp.add_parser('ms_parser', 'ms-parser.py')

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)

# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)

extra_attributes = [
    perfect_heuristic,
    ms_construction_time,
    ms_atomic_construction_time,
    ms_abstraction_constructed,
    ms_final_size,
    ms_out_of_memory,
    ms_out_of_time,
    search_out_of_memory,
    search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)

# Absolute report restricted to exactly the seven paper configurations.
exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[
    '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack',
    '%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack',
]),outfile='issue668-v5-paper.html')

exp.run_steps()
| 6,978 |
Python
| 74.04301 | 587 | 0.766982 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue123/issue123.py
|
#! /usr/bin/env python
# issue123: compare LAMA-2011-style configurations that use the built-in
# lm_ff_syn synergy against "separated" variants that define the
# landmark-count and FF heuristics independently.

from standard_experiment import REMOTE, get_exp

from downward import suites
#from lab.reports import Attribute, avg

import os.path

# Set the following variables for the experiment
REPO_NAME = 'fd-issue123'
# revisions, e.g. ['3d6c1ccacdce']
REVISIONS = ['issue123-base']
# suites, e.g. ['gripper:prob01.pddl', 'zenotravel:pfile1'] or suites.suite_satisficing_with_ipc11()
LOCAL_SUITE = ['depot:pfile1']
GRID_SUITE = suites.suite_satisficing_with_ipc11()
# configs, e.g. '--search', 'astar(lmcut())' for config
CONFIGS = {
    # Full LAMA 2011 using the lm_ff_syn synergy; separate branches for
    # unit-cost and non-unit-cost tasks.
    'lama-2011': [
        "--if-unit-cost",
        "--heuristic",
        "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
        "--search", "iterated(["
        " lazy_greedy([hff,hlm],preferred=[hff,hlm]),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)"
        " ],repeat_last=true,continue_on_fail=true)",
        "--if-non-unit-cost",
        "--heuristic",
        "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=one,cost_type=one))",
        "--heuristic",
        "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=plusone,cost_type=plusone))",
        "--search", "iterated(["
        " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],"
        " cost_type=one,reopen_closed=false),"
        " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],"
        " reopen_closed=false),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)"
        " ],repeat_last=true,continue_on_fail=true)",
    ],
    # Only the first (greedy) iteration of LAMA 2011, with the synergy.
    'lama-2011-first-it': [
        "--heuristic",
        "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=one,cost_type=one))",
        "--search",
        "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)"
    ],
    # Same search as lama-2011 but with lmcount and ff defined as two
    # independent heuristics instead of the lm_ff_syn synergy.
    'lama-2011-separated': [
        "--if-unit-cost",
        "--heuristic",
        "hlm=lmcount(lm_rhw(reasonable_orders=true),pref=true)",
        "--heuristic",
        "hff=ff()",
        "--search", "iterated(["
        " lazy_greedy([hff,hlm],preferred=[hff,hlm]),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),"
        " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)"
        " ],repeat_last=true,continue_on_fail=true)",
        "--if-non-unit-cost",
        "--heuristic",
        "hlm1=lmcount(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=one,cost_type=one),"
        " pref=true,cost_type=one)",
        "--heuristic",
        "hff1=ff(cost_type=one)",
        "--heuristic",
        "hlm2=lmcount(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=plusone,cost_type=plusone),"
        " pref=true,cost_type=plusone)",
        "--heuristic",
        "hff2=ff(cost_type=plusone)",
        "--search", "iterated(["
        " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],"
        " cost_type=one,reopen_closed=false),"
        " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],"
        " reopen_closed=false),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),"
        " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)"
        " ],repeat_last=true,continue_on_fail=true)",
    ],
    # First iteration only, with separated heuristics.
    'lama-2011-first-it-separated': [
        "--heuristic",
        "hlm=lmcount(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=one,cost_type=one),"
        " pref=true,cost_type=one)",
        "--heuristic",
        "hff=ff(cost_type=one)",
        "--search",
        "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)",
    ],
}
# limits, e.g. { 'search_time': 120 }
LIMITS = None
# for 'make debug', set to True.
COMPILATION_OPTION = None #(default: 'release')
# choose any lower priority if whished
PRIORITY = None #(default: 0)

# Do not change anything below here
SCRIPT_PATH = os.path.abspath(__file__)
if REMOTE:
    SUITE = GRID_SUITE
    REPO = os.path.expanduser('~/repos/' + REPO_NAME)
else:
    SUITE = LOCAL_SUITE
    REPO = os.path.expanduser('~/work/' + REPO_NAME)

# Create the experiment. Add parsers, fetchers or reports...
exp = get_exp(script_path=SCRIPT_PATH, repo=REPO, suite=SUITE,
              configs=CONFIGS, revisions=REVISIONS, limits=LIMITS,
              compilation_option=COMPILATION_OPTION, priority=PRIORITY)
exp.add_score_attributes()
exp.add_extra_attributes(['quality'])

# Pairwise reports: synergy vs. separated, for the full configuration and
# for the first iteration only.
REV = REVISIONS[0]
configs_lama = [('%s-lama-2011' % REV, '%s-lama-2011-separated' % REV)]
exp.add_configs_report(compared_configs=configs_lama, name='lama')
configs_lama_first_it = [('%s-lama-2011-first-it' % REV, '%s-lama-2011-first-it-separated' % REV)]
exp.add_configs_report(compared_configs=configs_lama_first_it, name='lama-first-it')
exp.add_absolute_report()

exp()
| 5,928 |
Python
| 41.049645 | 100 | 0.529184 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue392/lama-nonunit.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue392: LAMA-style iterated search on non-unit-cost tasks, evaluating
# every combination of randomized successor ordering and
# preferred-successors-first ordering.

from downward import suites

import common_setup


REVS = ["issue392-v2"]
LIMITS = {"search_time": 300}

# One configuration per (randomize_successors, preferred_successors_first)
# combination; the option pair is spliced into every search engine call.
CONFIGS = {}
for randomize in ["false", "true"]:
    for pref_first in ["false", "true"]:
        nick = "lama-nonunit-randomize-{randomize}-pref_first-{pref_first}".format(
            randomize=randomize, pref_first=pref_first)
        opts = ("randomize_successors={randomize},"
                "preferred_successors_first={pref_first}".format(
                    randomize=randomize, pref_first=pref_first))
        CONFIGS[nick] = [
            "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))",
            "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))",
            "--search",
            "iterated(["
            "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," + opts + ",cost_type=ONE,reopen_closed=false),"
            "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2]," + opts + ",reopen_closed=false),"
            "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2]," + opts + ",w=5),"
            "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2]," + opts + ",w=3),"
            "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2]," + opts + ",w=2),"
            "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2]," + opts + ",w=1)],"
            "repeat_last=true,continue_on_fail=true)"
        ]

# Benchmarks: satisficing tasks that also have diverse (non-unit) costs.
SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) &
               set(suites.suite_diverse_costs()))

exp = common_setup.IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_absolute_report_step()

exp()
| 1,902 |
Python
| 43.255813 | 176 | 0.650368 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue392/lama-unit.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue392: unit-cost counterpart of lama-nonunit.py. Evaluates LAMA-style
# iterated search with every combination of randomized successor ordering
# and preferred-successors-first ordering.

from downward import suites

import common_setup


REVS = ["issue392-v2"]
LIMITS = {"search_time": 300}

# One configuration per (randomize, pref_first) combination; the options
# are interpolated into every search engine call via % locals().
CONFIGS = {}
for randomize in ["false", "true"]:
    for pref_first in ["false", "true"]:
        CONFIGS["lama-unit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [
            "--heuristic",
            "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))",
            "--search",
            "iterated(["
            "lazy_greedy([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s),"
            "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5),"
            "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3),"
            "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2),"
            "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)],"
            "repeat_last=true,continue_on_fail=true)" % locals()
        ]

# Benchmarks: satisficing tasks restricted to unit-cost domains.
SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) &
               set(suites.suite_unit_costs()))

exp = common_setup.IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_absolute_report_step()

exp()
| 1,532 |
Python
| 35.499999 | 139 | 0.648172 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
def add_dominance_pruning_failed(content, props):
    """Set props["dominance_pruning_failed"] to 1 (else 0) when dominance
    pruning was supposed to run (not disabled in the log, PDB collection
    was built) but no pruning time was reported."""
    pruning_disabled = "dominance_pruning=False" in content
    collection_built = "pdb_collection_construction_time" in props
    pruning_reported = "dominance_pruning_time" in props
    failed = not pruning_disabled and collection_built and not pruning_reported
    props["dominance_pruning_failed"] = int(failed)
def main():
    """Parse PDB-construction and dominance-pruning statistics from run logs.

    All patterns are optional (required=False) because not every
    configuration performs dominance pruning or even builds a PDB
    collection.
    """
    # Parenthesized call works under both Python 2 and Python 3; the old
    # `print "..."` statement is a SyntaxError on Python 3.
    print("Running custom parser")
    parser = Parser()
    # Raw strings so the regex escapes (\d) are not interpreted by Python.
    parser.add_pattern(
        "pdb_collection_construction_time", r"^PDB collection construction time: (.+)s$", type=float, flags="M", required=False)
    parser.add_pattern(
        "dominance_pruning_time", r"^Dominance pruning took (.+)s$", type=float, flags="M", required=False)
    parser.add_pattern(
        "dominance_pruning_pruned_subsets", r"Pruned (\d+) of \d+ maximal additive subsets", type=int, required=False)
    parser.add_pattern(
        "dominance_pruning_pruned_pdbs", r"Pruned (\d+) of \d+ PDBs", type=int, required=False)
    parser.add_function(add_dominance_pruning_failed)
    parser.parse()


main()
| 1,069 |
Python
| 31.424241 | 127 | 0.667914 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/v3-no-pruning.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue735 (v3): measure the cost of dominance pruning for canonical PDBs
# by running each PDB generator (systematic(2) and hill climbing) once with
# pruning enabled and once with it disabled.

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue735-v3"]
BUILD_OPTIONS = ["release32nolp"]
DRIVER_OPTIONS = ["--build", "release32nolp"]
# Cross product of generator x pruning flag.
CONFIGS = [
    IssueConfig(
        "cpdbs-{nick}-pruning-{pruning}".format(**locals()),
        ["--search", "astar(cpdbs({generator}, dominance_pruning={pruning}))".format(**locals())],
        build_options=BUILD_OPTIONS,
        driver_options=DRIVER_OPTIONS)
    for nick, generator in [("sys2", "systematic(2)"), ("hc", "hillclimbing(max_time=900)")]
    for pruning in [False, True]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

# Extra report columns produced by custom-parser.py (registered below).
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [
    "dominance_pruning_failed",
    "dominance_pruning_time",
    "dominance_pruning_pruned_subsets",
    "dominance_pruning_pruned_pdbs",
    "pdb_collection_construction_time"]

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_resource("custom_parser", "custom-parser.py")
exp.add_command("run-custom-parser", ["{custom_parser}"])
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
| 1,703 |
Python
| 30.555555 | 98 | 0.714621 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue468/issue468.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue468: A* with an admissible landmark-count heuristic over the merged
# RHW and h^m (m=1) landmark graphs, multi-path dependence (mpd) enabled.

from downward import suites

import common_setup


SEARCH = 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)'
CONFIGS = {
    'astar_lmcount_lm_merged_rhw_hm': ['--search', SEARCH],
}

exp = common_setup.IssueExperiment(
    search_revisions=["issue468-base", "issue468-v1"],
    configs=CONFIGS,
    suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()

exp()
| 462 |
Python
| 19.130434 | 85 | 0.640693 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue425/opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Optimal-track experiment for issue425: compare the base and v1 revisions
# on all default optimal configurations (minus one that is disabled in this
# branch) and emit absolute, comparison and grouped per-config reports.

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup


REVISIONS = ["issue425-base", "issue425-v1"]
CONFIGS = configs.default_configs_optimal()

# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']

exp = common_setup.IssueExperiment(
    search_revisions=REVISIONS,
    configs=CONFIGS,
    suite=suites.suite_optimal_with_ipc11(),
    limits={"search_time": 300}
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()

def grouped_configs_to_compare(config_nicks):
    # For each config nick, pair up the two revision-qualified algorithm
    # names (one per entry in REVISIONS) and label the diff column.
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
        grouped_configs.append((col_names[0], col_names[1],
                                'Diff - %s' % config_nick))
    return grouped_configs

exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(configs.configs_optimal_core()),
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES,
    ),
    outfile="issue425-opt-compare-core-configs.html"
)
def add_first_run_search_time(run):
    """Expose the search time of the first iterated-search run (if any) as
    the "first_run_search_time" attribute; return the (possibly updated)
    run."""
    times = run.get("search_time_all", [])
    if times:
        run["first_run_search_time"] = times[0]
    return run
# Portfolio configurations additionally report the search time of the
# first iterated-search run (added by the filter above).
exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(configs.configs_optimal_ipc()),
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"],
    filter=add_first_run_search_time,
    ),
    outfile="issue425-opt-compare-portfolio-configs.html"
)

exp()
| 1,764 |
Python
| 32.301886 | 113 | 0.650794 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue425/sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Satisficing-track counterpart of the issue425 optimal experiment: compare
# base and v1 on all default satisficing configurations.

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup


REVISIONS = ["issue425-base", "issue425-v1"]

exp = common_setup.IssueExperiment(
    search_revisions=REVISIONS,
    configs=configs.default_configs_satisficing(),
    suite=suites.suite_satisficing_with_ipc11(),
    limits={"search_time": 300}
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()

def grouped_configs_to_compare(config_nicks):
    # Pair the revision-qualified algorithm names for each config nick and
    # label the diff column of the comparison table.
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
        grouped_configs.append((col_names[0], col_names[1],
                                'Diff - %s' % config_nick))
    return grouped_configs

exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()),
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES,
    ),
    outfile="issue425-sat-compare-core-configs.html"
)

def add_first_run_search_time(run):
    # Expose the search time of the first iterated-search run (if any) as
    # its own attribute so it can appear in the portfolio report below.
    if run.get("search_time_all", []):
        run["first_run_search_time"] = run["search_time_all"][0]
    return run

exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(configs.configs_satisficing_ipc()),
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"],
    filter=add_first_run_search_time,
    ),
    outfile="issue425-sat-compare-portfolio-configs.html"
)

exp()
| 1,671 |
Python
| 33.122448 | 113 | 0.645721 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/v5-planner.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue862 (v5): compare base and v5 with several lazy-greedy heuristics,
# lama-first and blind search on a small suite of ADL-style domains.
# NOTE(review): presumably these are the domains affected by this issue —
# confirm against the issue tracker.

import itertools
import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue862-base", "issue862-v5"]
BUILDS = ["release32"]
# One lazy-greedy configuration per heuristic.
CONFIG_DICT = {
    "lazy-greedy-{h}".format(**locals()): [
        "--evaluator",
        "h={h}()".format(**locals()),
        "--search",
        "lazy_greedy([h], preferred=[h])"]
    for h in ["hmax", "add", "ff", "cg", "cea"]
}
CONFIG_DICT["lama-first"] = [
    "--evaluator",
    "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
    "--evaluator", "hff=ff(transform=adapt_costs(one))",
    "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
                               cost_type=one,reopen_closed=false)"""]
CONFIG_DICT["blind"] = ["--search", "astar(blind())"]
CONFIGS = [
    IssueConfig(
        "-".join([config_nick, build]),
        config,
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for config_nick, config in CONFIG_DICT.items()
]
SUITE = [
    "airport-adl",
    "assembly",
    "miconic-fulladl",
    "psr-large",
    "psr-middle",
]
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")

#exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
| 2,260 |
Python
| 27.620253 | 103 | 0.657965 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v8-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue693 (v8): blind search only, compared across three revisions in
# 32-bit and 64-bit builds.

import itertools
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue693-v7-base", "issue693-v7", "issue693-v8"]
BUILDS = ["release32", "release64"]
SEARCHES = [
    ("blind", "astar(blind())"),
]

# One IssueConfig per (search, build) combination; the build is selected
# both at compile time (build_options) and at run time (driver_options).
CONFIGS = []
for nick, search in SEARCHES:
    for build in BUILDS:
        CONFIGS.append(
            IssueConfig(
                "%s-%s" % (nick, build),
                ["--search", search],
                build_options=[build],
                driver_options=["--build", build]))

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_comparison_table_step()

exp.run_steps()
| 1,219 |
Python
| 24.957446 | 80 | 0.707137 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v3-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue693 (v3): compare v2 and v3 on several optimal configurations in
# 32-bit and 64-bit builds, with absolute, comparison and pairwise
# per-build revision reports.

import itertools
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue693-v2", "issue693-v3"]
BUILDS = ["release32", "release64"]
SEARCHES = [
    ("blind", "astar(blind())"),
    ("divpot", "astar(diverse_potentials())"),
    ("lmcut", "astar(lmcut())"),
    ("cegar", "astar(cegar())"),
    ("systematic2", "astar(cpdbs(systematic(2)))"),
]
# One algorithm per (search, build) pair.
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        ["--search", search],
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

attributes = [
    "coverage", "error", "expansions_until_last_jump", "memory",
    "score_memory", "total_time", "score_total_time"]

# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
    for rev1, rev2 in itertools.combinations(REVISIONS, 2):
        algorithm_pairs = [
            ("{rev1}-{config_nick}-{build}".format(**locals()),
             "{rev2}-{config_nick}-{build}".format(**locals()),
             "Diff ({config_nick}-{build})".format(**locals()))
            for config_nick, search in SEARCHES]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals()))

exp.run_steps()
| 2,171 |
Python
| 29.591549 | 80 | 0.659143 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v6-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue693 (v6): blind search only with a 1-minute search limit; produces
# pairwise comparison tables and relative scatter plots for total_time and
# memory.

import itertools
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue693-v5", "issue693-v6"]
BUILDS = ["release32", "release64"]
SEARCHES = [
    ("blind", "astar(blind())"),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        ["--search", search],
        build_options=[build],
        driver_options=["--build", build, "--search-time-limit", "1m"])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

# Compare revisions.
for build in BUILDS:
    for rev1, rev2 in itertools.combinations(REVISIONS, 2):
        algorithm_pairs = [
            ("{rev1}-{config_nick}-{build}".format(**locals()),
             "{rev2}-{config_nick}-{build}".format(**locals()),
             "Diff ({config_nick}-{build})".format(**locals()))
            for config_nick, search in SEARCHES]
        exp.add_report(
            ComparativeReport(
                algorithm_pairs,
                attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
            name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals()))
        # Per-configuration relative scatter plots, categorized by domain.
        for config_nick, search in SEARCHES:
            algorithms = [
                "{rev1}-{config_nick}-{build}".format(**locals()),
                "{rev2}-{config_nick}-{build}".format(**locals())]
            for attribute in ["total_time", "memory"]:
                exp.add_report(
                    RelativeScatterPlotReport(
                        attributes=[attribute],
                        filter_algorithm=algorithms,
                        get_category=lambda run1, run2: run1["domain"]),
                    name="issue693-relative-scatter-{config_nick}-{build}-{rev1}-vs-{rev2}-{attribute}".format(**locals()))

exp.run_steps()
| 2,443 |
Python
| 32.479452 | 123 | 0.615227 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v4-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# issue693 (v4): satisficing configurations (lazy/eager greedy with ff and
# add, plus the lama-first alias) in 32-bit and 64-bit builds, compared
# across revisions.

import itertools
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue693-v4-base", "issue693-v4"]
BUILDS = ["release32", "release64"]
SEARCHES = [
    ("ff_lazy", ["--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]),
    ("add_lazy", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
    ("ff_eager", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
]
# Explicit configurations plus one lama-first alias config per build.
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
] + [
    IssueConfig(
        "lama-first-{build}".format(**locals()),
        [],
        build_options=[build],
        driver_options=["--build", build, "--alias", "lama-first"])
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES

# Compare revisions.
for build in BUILDS:
    for rev1, rev2 in itertools.combinations(REVISIONS, 2):
        algorithm_pairs = [
            ("{rev1}-{config_nick}-{build}".format(**locals()),
             "{rev2}-{config_nick}-{build}".format(**locals()),
             "Diff ({config_nick}-{build})".format(**locals()))
            for config_nick in [nick for nick, _ in SEARCHES] + ["lama-first"]]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue693-sat-{rev1}-vs-{rev2}-{build}".format(**locals()))

exp.run_steps()
| 2,295 |
Python
| 30.888888 | 88 | 0.647059 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/main.cc
|
#include <algorithm>
#include <ctime>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include "fast_hash.h"
#include "hash.h"
#include "SpookyV2.h"
using namespace std;
static void benchmark(const string &desc, int num_calls,
const function<void()> &func) {
cout << "Running " << desc << " " << num_calls << " times:" << flush;
clock_t start = clock();
for (int j = 0; j < num_calls; ++j)
func();
clock_t end = clock();
double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC;
cout << " " << duration << "s" << endl;
}
static int scramble(int i) {
return (0xdeadbeef * i) ^ 0xfeedcafe;
}
// Rotate the 32-bit value x left by k bits (k must be in 1..31).
#define rot32(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
// Jenkins lookup3-style reversible mixing step for three 32-bit state words.
#define mix32(a, b, c) \
    { \
        a -= c; a ^= rot32(c, 4); c += b; \
        b -= a; b ^= rot32(a, 6); a += c; \
        c -= b; c ^= rot32(b, 8); b += a; \
        a -= c; a ^= rot32(c, 16); c += b; \
        b -= a; b ^= rot32(a, 19); a += c; \
        c -= b; c ^= rot32(b, 4); b += a; \
    }
// Final avalanche: mix the last three words so every input bit affects c.
#define final32(a, b, c) \
    { \
        c ^= b; c -= rot32(b, 14); \
        a ^= c; a -= rot32(c, 11); \
        b ^= a; b -= rot32(a, 25); \
        c ^= b; c -= rot32(b, 16); \
        a ^= c; a -= rot32(c, 4); \
        b ^= a; b -= rot32(a, 14); \
        c ^= b; c -= rot32(b, 24); \
    }
// Hash a sequence of `length` ints (lookup3-style, three words at a time),
// returning a 32-bit value. `initval` perturbs the result so different call
// sites can get different hashes for the same data.
inline unsigned int hash_unsigned_int_sequence(
    const int *k, unsigned int length, unsigned int initval) {
    unsigned int a, b, c;
    // Set up the internal state.
    a = b = c = 0xdeadbeef + (length << 2) + initval;
    // Handle most of the key.
    while (length > 3) {
        a += k[0];
        b += k[1];
        c += k[2];
        mix32(a, b, c);
        length -= 3;
        k += 3;
    }
    // Handle the last 3 unsigned ints. All case statements fall through.
    // (The fall-through is intentional: case 3 adds k[2], then k[1], then
    // k[0] before the final mix.)
    switch (length) {
    case 3:
        c += k[2];
    case 2:
        b += k[1];
    case 1:
        a += k[0];
        final32(a, b, c);
    // case 0: nothing left to add.
    case 0:
        break;
    }
    return c;
}
// Hash functor: one-shot Jenkins lookup3-style hash over the vector's ints.
// Note: vec.size() (size_t) narrows implicitly to unsigned int here.
struct BurtleBurtleHash {
    std::size_t operator()(const std::vector<int> &vec) const {
        return hash_unsigned_int_sequence(vec.data(), vec.size(), 2016);
    }
};
using BurtleBurtleHashSet = std::unordered_set<vector<int>, BurtleBurtleHash>;
// Hash functor: feeds the raw int array through the project's incremental
// utils::HashState word API in a single call.
struct HashWordHash {
    std::size_t operator()(const std::vector<int> &vec) const {
        utils::HashState hash_state;
        hash_state.feed_ints(vec.data(), vec.size());
        return hash_state.get_hash64();
    }
};
// Hash functor: SpookyHash over the vector's bytes.
// The `* 4` byte count assumes sizeof(int) == 4 -- TODO confirm for all
// target platforms.
struct SpookyV2Hash {
    std::size_t operator()(const std::vector<int> &vec) const {
        return SpookyHash::Hash64(vec.data(), vec.size() * 4, 2016);
    }
};
// Hash functor: SpookyHash over a single int's bytes.
struct SpookyV2HashInt {
    std::size_t operator()(int i) const {
        return SpookyHash::Hash64(&i, sizeof(int), 2016);
    }
};
int main(int, char **) {
const int REPETITIONS = 2;
const int NUM_CALLS = 100000;
const int NUM_INSERTIONS = 100;
for (int i = 0; i < REPETITIONS; ++i) {
benchmark("nothing", NUM_CALLS, [] () {});
cout << endl;
benchmark("insert int with BoostHash", NUM_CALLS,
[&]() {
unordered_set<int> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(scramble(i));
}
});
benchmark("insert int with BoostHashFeed", NUM_CALLS,
[&]() {
fast_hash::HashSet<int> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(scramble(i));
}
});
benchmark("insert int with BurtleFeed", NUM_CALLS,
[&]() {
utils::HashSet<int> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(scramble(i));
}
});
benchmark("insert int with SpookyHash", NUM_CALLS,
[&]() {
std::unordered_set<int, SpookyV2HashInt> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(scramble(i));
}
});
cout << endl;
benchmark("insert pair<int, int> with BoostHash", NUM_CALLS,
[&]() {
unordered_set<pair<int, int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(make_pair(scramble(i), scramble(i + 1)));
}
});
benchmark("insert pair<int, int> with BoostHashFeed", NUM_CALLS,
[&]() {
fast_hash::HashSet<pair<int, int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(make_pair(scramble(i), scramble(i + 1)));
}
});
benchmark("insert pair<int, int> with BurtleFeed", NUM_CALLS,
[&]() {
utils::HashSet<pair<int, int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
s.insert(make_pair(scramble(i), scramble(i + 1)));
}
});
cout << endl;
for (int length : {1, 10, 100}
) {
benchmark(
"insert vector<int> of size " + to_string(length) +
" with BoostHash", NUM_CALLS,
[&]() {
unordered_set<vector<int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
benchmark(
"insert vector<int> of size " + to_string(length) +
" with BoostHashFeed", NUM_CALLS,
[&]() {
fast_hash::HashSet<vector<int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
benchmark(
"insert vector<int> of size " + to_string(length) +
" with BurtleVector", NUM_CALLS,
[&]() {
BurtleBurtleHashSet s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
benchmark(
"insert vector<int> of size " + to_string(length) +
" with BurtleFeed", NUM_CALLS,
[&]() {
utils::HashSet<vector<int>> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
benchmark(
"insert vector<int> of size " + to_string(length) +
" with BurtleFeedVector", NUM_CALLS,
[&]() {
std::unordered_set<vector<int>, HashWordHash> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
benchmark(
"insert vector<int> of size " + to_string(length) +
" with SpookyHash", NUM_CALLS,
[&]() {
std::unordered_set<vector<int>, SpookyV2Hash> s;
for (int i = 0; i < NUM_INSERTIONS; ++i) {
vector<int> v;
v.reserve(length);
for (int j = 0; j < length; ++j) {
v.push_back(scramble(NUM_INSERTIONS * length + j));
}
s.insert(v);
}
});
cout << endl;
}
cout << endl;
}
return 0;
}
| 9,166 |
C++
| 32.334545 | 79 | 0.397883 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/SpookyV2.cc
|
// Spooky Hash
// A 128-bit noncryptographic hash, for checksums and table lookup
// By Bob Jenkins. Public domain.
// Oct 31 2010: published framework, disclaimer ShortHash isn't right
// Nov 7 2010: disabled ShortHash
// Oct 31 2011: replace End, ShortMix, ShortEnd, enable ShortHash again
// April 10 2012: buffer overflow on platforms without unaligned reads
// July 12 2012: was passing out variables in final to in/out in short
// July 30 2012: I reintroduced the buffer overflow
// August 5 2012: SpookyV2: d = should be d += in short hash, and remove extra mix from long hash
#include <memory.h>
#include "SpookyV2.h"
#define ALLOW_UNALIGNED_READS 1
//
// short hash ... it could be used on any message,
// but it's used by Spooky just for short messages.
//
// Hash messages shorter than sc_bufSize bytes. *hash1/*hash2 carry the
// seeds in and the 128-bit result out. Little-endian reads are assumed, as
// documented in the header.
void SpookyHash::Short(
    const void *message,
    size_t length,
    uint64 *hash1,
    uint64 *hash2) {
    uint64 buf[2 * sc_numVars];
    // Union lets us read the same pointer as bytes, 32-bit or 64-bit words,
    // and test its alignment via `i`.
    union {
        const uint8 *p8;
        uint32 *p32;
        uint64 *p64;
        size_t i;
    }
    u;
    u.p8 = (const uint8 *)message;
    // If unaligned reads are disallowed and the message is misaligned, copy
    // it into the aligned local buffer first.
    if (!ALLOW_UNALIGNED_READS && (u.i & 0x7)) {
        memcpy(buf, message, length);
        u.p64 = buf;
    }
    size_t remainder = length % 32;
    uint64 a = *hash1;
    uint64 b = *hash2;
    uint64 c = sc_const;
    uint64 d = sc_const;
    if (length > 15) {
        const uint64 *end = u.p64 + (length / 32) * 4;
        // handle all complete sets of 32 bytes
        for (; u.p64 < end; u.p64 += 4) {
            c += u.p64[0];
            d += u.p64[1];
            ShortMix(a, b, c, d);
            a += u.p64[2];
            b += u.p64[3];
        }
        //Handle the case of 16+ remaining bytes.
        if (remainder >= 16) {
            c += u.p64[0];
            d += u.p64[1];
            ShortMix(a, b, c, d);
            u.p64 += 2;
            remainder -= 16;
        }
    }
    // Handle the last 0..15 bytes, and its length
    // (all non-break cases fall through intentionally).
    d += ((uint64)length) << 56;
    switch (remainder) {
    case 15:
        d += ((uint64)u.p8[14]) << 48;
    case 14:
        d += ((uint64)u.p8[13]) << 40;
    case 13:
        d += ((uint64)u.p8[12]) << 32;
    case 12:
        d += u.p32[2];
        c += u.p64[0];
        break;
    case 11:
        d += ((uint64)u.p8[10]) << 16;
    case 10:
        d += ((uint64)u.p8[9]) << 8;
    case 9:
        d += (uint64)u.p8[8];
    case 8:
        c += u.p64[0];
        break;
    case 7:
        c += ((uint64)u.p8[6]) << 48;
    case 6:
        c += ((uint64)u.p8[5]) << 40;
    case 5:
        c += ((uint64)u.p8[4]) << 32;
    case 4:
        c += u.p32[0];
        break;
    case 3:
        c += ((uint64)u.p8[2]) << 16;
    case 2:
        c += ((uint64)u.p8[1]) << 8;
    case 1:
        c += (uint64)u.p8[0];
        break;
    case 0:
        c += sc_const;
        d += sc_const;
    }
    ShortEnd(a, b, c, d);
    *hash1 = a;
    *hash2 = b;
}
// do the whole hash in one call
// do the whole hash in one call
// *hash1/*hash2 carry the 128-bit seed in and the 128-bit hash out.
// Short messages are delegated to Short(); long ones use the 12-word state.
void SpookyHash::Hash128(
    const void *message,
    size_t length,
    uint64 *hash1,
    uint64 *hash2) {
    if (length < sc_bufSize) {
        Short(message, length, hash1, hash2);
        return;
    }
    uint64 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    uint64 buf[sc_numVars];
    uint64 *end;
    union {
        const uint8 *p8;
        uint64 *p64;
        size_t i;
    }
    u;
    size_t remainder;
    // Seed the 12-word state from the two seed words plus the constant.
    h0 = h3 = h6 = h9 = *hash1;
    h1 = h4 = h7 = h10 = *hash2;
    h2 = h5 = h8 = h11 = sc_const;
    u.p8 = (const uint8 *)message;
    end = u.p64 + (length / sc_blockSize) * sc_numVars;
    // handle all whole sc_blockSize blocks of bytes
    if (ALLOW_UNALIGNED_READS || ((u.i & 0x7) == 0)) {
        while (u.p64 < end) {
            Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
            u.p64 += sc_numVars;
        }
    } else {
        // Misaligned input: copy each block to the aligned buffer first.
        while (u.p64 < end) {
            memcpy(buf, u.p64, sc_blockSize);
            Mix(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
            u.p64 += sc_numVars;
        }
    }
    // handle the last partial block of sc_blockSize bytes
    // (zero-padded, with the remainder length stored in the last byte)
    remainder = (length - ((const uint8 *)end - (const uint8 *)message));
    memcpy(buf, end, remainder);
    memset(((uint8 *)buf) + remainder, 0, sc_blockSize - remainder);
    ((uint8 *)buf)[sc_blockSize - 1] = remainder;
    // do some final mixing
    End(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    *hash1 = h0;
    *hash2 = h1;
}
// init spooky state
// Reset the incremental-hashing state with the given 128-bit seed.
// Must be called before the first Update()/Final() of a new message.
void SpookyHash::Init(uint64 seed1, uint64 seed2) {
    m_length = 0;
    m_remainder = 0;
    m_state[0] = seed1;
    m_state[1] = seed2;
}
// add a message fragment to the state
// add a message fragment to the state
// Fragments shorter than the buffer are stashed in m_data; once enough data
// accumulates, whole sc_blockSize blocks are mixed into the 12-word state.
void SpookyHash::Update(const void *message, size_t length) {
    uint64 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    size_t newLength = length + m_remainder;
    uint8 remainder;
    union {
        const uint8 *p8;
        uint64 *p64;
        size_t i;
    }
    u;
    const uint64 *end;
    // Is this message fragment too short? If it is, stuff it away.
    if (newLength < sc_bufSize) {
        memcpy(&((uint8 *)m_data)[m_remainder], message, length);
        m_length = length + m_length;
        m_remainder = (uint8)newLength;
        return;
    }
    // init the variables
    // (first time we cross sc_bufSize, expand the 2-word seed to 12 words;
    // afterwards, resume from the saved state)
    if (m_length < sc_bufSize) {
        h0 = h3 = h6 = h9 = m_state[0];
        h1 = h4 = h7 = h10 = m_state[1];
        h2 = h5 = h8 = h11 = sc_const;
    } else {
        h0 = m_state[0];
        h1 = m_state[1];
        h2 = m_state[2];
        h3 = m_state[3];
        h4 = m_state[4];
        h5 = m_state[5];
        h6 = m_state[6];
        h7 = m_state[7];
        h8 = m_state[8];
        h9 = m_state[9];
        h10 = m_state[10];
        h11 = m_state[11];
    }
    m_length = length + m_length;
    // if we've got anything stuffed away, use it now
    if (m_remainder) {
        uint8 prefix = sc_bufSize - m_remainder;
        memcpy(&(((uint8 *)m_data)[m_remainder]), message, prefix);
        u.p64 = m_data;
        Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        Mix(&u.p64[sc_numVars], h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        u.p8 = ((const uint8 *)message) + prefix;
        length -= prefix;
    } else {
        u.p8 = (const uint8 *)message;
    }
    // handle all whole blocks of sc_blockSize bytes
    end = u.p64 + (length / sc_blockSize) * sc_numVars;
    remainder = (uint8)(length - ((const uint8 *)end - u.p8));
    if (ALLOW_UNALIGNED_READS || (u.i & 0x7) == 0) {
        while (u.p64 < end) {
            Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
            u.p64 += sc_numVars;
        }
    } else {
        // Misaligned input: copy each block into m_data before mixing.
        while (u.p64 < end) {
            memcpy(m_data, u.p8, sc_blockSize);
            Mix(m_data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
            u.p64 += sc_numVars;
        }
    }
    // stuff away the last few bytes
    m_remainder = remainder;
    memcpy(m_data, end, remainder);
    // stuff away the variables
    m_state[0] = h0;
    m_state[1] = h1;
    m_state[2] = h2;
    m_state[3] = h3;
    m_state[4] = h4;
    m_state[5] = h5;
    m_state[6] = h6;
    m_state[7] = h7;
    m_state[8] = h8;
    m_state[9] = h9;
    m_state[10] = h10;
    m_state[11] = h11;
}
// report the hash for the concatenation of all message fragments so far
// report the hash for the concatenation of all message fragments so far
// Does not modify the state, so Update() may continue afterwards.
void SpookyHash::Final(uint64 *hash1, uint64 *hash2) {
    // init the variables
    // (if the whole message fit in the buffer, use the short-message path)
    if (m_length < sc_bufSize) {
        *hash1 = m_state[0];
        *hash2 = m_state[1];
        Short(m_data, m_length, hash1, hash2);
        return;
    }
    const uint64 *data = (const uint64 *)m_data;
    uint8 remainder = m_remainder;
    uint64 h0 = m_state[0];
    uint64 h1 = m_state[1];
    uint64 h2 = m_state[2];
    uint64 h3 = m_state[3];
    uint64 h4 = m_state[4];
    uint64 h5 = m_state[5];
    uint64 h6 = m_state[6];
    uint64 h7 = m_state[7];
    uint64 h8 = m_state[8];
    uint64 h9 = m_state[9];
    uint64 h10 = m_state[10];
    uint64 h11 = m_state[11];
    if (remainder >= sc_blockSize) {
        // m_data can contain two blocks; handle any whole first block
        Mix(data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        data += sc_numVars;
        remainder -= sc_blockSize;
    }
    // mix in the last partial block, and the length mod sc_blockSize
    memset(&((uint8 *)data)[remainder], 0, (sc_blockSize - remainder));
    ((uint8 *)data)[sc_blockSize - 1] = remainder;
    // do some final mixing
    End(data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    *hash1 = h0;
    *hash2 = h1;
}
| 8,549 |
C++
| 25.635514 | 99 | 0.51468 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/fast_hash.h
|
#ifndef FAST_HASH_H
#define FAST_HASH_H
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
namespace fast_hash {
static_assert(sizeof(unsigned int) == 4, "unsigned int has unexpected size");
/*
Internal class storing the state of the hashing process. It should only be
instantiated by functions in this file.
*/
/*
  Internal class storing the state of the hashing process. It should only be
  instantiated by functions in this file.
*/
class HashState {
    // Running 32-bit state, seeded with an arbitrary constant.
    std::uint32_t hash = 0xdeadbeef;
public:
    // Mix one 32-bit word into the running state (hash_combine-style step
    // using the golden-ratio constant 0x9e3779b9).
    void feed(std::uint32_t value) {
        const std::uint32_t golden_ratio = 0x9e3779b9;
        hash ^= value + golden_ratio + (hash << 6) + (hash >> 2);
    }
    std::uint32_t get_hash32() {
        return hash;
    }
    // Widen the 32-bit state by duplicating it into both halves.
    std::uint64_t get_hash64() {
        const std::uint64_t high = static_cast<std::uint64_t>(hash) << 32;
        return high | hash;
    }
};
/*
These functions add a new object to an existing HashState object.
To add hashing support for a user type X, provide an override
for utils::feed(HashState &hash_state, const X &value).
*/
static_assert(
    sizeof(int) == sizeof(std::uint32_t),
    "int and uint32_t have different sizes");
// Feed a signed int as its 32-bit two's-complement pattern.
inline void feed(HashState &hash_state, int value) {
    hash_state.feed(static_cast<std::uint32_t>(value));
}
static_assert(
    sizeof(unsigned int) == sizeof(std::uint32_t),
    "unsigned int and uint32_t have different sizes");
inline void feed(HashState &hash_state, unsigned int value) {
    hash_state.feed(static_cast<std::uint32_t>(value));
}
// Feed a 64-bit value as two 32-bit words, low word first.
inline void feed(HashState &hash_state, std::uint64_t value) {
    hash_state.feed(static_cast<std::uint32_t>(value));
    value >>= 32;
    hash_state.feed(static_cast<std::uint32_t>(value));
}
// Feed a pointer by its 64-bit address value.
template<typename T>
void feed(HashState &hash_state, const T *p) {
    // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway.
    feed(hash_state, reinterpret_cast<std::uint64_t>(p));
}
// Feed a pair by feeding both components in order.
template<typename T1, typename T2>
void feed(HashState &hash_state, const std::pair<T1, T2> &p) {
    feed(hash_state, p.first);
    feed(hash_state, p.second);
}
template<typename T>
void feed(HashState &hash_state, const std::vector<T> &vec) {
    /*
      Feed vector size to ensure that no two different vectors of the same type
      have the same code prefix.
    */
    feed(hash_state, vec.size());
    for (const T &item : vec) {
        feed(hash_state, item);
    }
}
/*
Public hash functions.
get_hash() is used internally by the HashMap and HashSet classes below. In
more exotic use cases, such as implementing a custom hash table, you can also
use `get_hash32()`, `get_hash64()` and `get_hash()` directly.
*/
template<typename T>
std::uint32_t get_hash32(const T &value) {
HashState hash_state;
feed(hash_state, value);
return hash_state.get_hash32();
}
template<typename T>
std::uint64_t get_hash64(const T &value) {
HashState hash_state;
feed(hash_state, value);
return hash_state.get_hash64();
}
template<typename T>
std::size_t get_hash(const T &value) {
return static_cast<std::size_t>(get_hash64(value));
}
// This struct should only be used by HashMap and HashSet below.
// Hash functor wrapping get_hash() for use as the Hasher template argument
// of the HashMap/HashSet aliases below.
template<typename T>
struct Hash {
    std::size_t operator()(const T &val) const {
        return get_hash(val);
    }
};
/*
Aliases for hash sets and hash maps in user code. All user code should use
utils::UnorderedSet and utils::UnorderedMap instead of std::unordered_set and
std::unordered_map.
To hash types that are not supported out of the box, implement utils::feed.
*/
// Hash containers using this file's Hash functor.
template<typename T1, typename T2>
using HashMap = std::unordered_map<T1, T2, Hash<T1>>;
template<typename T>
using HashSet = std::unordered_set<T, Hash<T>>;
/* Transitional aliases and functions */
// These use the default std::hash and exist only to ease migration.
template<typename T1, typename T2>
using UnorderedMap = std::unordered_map<T1, T2>;
template<typename T>
using UnorderedSet = std::unordered_set<T>;
#endif
| 3,848 |
C
| 24.66 | 90 | 0.677755 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/SpookyV2.h
|
//
// SpookyHash: a 128-bit noncryptographic hash function
// By Bob Jenkins, public domain
// Oct 31 2010: alpha, framework + SpookyHash::Mix appears right
// Oct 31 2011: alpha again, Mix only good to 2^^69 but rest appears right
// Dec 31 2011: beta, improved Mix, tested it for 2-bit deltas
// Feb 2 2012: production, same bits as beta
// Feb 5 2012: adjusted definitions of uint* to be more portable
// Mar 30 2012: 3 bytes/cycle, not 4. Alpha was 4 but wasn't thorough enough.
// August 5 2012: SpookyV2 (different results)
//
// Up to 3 bytes/cycle for long messages. Reasonably fast for short messages.
// All 1 or 2 bit deltas achieve avalanche within 1% bias per output bit.
//
// This was developed for and tested on 64-bit x86-compatible processors.
// It assumes the processor is little-endian. There is a macro
// controlling whether unaligned reads are allowed (by default they are).
// This should be an equally good hash on big-endian machines, but it will
// compute different results on them than on little-endian machines.
//
// Google's CityHash has similar specs to SpookyHash, and CityHash is faster
// on new Intel boxes. MD4 and MD5 also have similar specs, but they are orders
// of magnitude slower. CRCs are two or more times slower, but unlike
// SpookyHash, they have nice math for combining the CRCs of pieces to form
// the CRCs of wholes. There are also cryptographic hashes, but those are even
// slower than MD5.
//
#include <stddef.h>
#ifdef _MSC_VER
# define INLINE __forceinline
typedef unsigned __int64 uint64;
typedef unsigned __int32 uint32;
typedef unsigned __int16 uint16;
typedef unsigned __int8 uint8;
#else
# include <stdint.h>
# define INLINE inline
typedef uint64_t uint64;
typedef uint32_t uint32;
typedef uint16_t uint16;
typedef uint8_t uint8;
#endif
// 128-bit noncryptographic hash (Bob Jenkins' SpookyHash V2, public domain).
// One-shot use: Hash128/Hash64/Hash32. Incremental use: Init/Update/Final.
class SpookyHash {
public:
    //
    // SpookyHash: hash a single message in one call, produce 128-bit output
    //
    static void Hash128(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64 *hash1,        // in/out: in seed 1, out hash value 1
        uint64 *hash2);       // in/out: in seed 2, out hash value 2

    //
    // Hash64: hash a single message in one call, return 64-bit output
    //
    static uint64 Hash64(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64 seed) {        // seed
        uint64 hash1 = seed;
        Hash128(message, length, &hash1, &seed);
        return hash1;
    }

    //
    // Hash32: hash a single message in one call, produce 32-bit output
    //
    static uint32 Hash32(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint32 seed) {        // seed
        uint64 hash1 = seed, hash2 = seed;
        Hash128(message, length, &hash1, &hash2);
        return (uint32)hash1;
    }

    //
    // Init: initialize the context of a SpookyHash
    //
    void Init(
        uint64 seed1,   // any 64-bit value will do, including 0
        uint64 seed2);  // different seeds produce independent hashes

    //
    // Update: add a piece of a message to a SpookyHash state
    //
    void Update(
        const void *message,  // message fragment
        size_t length);       // length of message fragment in bytes

    //
    // Final: compute the hash for the current SpookyHash state
    //
    // This does not modify the state; you can keep updating it afterward
    //
    // The result is the same as if SpookyHash() had been called with
    // all the pieces concatenated into one message.
    //
    void Final(
        uint64 *hash1,   // out only: first 64 bits of hash value.
        uint64 *hash2);  // out only: second 64 bits of hash value.

    //
    // left rotate a 64-bit value by k bytes
    // (NOTE: "bytes" in the original comment is a misnomer; k is in bits)
    //
    static INLINE uint64 Rot64(uint64 x, int k) {
        return (x << k) | (x >> (64 - k));
    }

    //
    // This is used if the input is 96 bytes long or longer.
    //
    // The internal state is fully overwritten every 96 bytes.
    // Every input bit appears to cause at least 128 bits of entropy
    // before 96 other bytes are combined, when run forward or backward
    //   For every input bit,
    //   Two inputs differing in just that input bit
    //   Where "differ" means xor or subtraction
    //   And the base value is random
    //   When run forward or backwards one Mix
    // I tried 3 pairs of each; they all differed by at least 212 bits.
    //
    static INLINE void Mix(
        const uint64 *data,
        uint64 &s0, uint64 &s1, uint64 &s2, uint64 &s3,
        uint64 &s4, uint64 &s5, uint64 &s6, uint64 &s7,
        uint64 &s8, uint64 &s9, uint64 &s10, uint64 &s11) {
        s0 += data[0];
        s2 ^= s10;
        s11 ^= s0;
        s0 = Rot64(s0, 11);
        s11 += s1;
        s1 += data[1];
        s3 ^= s11;
        s0 ^= s1;
        s1 = Rot64(s1, 32);
        s0 += s2;
        s2 += data[2];
        s4 ^= s0;
        s1 ^= s2;
        s2 = Rot64(s2, 43);
        s1 += s3;
        s3 += data[3];
        s5 ^= s1;
        s2 ^= s3;
        s3 = Rot64(s3, 31);
        s2 += s4;
        s4 += data[4];
        s6 ^= s2;
        s3 ^= s4;
        s4 = Rot64(s4, 17);
        s3 += s5;
        s5 += data[5];
        s7 ^= s3;
        s4 ^= s5;
        s5 = Rot64(s5, 28);
        s4 += s6;
        s6 += data[6];
        s8 ^= s4;
        s5 ^= s6;
        s6 = Rot64(s6, 39);
        s5 += s7;
        s7 += data[7];
        s9 ^= s5;
        s6 ^= s7;
        s7 = Rot64(s7, 57);
        s6 += s8;
        s8 += data[8];
        s10 ^= s6;
        s7 ^= s8;
        s8 = Rot64(s8, 55);
        s7 += s9;
        s9 += data[9];
        s11 ^= s7;
        s8 ^= s9;
        s9 = Rot64(s9, 54);
        s8 += s10;
        s10 += data[10];
        s0 ^= s8;
        s9 ^= s10;
        s10 = Rot64(s10, 22);
        s9 += s11;
        s11 += data[11];
        s1 ^= s9;
        s10 ^= s11;
        s11 = Rot64(s11, 46);
        s10 += s0;
    }

    //
    // Mix all 12 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3%
    // For every pair of input bits,
    // with probability 50 +- 3%
    //
    // This does not rely on the last Mix() call having already mixed some.
    // Two iterations was almost good enough for a 64-bit result, but a
    // 128-bit result is reported, so End() does three iterations.
    //
    static INLINE void EndPartial(
        uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3,
        uint64 &h4, uint64 &h5, uint64 &h6, uint64 &h7,
        uint64 &h8, uint64 &h9, uint64 &h10, uint64 &h11) {
        h11 += h1;
        h2 ^= h11;
        h1 = Rot64(h1, 44);
        h0 += h2;
        h3 ^= h0;
        h2 = Rot64(h2, 15);
        h1 += h3;
        h4 ^= h1;
        h3 = Rot64(h3, 34);
        h2 += h4;
        h5 ^= h2;
        h4 = Rot64(h4, 21);
        h3 += h5;
        h6 ^= h3;
        h5 = Rot64(h5, 38);
        h4 += h6;
        h7 ^= h4;
        h6 = Rot64(h6, 33);
        h5 += h7;
        h8 ^= h5;
        h7 = Rot64(h7, 10);
        h6 += h8;
        h9 ^= h6;
        h8 = Rot64(h8, 13);
        h7 += h9;
        h10 ^= h7;
        h9 = Rot64(h9, 38);
        h8 += h10;
        h11 ^= h8;
        h10 = Rot64(h10, 53);
        h9 += h11;
        h0 ^= h9;
        h11 = Rot64(h11, 42);
        h10 += h0;
        h1 ^= h10;
        h0 = Rot64(h0, 54);
    }

    // Add the final data block, then run three EndPartial avalanche rounds.
    static INLINE void End(
        const uint64 *data,
        uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3,
        uint64 &h4, uint64 &h5, uint64 &h6, uint64 &h7,
        uint64 &h8, uint64 &h9, uint64 &h10, uint64 &h11) {
        h0 += data[0];
        h1 += data[1];
        h2 += data[2];
        h3 += data[3];
        h4 += data[4];
        h5 += data[5];
        h6 += data[6];
        h7 += data[7];
        h8 += data[8];
        h9 += data[9];
        h10 += data[10];
        h11 += data[11];
        EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    }

    //
    // The goal is for each bit of the input to expand into 128 bits of
    //   apparent entropy before it is fully overwritten.
    // n trials both set and cleared at least m bits of h0 h1 h2 h3
    //   n: 2   m: 29
    //   n: 3   m: 46
    //   n: 4   m: 57
    //   n: 5   m: 107
    //   n: 6   m: 146
    //   n: 7   m: 152
    // when run forwards or backwards
    // for all 1-bit and 2-bit diffs
    // with diffs defined by either xor or subtraction
    // with a base of all zeros plus a counter, or plus another bit, or random
    //
    static INLINE void ShortMix(uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3) {
        h2 = Rot64(h2, 50);
        h2 += h3;
        h0 ^= h2;
        h3 = Rot64(h3, 52);
        h3 += h0;
        h1 ^= h3;
        h0 = Rot64(h0, 30);
        h0 += h1;
        h2 ^= h0;
        h1 = Rot64(h1, 41);
        h1 += h2;
        h3 ^= h1;
        h2 = Rot64(h2, 54);
        h2 += h3;
        h0 ^= h2;
        h3 = Rot64(h3, 48);
        h3 += h0;
        h1 ^= h3;
        h0 = Rot64(h0, 38);
        h0 += h1;
        h2 ^= h0;
        h1 = Rot64(h1, 37);
        h1 += h2;
        h3 ^= h1;
        h2 = Rot64(h2, 62);
        h2 += h3;
        h0 ^= h2;
        h3 = Rot64(h3, 34);
        h3 += h0;
        h1 ^= h3;
        h0 = Rot64(h0, 5);
        h0 += h1;
        h2 ^= h0;
        h1 = Rot64(h1, 36);
        h1 += h2;
        h3 ^= h1;
    }

    //
    // Mix all 4 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3% (it is probably better than that)
    // For every pair of input bits,
    // with probability 50 +- .75% (the worst case is approximately that)
    //
    static INLINE void ShortEnd(uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3) {
        h3 ^= h2;
        h2 = Rot64(h2, 15);
        h3 += h2;
        h0 ^= h3;
        h3 = Rot64(h3, 52);
        h0 += h3;
        h1 ^= h0;
        h0 = Rot64(h0, 26);
        h1 += h0;
        h2 ^= h1;
        h1 = Rot64(h1, 51);
        h2 += h1;
        h3 ^= h2;
        h2 = Rot64(h2, 28);
        h3 += h2;
        h0 ^= h3;
        h3 = Rot64(h3, 9);
        h0 += h3;
        h1 ^= h0;
        h0 = Rot64(h0, 47);
        h1 += h0;
        h2 ^= h1;
        h1 = Rot64(h1, 54);
        h2 += h1;
        h3 ^= h2;
        h2 = Rot64(h2, 32);
        h3 += h2;
        h0 ^= h3;
        h3 = Rot64(h3, 25);
        h0 += h3;
        h1 ^= h0;
        h0 = Rot64(h0, 63);
        h1 += h0;
    }

private:
    //
    // Short is used for messages under 192 bytes in length
    // Short has a low startup cost, the normal mode is good for long
    // keys, the cost crossover is at about 192 bytes.  The two modes were
    // held to the same quality bar.
    //
    static void Short(
        const void *message,  // message (array of bytes, not necessarily aligned)
        size_t length,        // length of message (in bytes)
        uint64 *hash1,        // in/out: in the seed, out the hash value
        uint64 *hash2);       // in/out: in the seed, out the hash value

    // number of uint64's in internal state
    static const size_t sc_numVars = 12;

    // size of the internal state
    static const size_t sc_blockSize = sc_numVars * 8;

    // size of buffer of unhashed data, in bytes
    static const size_t sc_bufSize = 2 * sc_blockSize;

    //
    // sc_const: a constant which:
    //  * is not zero
    //  * is odd
    //  * is a not-very-regular mix of 1's and 0's
    //  * does not need any other special mathematical properties
    //
    static const uint64 sc_const = 0xdeadbeefdeadbeefLL;

    // Incremental-hashing state (used by Init/Update/Final only):
    uint64 m_data[2 * sc_numVars];  // unhashed data, for partial messages
    uint64 m_state[sc_numVars];     // internal state of the hash
    size_t m_length;                // total length of the input so far
    uint8 m_remainder;              // length of unhashed data stashed in m_data
};
| 12,673 |
C
| 29.539759 | 82 | 0.530103 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue645/v4-random-seeds.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
def main(revisions=None):
    """Run the issue645-v4 merge-and-shrink experiment.

    Builds DFP merge-strategy configurations with fixed random seeds for
    label reduction and shrinking, runs them on the optimal STRIPS suites,
    and produces an absolute report with extra M&S attributes.
    """
    suite = suites.suite_optimal_strips()
    suite.extend(suites.suite_ipc14_opt_strips())

    # only DFP configs
    configs = {
        # label reduction with seed 2016
        IssueConfig('dfp-b50k-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false,random_seed=2016)))']),
        IssueConfig('dfp-ginf-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false,random_seed=2016)))']),
        IssueConfig('dfp-f50k-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']),
        # shrink fh/rnd with seed 2016
        IssueConfig('dfp-f50ks2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
        IssueConfig('dfp-rnd50ks2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
        # shrink fh/rnd with seed 2016 and with label reduction with seed 2016
        IssueConfig('dfp-f50ks2016-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']),
        IssueConfig('dfp-rnd50ks2016-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']),
    }

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='[email protected]',
    )
    # Custom parser extracting the M&S attributes defined below.
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
    actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
    search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)

    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    # NOTE(review): exp.DEFAULT_TABLE_ATTRIBUTES is a class-level list, so
    # extend() mutates it in place. `attributes` itself is never passed on,
    # so the in-place mutation is presumably how the extra attributes reach
    # add_absolute_report_step() -- verify against common_setup before
    # "fixing" this aliasing.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.extend(extra_attributes)

    exp.add_absolute_report_step()
    # Run all registered experiment steps.
    exp()

main(revisions=['issue645-v4'])
| 4,037 |
Python
| 52.839999 | 271 | 0.711667 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue67/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
from relativescatter import RelativeScatterPlotReport
# Compare the base revision against the issue67-v4 fix.
REVS = ["issue67-v4-base", "issue67-v4"]
SUITE = suites.suite_optimal_with_ipc11()
# Optimal configurations: blind and LM-cut A*, plus admissible landmark
# counting with Zhu/Givan landmarks.
CONFIGS = {
    "astar_blind": [
        "--search",
        "astar(blind())"],
    "astar_lmcut": [
        "--search",
        "astar(lmcut())"],
    "astar_lm_zg": [
        "--search",
        "astar(lmcount(lm_zg(), admissible=true, optimal=true))"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_comparison_table_step()
# Per-domain scatter plot of total_time, base vs. v4.
exp.add_report(
    RelativeScatterPlotReport(
        attributes=["total_time"],
        get_category=lambda run1, run2: run1.get("domain"),
    ),
    outfile='issue67-v4-total-time.png'
)
exp()
| 833 |
Python
| 19.85 | 66 | 0.613445 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue67/issue67.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
# Compare the base revision against the issue67-v1 fix.
REVS = ["issue67-v1-base", "issue67-v1"]
SUITE = suites.suite_optimal_with_ipc11()
# Same optimal configurations as the later issue67 experiments.
CONFIGS = {
    "astar_blind": [
        "--search",
        "astar(blind())"],
    "astar_lmcut": [
        "--search",
        "astar(lmcut())"],
    "astar_lm_zg": [
        "--search",
        "astar(lmcount(lm_zg(), admissible=true, optimal=true))"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_comparison_table_step()
exp()
| 587 |
Python
| 17.967741 | 66 | 0.577513 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue846/v1-lama.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("lama-no-syn-pref-{pref}".format(**locals()), [
"--if-unit-cost",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff=ff()",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff1=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm2=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(plusone), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--always"])
for pref in ["none", "simple", "all"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(filter_algorithm=["issue846-v1-lama-no-syn-pref-none", "issue846-v1-lama-no-syn-pref-simple", "issue846-v1-lama-no-syn-pref-all"])
#exp.add_comparison_table_step()
exp.run_steps()
| 3,606 |
Python
| 37.784946 | 159 | 0.610649 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue348/version1_v3-version2_v3-base.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.environments import FreiburgSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = "/home/drexlerd/benchmarks/downward-benchmarks"
REVISIONS = ["issue348-base", "issue348-version1-v3", "issue348-version2-v3"]
CONFIGS = [
IssueConfig("lama", [], driver_options=["--alias", "lama-first"]),
IssueConfig("ehc-ff", ["--search", "ehc(ff())"]),
IssueConfig("ipdb", ["--search", "astar(ipdb())"]),
#IssueConfig("lmcut", ["--search", "astar(lmcut())"]),
IssueConfig("blind", ["--search", "astar(blind())"]),
#IssueConfig("lazy", [
# "--evaluator",
# "hff=ff()",
# "--evaluator",
# "hcea=cea()",
# "--search",
# "lazy_greedy([hff, hcea], preferred=[hff, hcea])"]),
]
ADL_DOMAINS = [
"assembly",
"miconic-fulladl",
"openstacks",
"openstacks-opt08-adl",
"optical-telegraphs",
"philosophers",
"psr-large",
"psr-middle",
"trucks",
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ADL_DOMAINS
#ENVIRONMENT = BaselSlurmEnvironment(
# partition="infai_2",
# email="[email protected]",
# export=["PATH", "DOWNWARD_BENCHMARKS"])
ENVIRONMENT = FreiburgSlurmEnvironment()
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=3)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"])
exp.run_steps()
| 2,123 |
Python
| 27.32 | 77 | 0.695714 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue348/v24.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
#from lab.environments import FreiburgSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue348-base", "issue348-version2-v3", "issue348-v24"]
CONFIGS = [
IssueConfig("lama", [], driver_options=["--alias", "lama-first"]),
IssueConfig("ehc-ff", ["--search", "ehc(ff())"]),
IssueConfig("ipdb", ["--search", "astar(ipdb())"]),
IssueConfig("lmcut", ["--search", "astar(lmcut())"]),
IssueConfig("blind", ["--search", "astar(blind())"]),
IssueConfig("lazy", [
"--evaluator",
"hff=ff()",
"--evaluator",
"hcea=cea()",
"--search",
"lazy_greedy([hff, hcea], preferred=[hff, hcea])"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
#ENVIRONMENT = FreiburgSlurmEnvironment()
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=3)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"])
exp.run_steps()
| 1,884 |
Python
| 28.453125 | 77 | 0.708599 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue348/v14-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue348-v13", "issue348-v14"]
CONFIGS = [
IssueConfig("blind", ["--search", "astar(blind())"]),
]
ADL_DOMAINS = [
"assembly",
"miconic-fulladl",
"openstacks",
"openstacks-opt08-adl",
"optical-telegraphs",
"philosophers",
"psr-large",
"psr-middle",
"trucks",
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ADL_DOMAINS
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ["openstacks-opt08-adl:p01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-strips", filter_domain=common_setup.DEFAULT_OPTIMAL_SUITE)
exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-adl", filter_domain=ADL_DOMAINS)
exp.run_steps()
| 1,853 |
Python
| 27.523076 | 135 | 0.727469 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue560/issue560.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue560: measures preprocessing wall-clock time only;
# the single blind-search config exists just so the pipeline has a search step.
from downward import suites
from relativescatter import RelativeScatterPlotReport
import common_setup
# Base revision vs. patched revision.
REVS = ["issue560-base", "issue560-v1"]
SUITE = suites.suite_all()
# We are only interested in the preprocessing here and will only run the first steps of the experiment.
CONFIGS = {
    "astar_blind": [
        "--search",
        "astar(blind())"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)
# Scatter plot of preprocessing time, one point per task, colored by domain.
exp.add_report(
    RelativeScatterPlotReport(
        attributes=["preprocess_wall_clock_time"],
        get_category=lambda run1, run2: run1.get("domain"),
    ),
    outfile='issue560_base_v1_preprocess_wall_clock_time.png'
)
exp.add_absolute_report_step(attributes=["preprocess_wall_clock_time"])
# Calling the experiment object runs the requested steps.
exp()
| 824 |
Python
| 21.916666 | 103 | 0.684466 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue214/state_size_parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
def calculate_old_state_size(content, props):
    """Reconstruct 'bytes_per_state' for logs of old planner revisions.

    Old revisions do not print the state size directly, so it is derived
    as preprocessor_variables * state_var_t_size. An existing value is
    never overwritten. *content* is unused but required by the lab
    parser callback signature.
    """
    already_known = 'bytes_per_state' in props
    have_inputs = ('preprocessor_variables' in props
                   and 'state_var_t_size' in props)
    if not already_known and have_inputs:
        num_vars = props['preprocessor_variables']
        var_size = props['state_var_t_size']
        props['bytes_per_state'] = num_vars * var_size
class StateSizeParser(Parser):
    """Lab parser extracting state-size statistics from planner logs.

    All patterns are optional (required=False) because old revisions do
    not print every line. The regex patterns are raw strings: in the
    original they were plain strings, so the backslash-d escapes were
    invalid escape sequences under Python 3.
    """
    def __init__(self):
        Parser.__init__(self)
        self.add_pattern('bytes_per_state', r'Bytes per state: (\d+)',
                         required=False, type=int)
        self.add_pattern('state_var_t_size', r'Dispatcher selected state size (\d).',
                         required=False, type=int)
        self.add_pattern('variables', r'Variables: (\d+)',
                         required=False, type=int)
        # Derive bytes_per_state for old logs that lack the direct line.
        self.add_function(calculate_old_state_size)
if __name__ == '__main__':
    # Use print() so the script runs under both Python 2 and 3; the
    # original "print '...'" statement is a syntax error in Python 3.
    parser = StateSizeParser()
    print('Running state size parser')
    parser.parse()
| 923 |
Python
| 37.499998 | 109 | 0.608884 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue214/issue214-v4-ipdb.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
from downward.configs import default_configs_optimal
from downward.reports.scatter import ScatterPlotReport
import common_setup
REVS = ["issue214-base", "issue214-v4"]
CONFIGS = {"ipdb": ["--search", "astar(ipdb())"]}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
parsers=['state_size_parser.py'],
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size']
)
exp()
| 865 |
Python
| 22.405405 | 120 | 0.695954 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue214/issue214.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
from downward.configs import default_configs_optimal
from downward.reports.scatter import ScatterPlotReport
import common_setup
REVS = ["issue214-base", "issue214-v2"]
CONFIGS = default_configs_optimal()
# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']
TEST_RUN = True
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
parsers=['state_size_parser.py'],
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size']
)
exp.add_scatter_plot_step()
exp.add_report(ScatterPlotReport(
attributes=['bytes_per_state'],
filter_config_nick='astar_blind',
),
outfile='issue214_bytes_per_state.png')
for config_nick in ['astar_blind', 'astar_lmcut', 'astar_merge_and_shrink_bisim', 'astar_ipdb']:
for attr in ['memory', 'total_time']:
exp.add_report(ScatterPlotReport(
attributes=[attr],
filter_config_nick=config_nick,
),
outfile='issue214_%s_%s.png' % (attr, config_nick))
exp()
| 1,483 |
Python
| 25.035087 | 120 | 0.673635 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue214/issue214-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_satisficing_with_ipc11
from downward.configs import default_configs_satisficing
from downward.reports.scatter import ScatterPlotReport
import common_setup
REVS = ["issue214-base", "issue214-v2"]
CONFIGS = default_configs_satisficing()
TEST_RUN = True
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_satisficing_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
parsers=['state_size_parser.py'],
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size']
)
exp.add_scatter_plot_step()
exp.add_report(ScatterPlotReport(
attributes=['bytes_per_state'],
filter_config_nick='astar_blind',
),
outfile='issue214_sat_bytes_per_state.png')
for config_nick in ['lazy_greedy_ff', 'eager_greedy_cg', 'seq_sat_lama_2011']:
for attr in ['memory', 'total_time']:
exp.add_report(ScatterPlotReport(
attributes=[attr],
filter_config_nick=config_nick,
),
outfile='issue214_sat_%s_%s.png' % (attr, config_nick))
exp()
| 1,398 |
Python
| 24.907407 | 120 | 0.669528 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue731/v4-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue731-base", "issue731-v4"]
BUILDS = ["release32"]
SEARCHES = [
("ff_lazy", ["--heuristic", "h=ff()", "--search", "lazy_greedy([h], preferred=[h])"]),
("cea_lazy", ["--heuristic", "h=cea()", "--search", "lazy_greedy([h], preferred=[h])"]),
("type_based", ["--heuristic", "h=ff()", "--search", "eager(alt([type_based([h, g()])]))"]),
("zhu_givan", [
"--heuristic", "hlm=lmcount(lm_zg())",
"--search", """lazy_greedy([hlm], preferred=[hlm])"""]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
] + [
IssueConfig(
"lama-first-{build}".format(**locals()),
[],
build_options=[build],
driver_options=["--build", build, "--alias", "lama-first"])
for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_comparison_table_step()
exp.run_steps()
| 1,678 |
Python
| 28.45614 | 96 | 0.628129 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue731/hash-microbenchmark/main.cc
|
#include <algorithm>
#include <ctime>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include "hash.h"
using namespace std;
// Time num_calls invocations of func using std::clock (process CPU time,
// not wall clock) and print "<desc> ... <seconds>s" on a single line.
static void benchmark(const string &desc, int num_calls,
                      const function<void()> &func) {
    // flush so the description is visible while the benchmark runs.
    cout << "Running " << desc << " " << num_calls << " times:" << flush;
    clock_t start = clock();
    for (int j = 0; j < num_calls; ++j)
        func();
    clock_t end = clock();
    double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC;
    cout << " " << duration << "s" << endl;
}
// Cheap deterministic bit-mixer producing "random-looking" keys from
// sequential ints. The arithmetic is performed in unsigned int on purpose:
// 0xdeadbeef does not fit in int, so in the original expression the
// multiplication already happened in unsigned via implicit conversion.
// Making the conversions explicit documents that modular wraparound is
// intended and avoids relying on the implicit signed-to-unsigned rules.
static int scramble(int i) {
    unsigned int mixed = (0xdeadbeefu * static_cast<unsigned int>(i)) ^ 0xfeedcafeu;
    return static_cast<int>(mixed);
}
// Micro-benchmark driver: compares std::unordered_set (labeled "BoostHash")
// against utils::HashSet ("BurtleFeed") on insert-only and insert-then-read
// workloads, with both sequential and scrambled int keys. The whole suite is
// repeated REPETITIONS times so warm-up effects are visible across passes.
int main(int, char **) {
    const int REPETITIONS = 2;
    const int NUM_CALLS = 1;
    const int NUM_INSERTIONS = 10000000;
    const int NUM_READ_PASSES = 10;
    for (int i = 0; i < REPETITIONS; ++i) {
        // Baseline: measures the timing-harness overhead itself.
        benchmark("nothing", NUM_CALLS, [] () {});
        cout << endl;

        benchmark("insert sequential int with BoostHash", NUM_CALLS,
                  [&]() {
                      unordered_set<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(i);
                      }
                  });
        benchmark("insert sequential int with BurtleFeed", NUM_CALLS,
                  [&]() {
                      utils::HashSet<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(i);
                      }
                  });
        cout << endl;
        // Scrambled keys defeat the identity hash's sequential locality.
        benchmark("insert scrambled int with BoostHash", NUM_CALLS,
                  [&]() {
                      unordered_set<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(scramble(i));
                      }
                  });
        benchmark("insert scrambled int with BurtleFeed", NUM_CALLS,
                  [&]() {
                      utils::HashSet<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(scramble(i));
                      }
                  });
        cout << endl;
        // Read-heavy variants: NUM_READ_PASSES lookup sweeps after filling.
        benchmark("insert, then read sequential int with BoostHash", NUM_CALLS,
                  [&]() {
                      unordered_set<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(i);
                      }
                      for (int j = 0; j < NUM_READ_PASSES; ++j) {
                          for (int i = 0; i < NUM_INSERTIONS; ++i) {
                              s.count(i);
                          }
                      }
                  });
        benchmark("insert, then read sequential int with BurtleFeed", NUM_CALLS,
                  [&]() {
                      utils::HashSet<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(i);
                      }
                      for (int j = 0; j < NUM_READ_PASSES; ++j) {
                          for (int i = 0; i < NUM_INSERTIONS; ++i) {
                              s.count(i);
                          }
                      }
                  });
        cout << endl;
        benchmark("insert, then read scrambled int with BoostHash", NUM_CALLS,
                  [&]() {
                      unordered_set<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(scramble(i));
                      }
                      for (int j = 0; j < NUM_READ_PASSES; ++j) {
                          for (int i = 0; i < NUM_INSERTIONS; ++i) {
                              s.count(i);
                          }
                      }
                  });
        benchmark("insert, then read scrambled int with BurtleFeed", NUM_CALLS,
                  [&]() {
                      utils::HashSet<int> s;
                      for (int i = 0; i < NUM_INSERTIONS; ++i) {
                          s.insert(scramble(i));
                      }
                      for (int j = 0; j < NUM_READ_PASSES; ++j) {
                          for (int i = 0; i < NUM_INSERTIONS; ++i) {
                              s.count(i);
                          }
                      }
                  });
        cout << endl;
    }
    return 0;
}
| 4,371 |
C++
| 33.15625 | 80 | 0.371997 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue529/issue529.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue529-v1-base", "issue529-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_blind': [
'--search',
'astar(blind())'],
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_cpdbs': [
'--search',
'astar(cpdbs())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_zopdbs': [
'--search',
'astar(zopdbs())'],
'eager_greedy_cg': [
'--heuristic',
'h=cg()',
'--search',
'eager_greedy(h, preferred=h)'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for conf in CONFIGS:
for attr in ("memory", "total_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue529-v1-base-%s" % conf, "issue529-v1-%s" % conf]
),
outfile='issue529_base_v1_%s_%s.png' % (conf, attr)
)
exp()
| 1,439 |
Python
| 22.606557 | 85 | 0.544128 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue420/issue420-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
import common_setup
REVS = ["issue420-base", "issue420-v1"]
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lmcut": ["--search", "astar(lmcut())"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
)
exp()
| 718 |
Python
| 18.432432 | 65 | 0.655989 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue420/issue420-v1-regressions.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Before you can run the experiment you need to create duplicates of the
two tasks we want to test:
cd ../benchmarks/tidybot-opt11-strips
for i in {00..49}; do cp p14.pddl p14-$i.pddl; done
cd ../parking-opt11-strips
for i in {00..49}; do cp pfile04-015.pddl pfile04-015-$i.pddl; done
Don't forget to remove the duplicate tasks afterwards. Otherwise they
will be included in subsequent experiments.
"""
import common_setup
REVS = ["issue420-base", "issue420-v1"]
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lmcut": ["--search", "astar(lmcut())"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = (["tidybot-opt11-strips:p14-%02d.pddl" % i for i in range(50)] +
["parking-opt11-strips:pfile04-015-%02d.pddl" % i for i in range(50)])
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
)
exp()
| 1,194 |
Python
| 23.387755 | 83 | 0.669179 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue479/issue479-5min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute
import common_setup
import os
exp = common_setup.IssueExperiment(
search_revisions=["issue479-v2"],
configs={
'dfp-b-50k': ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(max_states=100000,threshold=1,greedy=false),merge_strategy=merge_dfp(),label_reduction=label_reduction(before_shrinking=true, before_merging=false)))'],
'blind': ['--search', 'astar(blind())'],
},
suite=['airport'],
limits={"search_time": 300},
)
exp.add_absolute_report_step(attributes=['coverage', 'error', 'run_dir'])
exp()
| 679 |
Python
| 26.199999 | 246 | 0.674521 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue891/v1.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue891-base", "issue891-v1"]
CONFIGS = [
IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]),
IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex,random_seed=1729))"]),
IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]),
IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]),
IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]),
IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=3)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"])
exp.run_steps()
| 2,174 |
Python
| 37.839285 | 156 | 0.735511 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue891/v1-mips.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue891-v1"]
CONFIGS = [
IssueConfig("opcount-lp", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex, use_integer_operator_counts=false))"]),
IssueConfig("opcount-mip", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex, use_integer_operator_counts=true))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=3)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_report(ComparativeReport(
[("issue891-v1-opcount-lp", "issue891-v1-opcount-mip", "Diff (LP/MIP)")],
attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["initial_h_value"]))
exp.add_scatter_plot_step(relative=False, attributes=["total_time", "memory"],
additional=[
("opcount-lp", "opcount-mip", "issue891-v1", "issue891-v1", "total_time"),
("opcount-lp", "opcount-mip", "issue891-v1", "issue891-v1", "memory"),
])
def interesting_h_value(run):
    """Cap large initial h values at 51 so the scatter plot stays readable.

    Mutates *run* in place and returns it (lab filter convention); runs
    without an 'initial_h_value' entry pass through unchanged.
    """
    h_key = "initial_h_value"
    if run.get(h_key, 0) > 50:
        run[h_key] = 51
    return run
exp.add_report(ScatterPlotReport(
attributes=["initial_h_value"],
filter=interesting_h_value,
get_category=lambda run1, run2: run1["domain"],
))
exp.run_steps()
| 2,308 |
Python
| 31.985714 | 176 | 0.719671 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue925/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
from itertools import combinations
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# These revisions are all tag experimental branches off the same revision.
# we only need different tags so lab creates separate build directories in the build cache.
# We then manually recompile the code in the build cache with the correct settings.
REVISIONS = ["issue925-cplex12.8-static", "issue925-cplex12.8-dynamic", "issue925-cplex12.9-static", "issue925-cplex12.9-dynamic"]
CONFIGS = [
IssueConfig("opcount-seq-lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
IssueConfig("diverse-potentials", ["--search", "astar(diverse_potentials())"]),
IssueConfig("optimal-lmcount", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_comparison_table_step()
for r1, r2 in combinations(REVISIONS, 2):
for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]:
exp.add_report(RelativeScatterPlotReport(
attributes=["total_time"],
filter_algorithm=["%s-%s" % (r, nick) for r in [r1, r2]],
get_category=lambda run1, run2: run1["domain"]),
outfile="issue925-v1-total-time-%s-%s-%s.png" % (r1, r2, nick))
exp.run_steps()
| 2,243 |
Python
| 37.689655 | 132 | 0.726259 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue791/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue791-base", "issue791-v1"]
CONFIGS = [
IssueConfig(
'blind-debug', ['--search', 'astar(blind())'],
build_options=["debug32"],
driver_options=["--build", "debug32", "--overall-time-limit", "5m"]
),
IssueConfig(
'blind-release', ['--search', 'astar(blind())'],
build_options=["release32"],
driver_options=["--build", "release32", "--overall-time-limit", "5m"]
),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_comparison_table_step()
exp.run_steps()
| 1,509 |
Python
| 26.454545 | 77 | 0.689198 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue671/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
# Scatter plots are optional: if relativescatter (which needs matplotlib)
# cannot be imported, fall back to running the experiment without plots.
try:
    from relativescatter import RelativeScatterPlotReport
    matplotlib = True
except ImportError:
    # print() keeps this file importable under Python 3; the original
    # Python 2 "print '...'" statement is a syntax error there. Also
    # fixes the "availabe" typo in the message.
    print('matplotlib not available, scatter plots not available')
    matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_all()
configs = {
IssueConfig('blind', ['--search', 'astar(blind())'], driver_options=['--search-time-limit', '60s']),
IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--search-time-limit', '60s']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
processes=4,
email='[email protected]',
)
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.append('translator_*')
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue671-base', 'issue671-v1'])
| 1,711 |
Python
| 30.127272 | 110 | 0.599065 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue488/issue488.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue488: compare three PDB-based A* configurations
# (iPDB, single PDB, genetic-algorithm PDBs) between two revisions.
from downward import suites
import common_setup
CONFIGS = {
    'astar_ipdb': [
        '--search',
        'astar(ipdb())'],
    'astar_pdb': [
        '--search',
        'astar(pdb())'],
    'astar_gapdb': [
        '--search',
        'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
    search_revisions=["issue488-base", "issue488-v1"],
    configs=CONFIGS,
    suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 550 |
Python
| 17.999999 | 54 | 0.518182 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue592/v2-lama-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue592 (v2): run the two LAMA driver aliases on the
# satisficing suite and compare base against the v2 revision.
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v2"]
SUITE = suites.suite_satisficing()
CONFIGS = [
    IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
    IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
]
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 552 |
Python
| 19.481481 | 90 | 0.668478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue592/v3-lama-opt2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue592 (v3): A* with several landmark-count heuristics
# (Zhu/Givan, exhaustive, h^m landmarks, and a max over admissible and
# inadmissible lmcount) on the optimal STRIPS suite.
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v3"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
    IssueConfig("lm_zg", [
        "--landmarks",
        "lm=lm_zg()",
        "--heuristic",
        "hlm=lmcount(lm)",
        "--search",
        "astar(hlm)"]),
    IssueConfig("lm_exhaust", [
        "--landmarks",
        "lm=lm_exhaust()",
        "--heuristic",
        "hlm=lmcount(lm)",
        "--search",
        "astar(hlm)"]),
    IssueConfig("lm_hm", [
        "--landmarks",
        "lm=lm_hm(2)",
        "--heuristic",
        "hlm=lmcount(lm)",
        "--search",
        "astar(hlm)"]),
    # Same h^2 landmarks, but maximize over both lmcount variants.
    IssueConfig("lm_hm_max", [
        "--landmarks",
        "lm=lm_hm(2)",
        "--heuristic",
        "h1=lmcount(lm,admissible=true)",
        "--heuristic",
        "h2=lmcount(lm,admissible=false)",
        "--search",
        "astar(max([h1,h2]))"]),
]
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 1,157 |
Python
| 20.054545 | 53 | 0.513397 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue436/sat-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue436 (v2, satisficing): pick a subset of the shared
# satisficing configurations from configs.py and compare two revisions.
from downward import suites
import configs
import common_setup
REVS = ["issue436-base", "issue436-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
default_configs_satisficing = configs.default_configs_satisficing(extended=True)
# Only a hand-picked subset of the available configurations is run.
CONFIGS = {}
for name in ['lazy_greedy_add', 'eager_greedy_ff', 'eager_greedy_add', 'lazy_greedy_ff', 'pareto_ff']:
    CONFIGS[name] = default_configs_satisficing[name]
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=['total_time', 'memory'])
exp()
| 749 |
Python
| 22.437499 | 102 | 0.70494 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue436/opt-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue436 (v1, optimal): run a subset of the shared
# optimal-core configurations from configs.py and compare two revisions.
from downward import suites
import configs
import common_setup
REVS = ["issue436-base", "issue436-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
configs_optimal_core = configs.configs_optimal_core()
# Only a hand-picked subset of the available configurations is run.
CONFIGS = {}
for name in ['astar_merge_and_shrink_greedy_bisim', 'astar_merge_and_shrink_dfp_bisim',
             'astar_ipdb', 'astar_hmax', 'astar_blind', 'astar_lmcut',
             'astar_merge_and_shrink_bisim', 'astar_lmcount_lm_merged_rhw_hm']:
    CONFIGS[name] = configs_optimal_core[name]
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp()
| 780 |
Python
| 22.666666 | 87 | 0.678205 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue436/configs.py
|
def configs_optimal_core():
    """Return the core optimal-planning configurations as nick -> arguments."""
    configs = {}
    # Plain A* with a single classic admissible heuristic.
    configs["astar_blind"] = [
        "--search",
        "astar(blind)"]
    configs["astar_h2"] = [
        "--search",
        "astar(hm(2))"]
    configs["astar_ipdb"] = [
        "--search",
        "astar(ipdb)"]
    configs["astar_lmcut"] = [
        "--search",
        "astar(lmcut)"]
    configs["astar_hmax"] = [
        "--search",
        "astar(hmax)"]
    # BJOLP: admissible landmark counting with multi-path dependence.
    configs["astar_lmcount_lm_merged_rhw_hm"] = [
        "--search",
        "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"]
    # Merge-and-shrink variants (linear and DFP merge strategies).
    configs["astar_merge_and_shrink_bisim"] = [
        "--search",
        "astar(merge_and_shrink("
        "merge_strategy=merge_linear(variable_order=reverse_level),"
        "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false,"
        "group_by_h=true)))"]
    configs["astar_merge_and_shrink_greedy_bisim"] = [
        "--search",
        "astar(merge_and_shrink("
        "merge_strategy=merge_linear(variable_order=reverse_level),"
        "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,"
        "greedy=true,group_by_h=false)))"]
    configs["astar_merge_and_shrink_dfp_bisim"] = [
        "--search",
        "astar(merge_and_shrink(merge_strategy=merge_dfp,"
        "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,"
        "greedy=false,group_by_h=true)))"]
    # Selective max over LM-cut and landmark counting.
    configs["astar_selmax_lmcut_lmcount"] = [
        "--search",
        "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()]),"
        "admissible=true)],training_set=1000),mpd=true)"]
    return configs
def configs_satisficing_core():
    """Return the core satisficing configurations as nick -> arguments.

    The greedy configurations all follow the same pattern, so they are
    generated rather than spelled out one by one.
    """
    configs = {
        "astar_goalcount": [
            "--search",
            "astar(goalcount)"],
    }
    # eager_greedy runs with ff/add/cg/cea, lazy_greedy only with ff/add/cg.
    for search in ("eager_greedy", "lazy_greedy"):
        heuristics = ["ff", "add", "cg"]
        if search == "eager_greedy":
            heuristics.append("cea")
        for heuristic in heuristics:
            configs["%s_%s" % (search, heuristic)] = [
                "--heuristic",
                "h=%s()" % heuristic,
                "--search",
                "%s(h, preferred=h)" % search]
    return configs
def configs_optimal_ipc():
    """Return the IPC portfolio configurations for optimal planning.

    Nicks are the driver aliases with dashes replaced by underscores.
    """
    aliases = ["seq-opt-merge-and-shrink", "seq-opt-fdss-1", "seq-opt-fdss-2"]
    return {alias.replace("-", "_"): ["ipc", alias] for alias in aliases}
def configs_satisficing_ipc():
    """Return the IPC portfolio configurations for satisficing planning.

    Nicks are the driver aliases with dashes replaced by underscores.
    """
    aliases = ["seq-sat-lama-2011", "seq-sat-fdss-1", "seq-sat-fdss-2"]
    return {alias.replace("-", "_"): ["ipc", alias] for alias in aliases}
def configs_optimal_extended():
    """Return the extended (non-core) optimal configurations."""
    # BJOLP variant without landmark ordering.
    bjolp_no_order = [
        "--search",
        "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"]
    return {"astar_lmcount_lm_merged_rhw_hm_no_order": bjolp_no_order}
def configs_satisficing_extended():
    """Return the extended (non-core) satisficing configurations."""
    configs = {}
    # Eager greedy variants.
    configs["eager_greedy_alt_ff_cg"] = [
        "--heuristic",
        "hff=ff()",
        "--heuristic",
        "hcg=cg()",
        "--search",
        "eager_greedy(hff,hcg,preferred=[hff,hcg])"]
    configs["eager_greedy_ff_no_pref"] = [
        "--search",
        "eager_greedy(ff())"]
    # Lazy greedy variants.
    configs["lazy_greedy_alt_cea_cg"] = [
        "--heuristic",
        "hcea=cea()",
        "--heuristic",
        "hcg=cg()",
        "--search",
        "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"]
    configs["lazy_greedy_ff_no_pref"] = [
        "--search",
        "lazy_greedy(ff())"]
    configs["lazy_greedy_cea"] = [
        "--heuristic",
        "h=cea()",
        "--search",
        "lazy_greedy(h, preferred=h)"]
    # Weighted A*, lazy and eager.
    configs["lazy_wa3_ff"] = [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy_wastar(h,w=3,preferred=h)"]
    configs["eager_wa3_cg"] = [
        "--heuristic",
        "h=cg()",
        "--search",
        "eager(single(sum([g(),weight(h,3)])),preferred=h)"]
    # Enforced hill-climbing.
    configs["ehc_ff"] = [
        "--search",
        "ehc(ff())"]
    # Iterated weighted A* with decreasing weights.
    configs["iterated_wa_ff"] = [
        "--heuristic",
        "h=ff()",
        "--search",
        "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3),"
        "lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"]
    # Pareto and bucket-based open lists.
    configs["pareto_ff"] = [
        "--heuristic",
        "h=ff()",
        "--search",
        "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false,"
        "f_eval=sum([g(), h]))"]
    configs["bucket_lmcut"] = [
        "--heuristic",
        "h=lmcut()",
        "--search",
        "eager(single_buckets(h), reopen_closed=true, pathmax=false)"]
    return configs
def default_configs_optimal(core=True, ipc=True, extended=False):
    """Merge the selected optimal configuration groups into one dict."""
    groups = [
        (core, configs_optimal_core),
        (ipc, configs_optimal_ipc),
        (extended, configs_optimal_extended),
    ]
    configs = {}
    for enabled, get_group in groups:
        if enabled:
            configs.update(get_group())
    return configs
def default_configs_satisficing(core=True, ipc=True, extended=False):
    """Merge the selected satisficing configuration groups into one dict."""
    groups = [
        (core, configs_satisficing_core),
        (ipc, configs_satisficing_ipc),
        (extended, configs_satisficing_extended),
    ]
    configs = {}
    for enabled, get_group in groups:
        if enabled:
            configs.update(get_group())
    return configs
| 6,207 |
Python
| 29.282927 | 89 | 0.45932 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue269 (optimal): merge-and-shrink label-reduction
# orders and PDB heuristics, compared across two revisions.
from downward import suites
import common_setup
REVS = ["issue269-base", "issue269-v1"]
LIMITS = {"search_time": 600}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
    "mas-label-order": ["--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation,label_reduction_system_order=random))"],
    "mas-buckets": ["--search", "astar(merge_and_shrink(shrink_strategy=shrink_fh,label_reduction_system_order=regular))"],
    "gapdb": ["--search", "astar(gapdb())"],
    "ipdb": ["--search", "astar(ipdb())"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 741 |
Python
| 24.586206 | 136 | 0.663968 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue269 (satisficing): randomized successor ordering
# and a Pareto open list, compared across two revisions.
from downward import suites
import common_setup
REVS = ["issue269-base", "issue269-v1"]
LIMITS = {"search_time": 600}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
    "random-successors": ["--search", "lazy_greedy(ff(),randomize_successors=true)"],
    "pareto-open-list": [
        "--heuristic", "h=ff()",
        "--search", "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false,f_eval=sum([g(), h]))"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 655 |
Python
| 21.620689 | 113 | 0.625954 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/old_rng.h
|
#ifndef OLD_RNG_H
#define OLD_RNG_H

// Hand-rolled Mersenne Twister (MT19937) random number generator,
// predating the C++11 <random> library.
//
// FIX: the copy constructor / copy assignment parameters had been garbled
// to the mis-encoded character "©" (an "&copy" HTML-entity artifact),
// which does not compile; restored to "&copy".
class OldRandomNumberGenerator {
    static const int N = 624;
    unsigned int mt[N];  // Mersenne Twister state vector.
    int mti;             // Index of the next unused word in mt[].
public:
    OldRandomNumberGenerator(); // seed with time-dependent value
    OldRandomNumberGenerator(int seed); // seed with int; see comments for seed()
    OldRandomNumberGenerator(unsigned int *array, int count); // seed with array
    OldRandomNumberGenerator(const OldRandomNumberGenerator &copy);
    OldRandomNumberGenerator &operator=(const OldRandomNumberGenerator &copy);
    void seed(int s);
    void seed(unsigned int *array, int len);
    unsigned int next32(); // random integer in [0..2^32-1]
    int next31(); // random integer in [0..2^31-1]
    double next_half_open(); // random float in [0..1), 2^53 possible values
    double next_closed(); // random float in [0..1], 2^53 possible values
    double next_open(); // random float in (0..1), 2^53 possible values
    int next(int bound); // random integer in [0..bound), bound < 2^31
    int operator()(int bound) { // same as next()
        return next(bound);
    }
    double operator()() { // same as next_half_open()
        return next_half_open();
    }
};
/*
  TODO: Add a static assertion that guarantees that ints are 32 bit.
  In cases where they are not, need to adapt the code.
*/
/*
  Notes on seeding
  1. Seeding with an integer
  To avoid different seeds mapping to the same sequence, follow one of
  the following two conventions:
  a) Only use seeds in 0..2^31-1 (preferred)
  b) Only use seeds in -2^30..2^30-1 (2-complement machines only)
  2. Seeding with an array (die-hard seed method)
  The length of the array, len, can be arbitrarily high, but for lengths greater
  than N, collisions are common. If the seed is of high quality, using more than
  N values does not make sense.
*/
#endif
| 1,898 |
C
| 34.830188 | 81 | 0.660169 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/alt_inlined_rng.h
|
#ifndef ALT_INLINED_RNG_H
#define ALT_INLINED_RNG_H
#include <cassert>
#include <random>
// Benchmark variant of the inlined RNG: the [0,1) double distribution is a
// member (constructed once), while the bounded-int distribution is still
// constructed per call.
class AltInlinedRandomNumberGenerator {
    std::mt19937 rng;
    std::uniform_real_distribution<double> double_distribution {
        0.0, 1.0
    };
public:
    explicit AltInlinedRandomNumberGenerator(int seed) {
        rng.seed(seed);
    }
    // Random double in [0, 1).
    double operator()() {
        return double_distribution(rng);
    }
    // Random int in [0, bound); bound must be positive.
    int operator()(int bound) {
        assert(bound > 0);
        std::uniform_int_distribution<int> distribution(0, bound - 1);
        return distribution(rng);
    }
};
#endif
| 593 |
C
| 19.482758 | 70 | 0.637437 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/main.cc
|
#include <ctime>
#include <functional>
#include <iostream>
#include <string>
#include "alt_inlined_rng.h"
#include "inlined_rng.h"
#include "old_rng.h"
#include "rng.h"
using namespace std;
// Run func() num_calls times and print the elapsed CPU time
// (clock()/CLOCKS_PER_SEC measures processor time, not wall time).
void benchmark(const string &desc, int num_calls,
               const function<void()> &func) {
    cout << "Running " << desc << " " << num_calls << " times:" << flush;
    clock_t start = clock();
    for (int i = 0; i < num_calls; ++i)
        func();
    clock_t end = clock();
    double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC;
    cout << " " << duration << " seconds" << endl;
}
// Micro-benchmark comparing the RNG implementations: random doubles first,
// then bounded random ints. The empty "nothing" run calibrates loop overhead.
int main(int, char **) {
    const int NUM_ITERATIONS = 100000000;
    const int SEED = 2014;
    OldRandomNumberGenerator old_rng(SEED);
    RandomNumberGenerator new_rng(SEED);
    InlinedRandomNumberGenerator inlined_rng(SEED);
    AltInlinedRandomNumberGenerator alt_inlined_rng(SEED);
    benchmark("nothing", NUM_ITERATIONS, [] () {});
    cout << endl;
    benchmark("random double (old RNG)",
              NUM_ITERATIONS,
              [&]() {old_rng();});
    benchmark("random double (new RNG, old distribution)",
              NUM_ITERATIONS,
              [&]() {new_rng.get_double_old();});
    benchmark("random double (new RNG)",
              NUM_ITERATIONS,
              [&]() {new_rng();});
    benchmark("random double (inlined RNG)",
              NUM_ITERATIONS,
              [&]() {inlined_rng();});
    benchmark("random double (alternative inlined RNG)",
              NUM_ITERATIONS,
              [&]() {alt_inlined_rng();});
    cout << endl;
    benchmark("random int in 0..999 (old RNG)",
              NUM_ITERATIONS,
              [&]() {old_rng(1000);});
    benchmark("random int in 0..999 (new RNG, old distribution)",
              NUM_ITERATIONS,
              [&]() {new_rng.get_int_old(1000);});
    benchmark("random int in 0..999 (inlined RNG)",
              NUM_ITERATIONS,
              [&]() {inlined_rng(1000);});
    return 0;
}
| 1,985 |
C++
| 30.03125 | 73 | 0.55466 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/rng.h
|
#ifndef RNG_H
#define RNG_H
#include <algorithm>
#include <random>
#include <vector>
// C++11 <random>-based RNG wrapper; the *_old methods reproduce the value
// construction of the legacy hand-rolled Mersenne Twister for comparison.
class RandomNumberGenerator {
    // Mersenne Twister random number generator.
    std::mt19937 rng;
public:
    RandomNumberGenerator(); // seed with time-dependent value
    explicit RandomNumberGenerator(int seed_); // seed with integer
    // Non-copyable: copying an RNG would duplicate its state/stream.
    RandomNumberGenerator(const RandomNumberGenerator &) = delete;
    RandomNumberGenerator &operator=(const RandomNumberGenerator &) = delete;
    void seed(int seed);
    double operator()(); // random double in [0..1), 2^53 possible values
    int operator()(int bound); // random integer in [0..bound), bound < 2^31
    unsigned int next32_old();
    int next31_old();
    double get_double_old();
    int get_int_old(int bound);
    // Shuffle vec in place using this generator.
    template<class T>
    void shuffle(std::vector<T> &vec) {
        std::shuffle(vec.begin(), vec.end(), rng);
    }
};
#endif
| 922 |
C
| 26.147058 | 80 | 0.656182 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/rng.cc
|
#include "rng.h"
#include <cassert>
#include <chrono>
using namespace std;
RandomNumberGenerator::RandomNumberGenerator() {
unsigned int secs = chrono::system_clock::now().time_since_epoch().count();
seed(secs);
}
RandomNumberGenerator::RandomNumberGenerator(int seed_) {
seed(seed_);
}
void RandomNumberGenerator::seed(int seed) {
rng.seed(seed);
}
double RandomNumberGenerator::operator()() {
uniform_real_distribution<double> distribution(0.0, 1.0);
return distribution(rng);
}
int RandomNumberGenerator::operator()(int bound) {
assert(bound > 0);
uniform_int_distribution<int> distribution(0, bound - 1);
return distribution(rng);
}
unsigned int RandomNumberGenerator::next32_old() {
return rng();
}
int RandomNumberGenerator::next31_old() {
return static_cast<int>(next32_old() >> 1);
}
double RandomNumberGenerator::get_double_old() {
unsigned int a = next32_old() >> 5, b = next32_old() >> 6;
return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
}
int RandomNumberGenerator::get_int_old(int bound) {
unsigned int value;
do {
value = next31_old();
} while (value + static_cast<unsigned int>(bound) >= 0x80000000UL);
// Just using modulo doesn't lead to uniform distribution. This does.
return static_cast<int>(value % bound);
}
| 1,335 |
C++
| 22.034482 | 79 | 0.677903 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/inlined_rng.h
|
#ifndef INLINED_RNG_H
#define INLINED_RNG_H
#include <cassert>
#include <random>
// Header-only variant of RandomNumberGenerator for the micro-benchmark:
// identical logic, but defined inline so calls can be inlined.
class InlinedRandomNumberGenerator {
    std::mt19937 rng;
public:
    explicit InlinedRandomNumberGenerator(int seed) {
        rng.seed(seed);
    }
    // Random double in [0, 1); the distribution is constructed per call.
    double operator()() {
        std::uniform_real_distribution<double> distribution(0.0, 1.0);
        return distribution(rng);
    }
    // Random int in [0, bound); bound must be positive.
    int operator()(int bound) {
        assert(bound > 0);
        std::uniform_int_distribution<int> distribution(0, bound - 1);
        return distribution(rng);
    }
};
#endif
| 554 |
C
| 19.555555 | 70 | 0.638989 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue269/rng-microbenchmark/old_rng.cc
|
/*
  Mersenne Twister Random Number Generator.
  Based on the C Code by Takuji Nishimura and Makoto Matsumoto.
  http://www.math.keio.ac.jp/~matumoto/emt.html
*/
#include "old_rng.h"
#include <ctime>
using namespace std;
static const int M = 397;
static const unsigned int MATRIX_A = 0x9908b0dfU;
static const unsigned int UPPER_MASK = 0x80000000U;
static const unsigned int LOWER_MASK = 0x7fffffffU;
OldRandomNumberGenerator::OldRandomNumberGenerator() {
    seed(static_cast<int>(time(0)));
}
OldRandomNumberGenerator::OldRandomNumberGenerator(int s) {
    seed(s);
}
OldRandomNumberGenerator::OldRandomNumberGenerator(
    unsigned int *init_key, int key_length) {
    seed(init_key, key_length);
}
// FIX: both copy-parameter declarations below had been garbled to the
// mis-encoded character "©" (an "&copy" HTML-entity artifact), which does
// not compile; restored to "&copy".
OldRandomNumberGenerator::OldRandomNumberGenerator(
    const OldRandomNumberGenerator &copy) {
    *this = copy;
}
OldRandomNumberGenerator &OldRandomNumberGenerator::operator=(
    const OldRandomNumberGenerator &copy) {
    for (int i = 0; i < N; ++i)
        mt[i] = copy.mt[i];
    mti = copy.mti;
    return *this;
}
void OldRandomNumberGenerator::seed(int se) {
    unsigned int s = (static_cast<unsigned int>(se) << 1) + 1;
    // Seeds should not be zero. Other possible solutions (such as s |= 1)
    // lead to more confusion, because often-used low seeds like 2 and 3 would
    // be identical. This leads to collisions only for rarely used seeds (see
    // note in header file).
    mt[0] = s & 0xffffffffUL;
    for (mti = 1; mti < N; ++mti) {
        mt[mti] = (1812433253UL * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti);
        mt[mti] &= 0xffffffffUL;
    }
}
void OldRandomNumberGenerator::seed(unsigned int *init_key, int key_length) {
    int i = 1, j = 0, k = (N > key_length ? N : key_length);
    seed(19650218UL);
    for (; k; --k) {
        mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) +
            init_key[j] + j;
        mt[i] &= 0xffffffffUL;
        ++i;
        ++j;
        if (i >= N) {
            mt[0] = mt[N - 1];
            i = 1;
        }
        if (j >= key_length)
            j = 0;
    }
    for (k = N - 1; k; --k) {
        mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941UL)) - i;
        mt[i] &= 0xffffffffUL;
        ++i;
        if (i >= N) {
            mt[0] = mt[N - 1];
            i = 1;
        }
    }
    mt[0] = 0x80000000UL;
}
// Core MT19937 step: regenerate the state block when exhausted, then
// temper and return the next 32-bit word.
unsigned int OldRandomNumberGenerator::next32() {
    unsigned int y;
    static unsigned int mag01[2] = {
        0x0UL, MATRIX_A
    };
    if (mti >= N) {
        int kk;
        for (kk = 0; kk < N - M; ++kk) {
            y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
            mt[kk] = mt[kk + M] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        for (; kk < N - 1; ++kk) {
            y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
            mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
        mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ mag01[y & 0x1UL];
        mti = 0;
    }
    y = mt[mti++];
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680UL;
    y ^= (y << 15) & 0xefc60000UL;
    y ^= (y >> 18);
    return y;
}
int OldRandomNumberGenerator::next31() {
    return static_cast<int>(next32() >> 1);
}
double OldRandomNumberGenerator::next_closed() {
    unsigned int a = next32() >> 5, b = next32() >> 6;
    return (a * 67108864.0 + b) * (1.0 / 9007199254740991.0);
}
double OldRandomNumberGenerator::next_half_open() {
    unsigned int a = next32() >> 5, b = next32() >> 6;
    return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
}
double OldRandomNumberGenerator::next_open() {
    unsigned int a = next32() >> 5, b = next32() >> 6;
    return (0.5 + a * 67108864.0 + b) * (1.0 / 9007199254740991.0);
}
int OldRandomNumberGenerator::next(int bound) {
    unsigned int value;
    do {
        value = next31();
    } while (value + static_cast<unsigned int>(bound) >= 0x80000000UL);
    // Just using modulo doesn't lead to uniform distribution. This does.
    return static_cast<int>(value % bound);
}
| 4,066 |
C++
| 28.471014 | 79 | 0.55214 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue648/v2-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue648 (v2, satisficing): randomized/epsilon-greedy/
# pareto/type-based open lists; a custom parser adds planner-outcome columns.
from downward import suites
from lab.reports import Attribute
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue648-base", "issue648-v2"]
SUITE=suites.suite_satisficing()
SUITE.extend(suites.suite_ipc14_sat())
CONFIGS = [
    # Test lazy search with randomization
    IssueConfig("lazy_greedy_ff_randomized", [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy_greedy(h, preferred=h, randomize_successors=true)"
    ]),
    # Epsilon Greedy
    IssueConfig("lazy_epsilon_greedy_ff", [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy(epsilon_greedy(h))"
    ]),
    # Pareto
    IssueConfig("lazy_pareto_ff_cea", [
        "--heuristic",
        "h1=ff()",
        "--heuristic",
        "h2=cea()",
        "--search",
        "lazy(pareto([h1, h2]))"
    ]),
    # Type based
    IssueConfig("ff-type-const", [
        "--heuristic",
        "hff=ff(cost_type=one)",
        "--search",
        "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])]),"
        "preferred=[hff],cost_type=one)"
    ]),
]
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    email="[email protected]"
)
# Ship parser.py with each run; it derives the extra attributes below.
exp.add_resource('parser', 'parser.py', dest='parser.py')
exp.add_command('parser', ['parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True)
out_of_time = Attribute('out_of_time', absolute=True, min_wins=True)
extra_attributes = [
    perfect_heuristic,
    proved_unsolvability,
    out_of_memory,
    out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_comparison_table_step(attributes=attributes)
exp()
| 2,011 |
Python
| 25.826666 | 90 | 0.611636 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue648/v1-sat-test.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue648 (v1, satisficing): same configurations as the
# v2 experiment, without the custom outcome parser.
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue648-base", "issue648-v1"]
SUITE=suites.suite_satisficing()
SUITE.extend(suites.suite_ipc14_sat())
CONFIGS = [
    # Test lazy search with randomization
    IssueConfig("lazy_greedy_ff_randomized", [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy_greedy(h, preferred=h, randomize_successors=true)"
    ]),
    # Epsilon Greedy
    IssueConfig("lazy_epsilon_greedy_ff", [
        "--heuristic",
        "h=ff()",
        "--search",
        "lazy(epsilon_greedy(h))"
    ]),
    # Pareto
    IssueConfig("lazy_pareto_ff_cea", [
        "--heuristic",
        "h1=ff()",
        "--heuristic",
        "h2=cea()",
        "--search",
        "lazy(pareto([h1, h2]))"
    ]),
    # Type based
    IssueConfig("ff-type-const", [
        "--heuristic",
        "hff=ff(cost_type=one)",
        "--search",
        "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])]),"
        "preferred=[hff],cost_type=one)"
    ]),
]
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    email="[email protected]"
)
# Absolute report commented out because a comparison table is more useful for this issue.
# (It's still in this file because someone might want to use it as a basis.)
# Scatter plots commented out for now because I have no usable matplotlib available.
# exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()
exp()
| 1,647 |
Python
| 25.15873 | 90 | 0.582271 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue648/parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
def check_planner_exit_reason(content, props):
    """Derive boolean 'out_of_time'/'out_of_memory' attributes from 'error'.

    Unexpected error values are reported and leave *props* untouched.
    The *content* argument (the run log) is unused but required by the
    lab parser-function interface.
    """
    error = props.get('error')
    if error not in ('none', 'timeout', 'out-of-memory'):
        # Unknown outcome: report it and skip setting derived attributes.
        # Parenthesized print is valid in both Python 2 and Python 3.
        print('error: %s' % error)
        return
    props['out_of_time'] = error == 'timeout'
    props['out_of_memory'] = error == 'out-of-memory'
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
    """Set 'perfect_heuristic' for solved runs.

    A heuristic counts as perfect when the search expanded exactly
    plan_length + 1 states (the states on the plan plus the goal state).
    Unsolved runs (no 'plan_length' attribute) are left unmarked.
    """
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    # 'is not None' instead of '!= None' (identity check; PEP 8 idiom).
    if plan_length is not None:
        props['perfect_heuristic'] = plan_length + 1 == expansions
parser.add_function(check_perfect_heuristic)
def check_proved_unsolvability(content, props):
    """Set 'proved_unsolvability': True iff the run solved nothing
    (coverage 0) and the log shows the state space was exhausted."""
    exhausted_line = 'Completely explored state space -- no solution!'
    props['proved_unsolvability'] = bool(
        props['coverage'] == 0 and exhausted_line in content.splitlines())
parser.parse()
| 1,365 |
Python
| 28.063829 | 75 | 0.640293 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue648/v1-opt-reparse.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Lab experiment for issue648 (v1, optimal, re-parse): re-fetches the results
# of the v1-opt-test experiment through parser.py to add outcome attributes.
from downward import suites
from lab.reports import Attribute
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue648-base", "issue648-v1"]
SUITE=suites.suite_optimal_strips()
SUITE.extend(suites.suite_ipc14_opt_strips())
CONFIGS = [
    # Test label reduction, shrink_bucket_based (via shrink_fh and shrink_random)
    IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
    IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
    IssueConfig('dfp-r50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
    # Test sampling
    IssueConfig('ipdb', ['--search', 'astar(ipdb)']),
    # Test genetic pattern generation
    IssueConfig('genetic', ['--search', 'astar(zopdbs(patterns=genetic))']),
    # Test cegar
    IssueConfig(
        "cegar-10K-goals-randomorder",
        ["--search", "astar(cegar(subtasks=[goals(order=random)],max_states=10000,max_time=infinity))"]),
    IssueConfig(
        "cegar-10K-original-randomorder",
        ["--search", "astar(cegar(subtasks=[original],max_states=10000,max_time=infinity,pick=random))"]),
]
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    email="[email protected]"
)
# Re-parse existing run directories instead of running the planner again.
exp.add_fetcher('data/issue648-v1-opt-test', parsers=['parser.py'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True)
out_of_time = Attribute('out_of_time', absolute=True, min_wins=True)
extra_attributes = [
    perfect_heuristic,
    proved_unsolvability,
    out_of_memory,
    out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_comparison_table_step(attributes=attributes)
exp()
| 2,356 |
Python
| 39.63793 | 240 | 0.724533 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue648/v1-opt-test.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue648-base", "issue648-v1"]
SUITE=suites.suite_optimal_strips()
SUITE.extend(suites.suite_ipc14_opt_strips())
CONFIGS = [
# Test label reduction, shrink_bucket_based (via shrink_fh and shrink_random)
IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
IssueConfig('dfp-r50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
# Test sampling
IssueConfig('ipdb', ['--search', 'astar(ipdb)']),
# Test genetic pattern generation
IssueConfig('genetic', ['--search', 'astar(zopdbs(patterns=genetic))']),
# Test cegar
IssueConfig(
"cegar-10K-goals-randomorder",
["--search", "astar(cegar(subtasks=[goals(order=random)],max_states=10000,max_time=infinity))"]),
IssueConfig(
"cegar-10K-original-randomorder",
["--search", "astar(cegar(subtasks=[original],max_states=10000,max_time=infinity,pick=random))"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 1,704 |
Python
| 40.585365 | 240 | 0.709507 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue416/v1-lama.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_satisficing_with_ipc11()
configs = {
IssueConfig('seq_sat_lama_2011', [], driver_options=['--alias', 'seq-sat-lama-2011']),
IssueConfig('lama_first', [], driver_options=['--alias', 'lama-first']),
IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']),
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-base', 'issue416-v1'])
| 1,635 |
Python
| 29.867924 | 94 | 0.577982 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue416/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
IssueConfig('astar-seq_opt_bjolp', ['--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']),
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
for config in configs:
nick = config.nick
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_memory_%s.png' % nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue416_base_v1_total_time_%s.png' % nick
)
exp()
main(revisions=['issue416-base', 'issue416-v1'])
| 1,719 |
Python
| 30.851851 | 135 | 0.57708 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue705/v7.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue705 (v7): blind A* across revisions v8-v11,
# with a custom parser collecting successor-generator (SG) statistics.
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, arithmetic_mean, geometric_mean
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
from csv_report import CSVReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue705-base", "issue705-v8", "issue705-v9", "issue705-v10", "issue705-v11"]
CONFIGS = [
    IssueConfig(
        'astar-blind',
        ['--search', 'astar(blind())'],
    )
]
# Union of the default optimal and satisficing suites, sorted for stable order.
SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) |
                    set(common_setup.DEFAULT_SATISFICING_SUITE)))
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])
# Merge in results from the earlier v4 experiment for comparison.
exp.add_fetcher('data/issue705-v4-eval')
exp.add_comparison_table_step()
def add_sg_peak_mem_diff_per_task_size(run):
    """Lab run filter: derive SG peak-memory growth per translator task size.

    Adds the "sg_peak_mem_diff_per_task_size" key when both source values
    are present and non-zero; always returns the (possibly updated) run.
    """
    peak_mem = run.get("sg_peak_mem_diff")
    task_size = run.get("translator_task_size")
    if not peak_mem or not task_size:
        return run
    run["sg_peak_mem_diff_per_task_size"] = peak_mem / float(task_size)
    return run
# Relative scatter plots for consecutive revision pairs, and a CSV dump of
# the SG statistics for the newest revision.
for attr in ["total_time", "search_time", "sg_construction_time", "memory", "sg_peak_mem_diff_per_task_size"]:
    for rev1, rev2 in [("base", "v11"), ("v8", "v9"), ("v9", "v10"), ("v10", "v11")]:
        exp.add_report(RelativeScatterPlotReport(
            attributes=[attr],
            filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2],
            filter=add_sg_peak_mem_diff_per_task_size,
            get_category=lambda r1, r2: r1["domain"],
            ),
            outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2))
exp.add_report(CSVReport(
    filter_algorithm="issue705-v11-astar-blind",
    attributes=["algorithm", "domain", "sg_*", "translator_task_size"]),
    outfile="csvreport.csv")
exp.run_steps()
| 2,345 |
Python
| 31.136986 | 110 | 0.666525 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue705/csv_report.py
|
from downward.reports import PlanningReport
class CSVReport(PlanningReport):
    """Report that writes one space-separated row of attribute values per run."""

    def get_text(self):
        """Return the report text: a header row followed by one row per run."""
        def format_run(run):
            return " ".join(str(run.get(attr, "nan")) for attr in self.attributes)

        rows = [" ".join(self.attributes)]
        for runs in self.problem_runs.values():
            rows.extend(format_run(run) for run in runs)
        return "\n".join(rows)
| 418 |
Python
| 33.916664 | 74 | 0.562201 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue705/sg-parser.py
|
#! /usr/bin/env python
"""Parse successor-generator (SG) statistics from Fast Downward logs."""
from lab.parser import Parser


def add_absolute_and_relative(parser, attribute, pattern):
    """Register *attribute* (absolute count, int) and *attribute*_rel (the
    relative share printed in parentheses, float) for log lines of the form
    '<pattern> <count> (<share>)'."""
    # Raw strings: '\d' and '\(' are invalid escape sequences in normal
    # string literals and raise warnings on modern Python.
    parser.add_pattern(attribute, pattern + r' (\d+) .+', required=False, type=int)
    parser.add_pattern(attribute + '_rel', pattern + r' \d+ \((.+)\)', required=False, type=float)


parser = Parser()
parser.add_pattern('sg_construction_time', r'SG construction time: (.+)s', required=False, type=float)
parser.add_pattern('sg_peak_mem_diff', r'SG construction peak memory difference: (\d+)', required=False, type=int)
parser.add_pattern('sg_size_estimate_total', r'SG size estimates: total: (\d+)', required=False, type=int)
add_absolute_and_relative(parser, 'sg_size_estimate_overhead', 'SG size estimates: object overhead:')
add_absolute_and_relative(parser, 'sg_size_estimate_operators', 'SG size estimates: operators:')
add_absolute_and_relative(parser, 'sg_size_estimate_switch_var', 'SG size estimates: switch var:')
add_absolute_and_relative(parser, 'sg_size_estimate_value_generator', 'SG size estimates: generator for value:')
add_absolute_and_relative(parser, 'sg_size_estimate_default_generator', 'SG size estimates: default generator:')
add_absolute_and_relative(parser, 'sg_size_estimate_next_generator', 'SG size estimates: next generator:')
add_absolute_and_relative(parser, 'sg_counts_immediates', 'SG object counts: immediates:')
add_absolute_and_relative(parser, 'sg_counts_forks', 'SG object counts: forks:')
add_absolute_and_relative(parser, 'sg_counts_switches', 'SG object counts: switches:')
add_absolute_and_relative(parser, 'sg_counts_leaves', 'SG object counts: leaves:')
add_absolute_and_relative(parser, 'sg_counts_empty', 'SG object counts: empty:')
add_absolute_and_relative(parser, 'sg_counts_switch_empty', 'SG switch statistics: immediate ops empty:')
add_absolute_and_relative(parser, 'sg_counts_switch_single', 'SG switch statistics: single immediate op:')
add_absolute_and_relative(parser, 'sg_counts_switch_more', 'SG switch statistics: more immediate ops:')
add_absolute_and_relative(parser, 'sg_counts_leaf_empty', 'SG leaf statistics: applicable ops empty:')
add_absolute_and_relative(parser, 'sg_counts_leaf_single', 'SG leaf statistics: single applicable op:')
add_absolute_and_relative(parser, 'sg_counts_leaf_more', 'SG leaf statistics: more applicable ops:')
add_absolute_and_relative(parser, 'sg_counts_switch_vector_single', 'SG switch statistics: vector single:')
add_absolute_and_relative(parser, 'sg_counts_switch_vector_small', 'SG switch statistics: vector small:')
add_absolute_and_relative(parser, 'sg_counts_switch_vector_large', 'SG switch statistics: vector large:')
add_absolute_and_relative(parser, 'sg_counts_switch_vector_full', 'SG switch statistics: vector full:')
parser.parse()
| 2,761 |
Python
| 64.761903 | 113 | 0.750453 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue705/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue705 (v1): bounded blind search (bound=0, so the
# search stops immediately) to measure successor-generator construction cost.
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, arithmetic_mean, geometric_mean
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
from csv_report import CSVReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue705-base", "issue705-v1", "issue705-v2", "issue705-v3"]
CONFIGS = [
    IssueConfig(
        'bounded-blind',
        ['--search', 'astar(blind(), bound=0)'],
    )
]
# Union of the default optimal and satisficing suites, sorted for stable order.
SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) |
                    set(common_setup.DEFAULT_SATISFICING_SUITE)))
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])
# Absolute report listing all successor-generator statistics; absolute counts
# are aggregated with arithmetic means, relative shares with geometric means.
exp.add_absolute_report_step(attributes=[
    Attribute("sg_construction_time", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_leaf_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_leaf_more", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_leaf_single", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_leaves", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_switch_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_switch_more", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_switch_single", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_switches", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_forks", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_immediates", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_default_generator", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_operators", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_overhead", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_switch_var", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_total", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_value_generator", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_size_estimate_next_generator", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_empty_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_leaf_empty_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_leaf_more_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_leaf_single_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_leaves_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_switch_empty_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_switch_more_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_switch_single_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_switches_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_forks_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_counts_immediates_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_default_generator_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_operators_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_overhead_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_switch_var_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_value_generator_rel", functions=[geometric_mean], min_wins=True),
    Attribute("sg_size_estimate_next_generator_rel", functions=[geometric_mean], min_wins=True),
    "error",
    "run_dir",
    ])
exp.add_report(CSVReport(attributes=["algorithm", "domain", "sg_*", "translator_task_size"]), outfile="csvreport.csv")
def add_sg_peak_mem_diff_per_task_size(run):
    """Lab run filter: derive SG peak-memory growth per translator task size.

    Adds the "sg_peak_mem_diff_per_task_size" key when both source values
    are present and non-zero; always returns the (possibly updated) run.
    """
    peak_mem = run.get("sg_peak_mem_diff")
    task_size = run.get("translator_task_size")
    if not peak_mem or not task_size:
        return run
    run["sg_peak_mem_diff_per_task_size"] = peak_mem / float(task_size)
    return run
# Relative scatter plots of the per-task-size SG memory overhead, base vs vN.
for rev1, rev2 in [("base", "v1"), ("base", "v2"), ("base", "v3")]:
    exp.add_report(RelativeScatterPlotReport(
        attributes=["sg_peak_mem_diff_per_task_size"],
        filter=add_sg_peak_mem_diff_per_task_size,
        filter_algorithm=["issue705-%s-bounded-blind" % rev1, "issue705-%s-bounded-blind" % rev2],
        get_category=lambda r1, r2: r1["domain"],
        ),
        outfile="issue705-%s-%s.png" % (rev1, rev2))
exp.run_steps()
| 5,360 |
Python
| 47.297297 | 118 | 0.708209 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue527/compare_with_paper.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Compare the issue527-v2 results against the numbers from an earlier
# (paper) experiment by fetching both eval dirs and pairing the two configs.
from lab.experiment import Experiment
from lab.steps import Step
from downward.reports.compare import CompareConfigsReport
from common_setup import get_experiment_name, get_data_dir, get_repo_base
import os
DATADIR = os.path.join(os.path.dirname(__file__), 'data')
exp = Experiment(get_data_dir())
exp.add_fetcher(os.path.join(DATADIR, 'e2013101802-pho-seq-constraints-eval'), filter_config_nick="astar_pho_seq_no_onesafe")
exp.add_fetcher(os.path.join(DATADIR, 'issue527-v2-eval'), filter_config_nick="astar_occ_seq")
exp.add_report(CompareConfigsReport(
    [
        ('869fec6f843b-astar_pho_seq_no_onesafe', 'issue527-v2-astar_occ_seq'),
    ],
    attributes=[
        'coverage',
        'total_time',
        'expansions',
        'evaluations',
        'generated',
        'expansions_until_last_jump',
        'error',
    ],
    )
)
exp()
| 971 |
Python
| 25.27027 | 125 | 0.619979 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue527/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue527 (v1): operator-counting and canonical-PDB
# configurations on the optimal suite.
from downward import suites
import common_setup
REVS = ["issue527-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
    "astar_occ_lmcut": [
        "--search",
        "astar(operatorcounting([lmcut_constraints()]))"],
    "astar_occ_seq": [
        "--search",
        "astar(operatorcounting([state_equation_constraints()]))"],
    "astar_occ_pho_1": [
        "--search",
        "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=1, only_interesting_patterns=true)]))"],
    "astar_occ_pho_2": [
        "--search",
        "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=2, only_interesting_patterns=true)]))"],
    "astar_occ_pho_2_naive": [
        "--search",
        "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=2, only_interesting_patterns=false)]))"],
    "astar_occ_pho_ipdb": [
        "--search",
        "astar(operatorcounting([pho_constraints_ipdb()]))"],
    "astar_cpdbs_1": [
        "--search",
        "astar(cpdbs_systematic(pattern_max_size=1, only_interesting_patterns=true))"],
    "astar_cpdbs_2": [
        "--search",
        "astar(cpdbs_systematic(pattern_max_size=2, only_interesting_patterns=true))"],
    # BUG FIX: this entry was previously also keyed "astar_occ_pho_2_naive",
    # which silently overwrote the operator-counting config above and dropped
    # it from the experiment. Its content is a cpdbs config, so key it
    # consistently with astar_cpdbs_1/astar_cpdbs_2.
    "astar_cpdbs_2_naive": [
        "--search",
        "astar(cpdbs_systematic(pattern_max_size=2, only_interesting_patterns=false))"],
}
exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_absolute_report_step()
exp()
| 1,548 |
Python
| 29.979999 | 118 | 0.620801 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue549/issue549-v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Run the shared issue549 experiment (defined in main.py) for revision v3.
from main import main
main(revisions=["issue549-base", "issue549-v3"])
| 120 |
Python
| 16.285712 | 48 | 0.65 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue549/main.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import common_setup
def main(revisions=None):
    """Run the issue549 experiment for *revisions*: three eager greedy
    configurations on the satisficing suite, with landmark statistics
    added to the comparison tables."""
    SUITE = suites.suite_satisficing_with_ipc11()
    CONFIGS = {
        'cea': ['--search', 'eager_greedy(cea())'],
        'cg': ['--search', 'eager_greedy(cg())'],
        'lmcount': ['--search', 'eager_greedy(lmcount(lm_rhw()))'],
    }
    exp = common_setup.IssueExperiment(
        revisions=revisions,
        configs=CONFIGS,
        suite=SUITE,
        test_suite=['depot:pfile1'],
        processes=4,
        email='[email protected]',
        grid_priority=-10,
    )
    # BUG FIX: the previous code aliased exp.DEFAULT_TABLE_ATTRIBUTES and
    # appended to it, mutating the shared class-level default list. Build a
    # fresh list via concatenation instead.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES + [
        'landmarks', 'landmarks_generation_time']
    exp.add_comparison_table_step(attributes=attributes)
    exp()
| 853 |
Python
| 22.08108 | 67 | 0.620164 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue549/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments.fast_downward_experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
    """Add the --test flag to lab's argument parser and parse the CLI."""
    ARGPARSER.add_argument(
        "--test",
        choices=["yes", "no", "auto"],
        default="auto",
        dest="test_run",
        help="test experiment locally on a small suite if --test=yes or "
        "--test=auto and we are not on a cluster")
    return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
    """Get file name of main script."""
    import __main__
    return __main__.__file__
def get_script_dir():
    """Get directory of main script.
    Usually a relative directory (depends on how it was called by the user.)"""
    return os.path.dirname(get_script())
def get_repo_base():
    """Get base directory of the repository, as an absolute path.
    Search upwards in the directory tree from the main script until a
    directory with a subdirectory named ".hg" is found.
    Abort if the repo base cannot be found."""
    path = os.path.abspath(get_script_dir())
    while os.path.dirname(path) != path:
        if os.path.exists(os.path.join(path, ".hg")):
            return path
        path = os.path.dirname(path)
    sys.exit("repo base could not be found")
def is_running_on_cluster():
    """Heuristically detect grid hosts from the machine's hostname."""
    node = platform.node()
    return ("cluster" in node or
            node.startswith("gkigrid") or
            node in ["habakuk", "turtur"])
def is_test_run():
    """True if --test=yes, or --test=auto on a non-cluster machine."""
    return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
                                      not is_running_on_cluster())
class IssueExperiment(FastDownwardExperiment):
"""Wrapper for FastDownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, revisions, suite, build_options=None,
driver_options=None, grid_priority=None,
test_suite=None, email=None, processes=1, **kwargs):
"""Create an FastDownwardExperiment with some convenience features.
All configs will be run on all revisions. Inherited options
*path*, *environment* and *cache_dir* from FastDownwardExperiment
are not supported and will be automatically set.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. nick will
automatically get the revision prepended, e.g.
'issue123-base-<nick>'::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*revisions* must be a non-empty list of revisions, which
specify which planner versions to use in the experiment.
The same versions are used for translator, preprocessor
and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
environment = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
environment = MaiaEnvironment(priority=grid_priority,
email=email)
FastDownwardExperiment.__init__(self, environment=environment,
**kwargs)
# Automatically deduce the downward repository from the file
repo = get_repo_base()
self.algorithm_nicks = []
self.revisions = revisions
for nick, cmdline in configs.items():
for rev in revisions:
algo_nick = '%s-%s' % (rev, nick)
self.add_algorithm(algo_nick, repo, rev, cmdline,
build_options, driver_options)
self.algorithm_nicks.append(algo_nick)
benchmarks_dir = os.path.join(repo, 'benchmarks')
self.add_suite(benchmarks_dir, suite)
self.search_parsers = []
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
# oufile is of the form <rev1>-<rev2>-...-<revn>.<format>
outfile = ''
for rev in self.revisions:
outfile += rev
outfile += '-'
outfile = outfile[:len(outfile)-1]
outfile += '.'
outfile += report.output_format
outfile = os.path.join(self.eval_dir, outfile)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revisions, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" %
(rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revisions, 2):
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" %
(rev1, rev2))
subprocess.call(['publish', outfile])
self.add_step(Step('publish-comparison-reports', publish_comparison_tables))
# TODO: this is copied from the old common_setup, but not tested
# with the new FastDownwardExperiment class!
def add_scatter_plot_step(self, attributes=None):
print 'This has not been tested with the new FastDownwardExperiment class!'
exit(0)
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in valid_attributes:
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 11,089 |
Python
| 35.843854 | 93 | 0.59816 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue512/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
self.add_pattern(
"init_time",
"Best heuristic value: \d+ \[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]",
required=True,
type=float)
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
| 441 |
Python
| 21.099999 | 90 | 0.54195 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue512/issue512.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue512: compare base and v1 on core satisficing
# configurations plus blind search, tracking the new "init_time" attribute
# extracted by custom-parser.py.
from downward import configs, suites
import common_setup
SEARCH_REVS = ["issue512-base", "issue512-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
             "lazy_greedy_add", "lazy_greedy_ff"]:
    CONFIGS[name] = configs_satisficing_core[name]
CONFIGS["blind"] = ["--search", "astar(blind())"]
exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_search_parser("custom-parser.py")
# FIX: was "attributes = attributes=..." -- a redundant chained assignment.
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"]
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.add_report(common_setup.RegressionReport(
    revision_nicks=exp.revision_nicks,
    config_nicks=CONFIGS.keys(),
    attributes=attributes))
exp()
| 1,010 |
Python
| 25.605262 | 68 | 0.708911 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue710/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# (Duplicate shebang/coding header removed.)
# Experiment script for issue710: canonical PDBs with hill-climbing pattern
# generation, base vs v1, with iPDB statistics parsed from the logs.
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, geometric_mean
from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run
BENCHMARKS_DIR = os.path.expanduser('~/repos/downward/benchmarks')
REVISIONS = ["issue710-base", "issue710-v1"]
CONFIGS = [
    IssueConfig('cpdbs-hc', ['--search', 'astar(cpdbs(patterns=hillclimbing()))']),
    IssueConfig('cpdbs-hc900', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900)))']),
]
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email='[email protected]')
if is_test_run():
    SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_resource('ipdb_parser', 'ipdb-parser.py', dest='ipdb-parser.py')
exp.add_command('ipdb-parser', ['{ipdb_parser}'])
exp.add_suite(BENCHMARKS_DIR, SUITE)
# iPDB attributes collected by ipdb-parser.py.
extra_attributes = [
    Attribute('hc_iterations', absolute=True, min_wins=True),
    Attribute('hc_num_patters', absolute=True, min_wins=True),
    Attribute('hc_size', absolute=True, min_wins=True),
    Attribute('hc_num_generated', absolute=True, min_wins=True),
    Attribute('hc_num_rejected', absolute=True, min_wins=True),
    Attribute('hc_max_pdb_size', absolute=True, min_wins=True),
    Attribute('hc_hill_climbing_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('hc_total_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('cpdbs_time', absolute=False, min_wins=True, functions=[geometric_mean]),
]
# BUG FIX: the previous code aliased exp.DEFAULT_TABLE_ATTRIBUTES and called
# .extend() on it, mutating the shared class-level default list. Concatenate
# into a fresh list instead.
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + extra_attributes
exp.add_comparison_table_step(attributes=attributes)
exp.add_scatter_plot_step()
exp.run_steps()
| 2,075 |
Python
| 36.071428 | 145 | 0.723373 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue710/ipdb-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('hc_iterations', 'iPDB: iterations = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_patters', 'iPDB: number of patterns = (\d+)', required=False, type=int)
parser.add_pattern('hc_size', 'iPDB: size = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_generated', 'iPDB: generated = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_rejected', 'iPDB: rejected = (\d+)', required=False, type=int)
parser.add_pattern('hc_max_pdb_size', 'iPDB: maximum pdb size = (\d+)', required=False, type=int)
parser.add_pattern('hc_hill_climbing_time', 'iPDB: hill climbing time: (.+)s', required=False, type=float)
parser.add_pattern('hc_total_time', 'Pattern generation \(hill climbing\) time: (.+)s', required=False, type=float)
parser.add_pattern('cpdbs_time', 'PDB collection construction time: (.+)s', required=False, type=float)
def check_hc_constructed(content, props):
hc_time = props.get('hc_total_time')
abstraction_constructed = False
if hc_time is not None:
abstraction_constructed = True
props['hc_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_hc_constructed)
def check_planner_exit_reason(content, props):
hc_abstraction_constructed = props.get('hc_abstraction_constructed')
error = props.get('error')
if error != 'none' and error != 'timeout' and error != 'out-of-memory':
print 'error: %s' % error
return
# Check whether hill climbing computation or search ran out of
# time or memory.
hc_out_of_time = False
hc_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if hc_abstraction_constructed == False:
if error == 'timeout':
hc_out_of_time = True
elif error == 'out-of-memory':
hc_out_of_memory = True
elif hc_abstraction_constructed == True:
if error == 'timeout':
search_out_of_time = True
elif error == 'out-of-memory':
search_out_of_memory = True
props['hc_out_of_time'] = hc_out_of_time
props['hc_out_of_memory'] = hc_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
parser.add_function(check_planner_exit_reason)
parser.parse()
| 2,375 |
Python
| 41.428571 | 115 | 0.665684 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue551/v2-lama-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue551 (v2): lama-first and several lmcount-based
# lazy greedy configurations on the satisficing STRIPS suite.
import os
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue551-base", "issue551-v2"]
BENCHMARKS = os.path.expanduser('~/downward-benchmarks')
SUITE = suites.suite_satisficing_strips()
CONFIGS = [
    IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
    IssueConfig("lm_hm", [
        "--landmarks", "lm=lm_hm(2)",
        "--heuristic", "hlm=lmcount(lm)",
        "--search", "lazy_greedy(hlm)"]),
    IssueConfig("lm_exhaust", [
        "--landmarks", "lm=lm_exhaust()",
        "--heuristic", "hlm=lmcount(lm)",
        "--search", "lazy_greedy(hlm)"]),
    IssueConfig("lm_rhw", [
        "--landmarks", "lm=lm_rhw()",
        "--heuristic", "hlm=lmcount(lm)",
        "--search", "lazy_greedy(hlm)"]),
    IssueConfig("lm_zg", [
        "--landmarks", "lm=lm_zg()",
        "--heuristic", "hlm=lmcount(lm)",
        "--search", "lazy_greedy(hlm)"]),
]
exp = IssueExperiment(
    revisions=REVS,
    benchmarks_dir=BENCHMARKS,
    suite=SUITE,
    configs=CONFIGS,
    processes=4,
    email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 1,199 |
Python
| 25.666666 | 76 | 0.580484 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue551/v5-seq-sat-lama-2011.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment script for issue551 (v5): seq-sat-lama-2011 alias, base vs v5.
import os
from downward import suites
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
REVS = ["issue551-base", "issue551-v5"]
BENCHMARKS = os.path.expanduser('~/downward-benchmarks')
SUITE = suites.suite_satisficing()
CONFIGS = [
    IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
]
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    environment=ENVIRONMENT
)
exp.add_suite(BENCHMARKS, SUITE)
exp.add_comparison_table_step()
exp()
| 823 |
Python
| 20.68421 | 90 | 0.72418 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue551/v4-lama-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Experiment for issue551: base vs. v4 on optimal-planning benchmarks.

Compares the seq-opt-bjolp alias against a BJOLP variant using optimal
cost partitioning for the landmark count heuristic.
"""
import os

from downward import suites
from lab.environments import LocalEnvironment, MaiaEnvironment

from common_setup import IssueConfig, IssueExperiment, is_test_run

REVISIONS = ["issue551-base", "issue551-v4"]
BENCHMARK_DIR = os.path.expanduser('~/downward-benchmarks')
SUITE = suites.suite_optimal()

CONFIGS = [
    IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
    IssueConfig("seq-opt-bjolp-ocp", [
        "--landmarks", "lm=lm_merged([lm_rhw(),lm_hm(m=1)])",
        "--heuristic", "hlm=lmcount(lm,admissible=true,optimal=true)",
        "--search", "astar(hlm,mpd=true)"]),
]

ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
# Shrink the setup for local smoke tests.
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT
)
exp.add_suite(BENCHMARK_DIR, SUITE)

# Attributes reported in the second comparison table and the scatter plots.
RESOURCE_ATTRIBUTES = [
    "memory", "total_time", "search_time", "landmarks_generation_time"]
exp.add_comparison_table_step()
exp.add_comparison_table_step(attributes=RESOURCE_ATTRIBUTES)
exp.add_scatter_plot_step(relative=True, attributes=RESOURCE_ATTRIBUTES)
exp()
| 1,262 |
Python
| 27.704545 | 120 | 0.698891 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue551/v4-lama-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Experiment for issue551: base vs. v4 on satisficing benchmarks.

Compares the lama-first alias against lazy greedy search with the
landmark count heuristic over several landmark factories.
"""
import os

from downward import suites
from lab.environments import LocalEnvironment, MaiaEnvironment

from common_setup import IssueConfig, IssueExperiment, is_test_run

REVISIONS = ["issue551-base", "issue551-v4"]
BENCHMARK_DIR = os.path.expanduser('~/downward-benchmarks')
SUITE = suites.suite_satisficing()


def _lazy_lmcount_config(lm_factory):
    # The config nick is the factory name without its argument list,
    # e.g. "lm_hm(2)" -> "lm_hm".
    nick = lm_factory.split("(")[0]
    return IssueConfig(nick, [
        "--landmarks", "lm=" + lm_factory,
        "--heuristic", "hlm=lmcount(lm)",
        "--search", "lazy_greedy(hlm)"])


CONFIGS = [IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"])]
CONFIGS += [
    _lazy_lmcount_config(factory)
    for factory in ("lm_hm(2)", "lm_exhaust()", "lm_rhw()", "lm_zg()")]

ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
# Shrink the setup for local smoke tests.
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT
)
exp.add_suite(BENCHMARK_DIR, SUITE)

# Attributes reported in the second comparison table and the scatter plots.
RESOURCE_ATTRIBUTES = [
    "memory", "total_time", "search_time", "landmarks_generation_time"]
exp.add_comparison_table_step()
exp.add_comparison_table_step(attributes=RESOURCE_ATTRIBUTES)
exp.add_scatter_plot_step(relative=True, attributes=RESOURCE_ATTRIBUTES)
exp()
| 1,657 |
Python
| 28.607142 | 120 | 0.635486 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue696/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Experiment for issue696: base vs. v1 on satisficing benchmarks.

Runs lazy greedy search with several heuristics, plus enforced
hill-climbing with FF.
"""
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from common_setup import IssueConfig, IssueExperiment, is_test_run

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue696-base", "issue696-v1"]

CONFIGS = []
for heuristic in ["add", "cea", "cg", "ff"]:
    CONFIGS.append(IssueConfig(
        "lazy_greedy_%s" % heuristic,
        ["--heuristic", "h=%s()" % heuristic,
         "--search", "lazy_greedy(h, preferred=h)"]))
# EHC is only run with the FF heuristic.
CONFIGS.append(IssueConfig(
    "ehc_ff",
    ["--heuristic", "h=ff()",
     "--search", "ehc(h, preferred=h)"]))

SUITE = [
    'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips',
    'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips',
    'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips',
    'elevators-sat11-strips', 'floortile-sat11-strips',
    'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
    'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
    'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
    'miconic-simpleadl', 'movie', 'mprime', 'mystery',
    'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl',
    'openstacks-sat08-strips', 'openstacks-sat11-strips',
    'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs',
    'parcprinter-08-strips', 'parcprinter-sat11-strips',
    'parking-sat11-strips', 'parking-sat14-strips', 'pathways',
    'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips',
    'philosophers', 'pipesworld-notankage', 'pipesworld-tankage',
    'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite',
    'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
    'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
    'tetris-sat14-strips', 'thoughtful-sat14-strips',
    'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
    'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
    'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
    'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel']

ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")
# Shrink the setup for local smoke tests.
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time", "memory"])
exp()
| 2,671 |
Python
| 37.171428 | 73 | 0.674654 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue662/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Experiment for issue662: static vs. dynamic 32-bit release builds.

Runs A* with the LM-Cut heuristic. The base revision is only paired
with the static build (the dynamic build does not exist there), while
v1 is run with both builds. Relative scatter plots compare total_time
across revisions and across builds.
"""
import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment, get_algo_nick, get_repo_base
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue662-base", "issue662-v1"]
CONFIGS = [
    IssueConfig(
        'astar-lmcut-static',
        ['--search', 'astar(lmcut())'],
        build_options=["release32"],
        driver_options=["--build=release32", "--search-time-limit", "60s"]
    ),
    IssueConfig(
        'astar-lmcut-dynamic',
        ['--search', 'astar(lmcut())'],
        build_options=["release32dynamic"],
        driver_options=["--build=release32dynamic", "--search-time-limit", "60s"]
    )
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

# Shrink the setup for local smoke tests.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=[],
    configs=[],
    environment=ENVIRONMENT,
)
# Add algorithms manually instead of via revisions/configs: the dynamic
# build only exists in v1, so the base revision must skip it.
for rev in REVISIONS:
    for config in CONFIGS:
        if rev.endswith("base") and config.nick.endswith("dynamic"):
            continue
        exp.add_algorithm(
            get_algo_nick(rev, config.nick),
            get_repo_base(),
            rev,
            config.component_options,
            build_options=config.build_options,
            driver_options=config.driver_options)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attribute in ["total_time"]:
    for algo1, algo2 in [("issue662-base-astar-lmcut-static",
                          "issue662-v1-astar-lmcut-static"),
                         ("issue662-v1-astar-lmcut-static",
                          "issue662-v1-astar-lmcut-dynamic")]:
        # Bug fix: the original outfile also interpolated `config.nick`,
        # but `config` here is the stale loop variable leaked from the
        # algorithm loop above (always the last config), so it carried
        # no information and made the file names misleading.
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=[algo1, algo2],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}-{}.png".format(exp.name, attribute, algo1, algo2)
        )
exp.run_steps()
| 2,394 |
Python
| 29.705128 | 95 | 0.616541 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue533/exp1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Experiment for issue533: compare base, v1, and a v1 debug build.

Runs blind A* and eager greedy search with FF over (really) all
benchmark domains, since translator issues are most interesting in
obscure cases such as airport-adl.
"""
from downward import suites

import common_setup

CONFIGS = {
    "blind": [
        "--search", "astar(blind)"],
    "ff": [
        "--heuristic", "h=ff()",
        "--search", "eager_greedy(h, preferred=h)"],
}

REVS = ["issue533-base", "issue533-v1", "issue533-v1-debug"]
LIMITS = {"search_time": 300}

# Every domain that was in the benchmarks directory at the time of
# writing; for translator issues we want full coverage, including
# obscure ADL variants.
SUITE = [
    "airport",
    "airport-adl",
    "assembly",
    "barman-opt11-strips",
    "barman-sat11-strips",
    "blocks",
    "depot",
    "driverlog",
    "elevators-opt08-strips",
    "elevators-opt11-strips",
    "elevators-sat08-strips",
    "elevators-sat11-strips",
    "floortile-opt11-strips",
    "floortile-sat11-strips",
    "freecell",
    "grid",
    "gripper",
    "logistics00",
    "logistics98",
    "miconic",
    "miconic-fulladl",
    "miconic-simpleadl",
    "movie",
    "mprime",
    "mystery",
    "no-mprime",
    "no-mystery",
    "nomystery-opt11-strips",
    "nomystery-sat11-strips",
    "openstacks",
    "openstacks-opt08-adl",
    "openstacks-opt08-strips",
    "openstacks-opt11-strips",
    "openstacks-sat08-adl",
    "openstacks-sat08-strips",
    "openstacks-sat11-strips",
    "openstacks-strips",
    "optical-telegraphs",
    "parcprinter-08-strips",
    "parcprinter-opt11-strips",
    "parcprinter-sat11-strips",
    "parking-opt11-strips",
    "parking-sat11-strips",
    "pathways",
    "pathways-noneg",
    "pegsol-08-strips",
    "pegsol-opt11-strips",
    "pegsol-sat11-strips",
    "philosophers",
    "pipesworld-notankage",
    "pipesworld-tankage",
    "psr-large",
    "psr-middle",
    "psr-small",
    "rovers",
    "satellite",
    "scanalyzer-08-strips",
    "scanalyzer-opt11-strips",
    "scanalyzer-sat11-strips",
    "schedule",
    "sokoban-opt08-strips",
    "sokoban-opt11-strips",
    "sokoban-sat08-strips",
    "sokoban-sat11-strips",
    "storage",
    "tidybot-opt11-strips",
    "tidybot-sat11-strips",
    "tpp",
    "transport-opt08-strips",
    "transport-opt11-strips",
    "transport-sat08-strips",
    "transport-sat11-strips",
    "trucks",
    "trucks-strips",
    "visitall-opt11-strips",
    "visitall-sat11-strips",
    "woodworking-opt08-strips",
    "woodworking-opt11-strips",
    "woodworking-sat08-strips",
    "woodworking-sat11-strips",
    "zenotravel",
]

exp = common_setup.IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
# Include the translator-specific attributes in the comparison tables.
exp.add_comparison_table_step(
    attributes=exp.DEFAULT_TABLE_ATTRIBUTES +
    ["translate_*", "translator_*"])
exp()
| 2,861 |
Python
| 21.896 | 69 | 0.620063 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.