file_path (stringlengths 20–202) | content (stringlengths 9–3.85M) | size (int64, 9–3.85M) | lang (stringclasses, 9 values) | avg_line_length (float64, 3.33–100) | max_line_length (int64, 8–993) | alphanum_fraction (float64, 0.26–0.93)
---|---|---|---|---|---|---
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue540/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue540-base", "issue540-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"ipdb": ["--search", "astar(ipdb(max_time=150))"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 491 |
Python
| 17.923076 | 54 | 0.635438 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue540/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue540-base", "issue540-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lama-first": [
"--if-unit-cost",
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
"--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm])",
"--if-non-unit-cost",
"--heuristic",
"hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],cost_type=one,reopen_closed=false)",
"--always"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 940 |
Python
| 24.432432 | 97 | 0.58617 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/base-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISION = "issue213-base"
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb(max_time=900))"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), "
"merge_strategy=merge_dfp(), "
"label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"),
("seq", "astar(operatorcounting([state_equation_constraints()]))"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=[REVISION],
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
algorithm_pairs = []
for build1, build2 in itertools.combinations(BUILDS, 2):
for config_nick, search in SEARCHES:
algorithm_pairs.append(
("{REVISION}-{config_nick}-{build1}".format(**locals()),
"{REVISION}-{config_nick}-{build2}".format(**locals()),
"Diff ({})".format(config_nick)))
exp.add_report(
ComparativeReport(
algorithm_pairs,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name="issue213-opt-comparison")
#exp.add_scatter_plot_step(attributes=["total_time", "memory"])
exp.run_steps()
| 2,257 |
Python
| 30.361111 | 125 | 0.666371 |
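The CONFIGS list above pairs every search configuration with both builds, and the comparison step then matches the 32-bit and 64-bit variant of each configuration. A small illustrative sketch of the names this produces (not part of the experiment script; it only mirrors the format() calls above):

REVISION = "issue213-base"
BUILDS = ["release32", "release64"]
SEARCHES = [("blind", "astar(blind())"), ("lmcut", "astar(lmcut())")]

config_nicks = ["{}-{}".format(nick, build) for nick, _ in SEARCHES for build in BUILDS]
# ['blind-release32', 'blind-release64', 'lmcut-release32', 'lmcut-release64']

algorithm_pairs = [
    ("{}-{}-{}".format(REVISION, nick, BUILDS[0]),
     "{}-{}-{}".format(REVISION, nick, BUILDS[1]),
     "Diff ({})".format(nick))
    for nick, _ in SEARCHES]
# e.g. ('issue213-base-blind-release32', 'issue213-base-blind-release64', 'Diff (blind)')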
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v3-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-base", "issue213-v1", "issue213-v3"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"),
("blind", "astar(blind())"),
("cegar", "astar(cegar())"),
("divpot", "astar(diverse_potentials())"),
("ipdb", "astar(ipdb(max_time=900))"),
("lmcut", "astar(lmcut())"),
("mas",
"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), "
"merge_strategy=merge_dfp(), "
"label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"),
("seq", "astar(operatorcounting([state_equation_constraints()]))"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time"]
# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
# Compare across revisions and builds.
# lmcut-base-32 vs. lmcut-v3-64
build1, build2 = BUILDS
rev1, rev2 = "issue213-base", "issue213-v3"
algorithm_pairs = [
("{rev1}-{config_nick}-{build1}".format(**locals()),
"{rev2}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-before-vs-after")
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release32", "issue213-v3-blind-release32"]),
name="issue213-relative-scatter-blind-m32-v1-vs-v3-{}".format(attribute))
exp.run_steps()
| 3,946 |
Python
| 34.881818 | 125 | 0.648505 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v5-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v1", "issue213-v4", "issue213-v5"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = ["pegsol-opt11-strips"] # common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')])
exp.add_absolute_report_step()
attributes = [
"coverage", "error", "expansions_until_last_jump", "memory",
"score_memory", "total_time", "score_total_time",
"hash_set_load_factor", "hash_set_resizings"]
# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
for rev in REVISIONS:
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{rev}".format(**locals()))
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]),
name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute))
exp.run_steps()
| 3,084 |
Python
| 33.277777 | 93 | 0.651427 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
def main():
    parser = Parser()
    parser.add_pattern(
        "hash_set_load_factor",
        r"Hash set load factor: \d+/\d+ = (.+)",
        required=False,
        type=float)
    parser.add_pattern(
        "hash_set_resizings",
        r"Hash set resizings: (\d+)",
        required=False,
        type=int)
    print("Running custom parser")
    parser.parse()
main()
| 432 |
Python
| 18.681817 | 47 | 0.55787 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v3-blind.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v2", "issue213-v3"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
algorithm_pairs = []
revision1, revision2 = REVISIONS
for build in BUILDS:
for config_nick, search in SEARCHES:
algorithm_pairs.append(
("{revision1}-{config_nick}-{build}".format(**locals()),
"{revision2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals())))
exp.add_report(
ComparativeReport(
algorithm_pairs,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name="issue213-v2-vs-v3-blind")
exp.run_steps()
| 1,654 |
Python
| 25.693548 | 72 | 0.671705 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue688/v3-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v3-base", "issue688-v3"]
BUILDS = ["release32"]
SEARCHES = [
("blind", ["--search", "astar(blind())"]),
("ipdb", ["--search", "astar(ipdb())"]),
("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
search,
build_options=[build],
driver_options=["--build", build])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(RelativeScatterPlotReport(
attributes=["search_time"],
filter_algorithm=["issue688-v3-base-blind-release32", "issue688-v3-blind-release32"],
get_category=lambda run1, run2: run1.get("domain"),
), outfile="{}-blind-search_time.png".format(exp.name))
exp.run_steps()
| 1,566 |
Python
| 27.490909 | 89 | 0.694125 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v3", "issue561-v4")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue561/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v2", "issue561-v3")
| 106 |
Python
| 14.285712 | 34 | 0.632075 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue870/v1-seq.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILDS_AND_REVISIONS = [("release64", "issue870-base"), ("release64dynamic", "issue870-v1")]
CONFIG_NICKS = [
("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=[],
configs=[],
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
for build, rev in BUILDS_AND_REVISIONS:
for config_nick, config in CONFIG_NICKS:
exp.add_algorithm(
":".join([config_nick, build, rev]),
common_setup.get_repo_base(),
rev,
config,
build_options=[build],
driver_options=["--build", build])
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
algorithm_pairs = [
("seq:release64:issue870-base",
"seq:release64dynamic:issue870-v1",
"Diff (seq)")
]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue870-seq-static-vs-dynamic")
exp.run_steps()
| 2,087 |
Python
| 28 | 92 | 0.705319 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/plan_manager.py
|
import itertools
import os
import os.path
import re
from . import returncodes
_PLAN_INFO_REGEX = re.compile(r"; cost = (\d+) \((unit cost|general cost)\)\n")
def _read_last_line(filename):
line = None
with open(filename) as input_file:
for line in input_file:
pass
return line
def _parse_plan(plan_filename):
"""Parse a plan file and return a pair (cost, problem_type)
summarizing the salient information. Return (None, None) for
incomplete plans."""
last_line = _read_last_line(plan_filename) or ""
match = _PLAN_INFO_REGEX.match(last_line)
if match:
return int(match.group(1)), match.group(2)
else:
return None, None
class PlanManager:
def __init__(self, plan_prefix, portfolio_bound=None, single_plan=False):
self._plan_prefix = plan_prefix
self._plan_costs = []
self._problem_type = None
if portfolio_bound is None:
portfolio_bound = "infinity"
self._portfolio_bound = portfolio_bound
self._single_plan = single_plan
def get_plan_prefix(self):
return self._plan_prefix
def get_plan_counter(self):
return len(self._plan_costs)
def get_next_portfolio_cost_bound(self):
"""Return the next plan cost bound to be used in a portfolio planner.
Initially, this is the user-specified cost bound, or "infinity"
if the user specified no bound. Once a plan has been found, it
is the cost of the best plan found so far. (This is always the
last plan found because plans must decrease in cost.)
"""
if self._plan_costs:
return self._plan_costs[-1]
else:
return self._portfolio_bound
def abort_portfolio_after_first_plan(self):
return self._single_plan
def get_problem_type(self):
if self._problem_type is None:
returncodes.exit_with_driver_critical_error("no plans found yet: cost type not set")
return self._problem_type
def process_new_plans(self):
"""Update information about plans after a planner run.
Read newly generated plans and store the relevant information.
If the last plan file is incomplete, delete it.
"""
had_incomplete_plan = False
for counter in itertools.count(self.get_plan_counter() + 1):
plan_filename = self._get_plan_file(counter)
def bogus_plan(msg):
returncodes.exit_with_driver_critical_error("%s: %s" % (plan_filename, msg))
if not os.path.exists(plan_filename):
break
if had_incomplete_plan:
bogus_plan("plan found after incomplete plan")
cost, problem_type = _parse_plan(plan_filename)
if cost is None:
had_incomplete_plan = True
print("%s is incomplete. Deleted the file." % plan_filename)
os.remove(plan_filename)
else:
print("plan manager: found new plan with cost %d" % cost)
if self._problem_type is None:
# This is the first plan we found.
self._problem_type = problem_type
else:
# Check if info from this plan matches previous info.
if self._problem_type != problem_type:
bogus_plan("problem type has changed")
if cost >= self._plan_costs[-1]:
bogus_plan("plan quality has not improved")
self._plan_costs.append(cost)
def get_existing_plans(self):
"""Yield all plans that match the given plan prefix."""
if os.path.exists(self._plan_prefix):
yield self._plan_prefix
for counter in itertools.count(start=1):
plan_filename = self._get_plan_file(counter)
if os.path.exists(plan_filename):
yield plan_filename
else:
break
def delete_existing_plans(self):
"""Delete all plans that match the given plan prefix."""
for plan in self.get_existing_plans():
os.remove(plan)
def _get_plan_file(self, number):
return "%s.%d" % (self._plan_prefix, number)
| 4,288 |
Python
| 33.869918 | 96 | 0.588386 |
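A minimal usage sketch for the cost-bound bookkeeping in PlanManager above (not from the repository; run_search is a hypothetical callable and the import path assumes the repository root is on sys.path):

from driver.plan_manager import PlanManager

def run_portfolio_round(run_search):
    # run_search(bound, plan_prefix) is assumed to write plan files named
    # <prefix>.1, <prefix>.2, ... with strictly decreasing cost.
    manager = PlanManager("sas_plan")
    bound = manager.get_next_portfolio_cost_bound()  # "infinity" before the first plan
    run_search(bound, manager.get_plan_prefix())
    manager.process_new_plans()  # records the costs of newly written plans
    return manager.get_next_portfolio_cost_bound()  # now the cheapest cost found so far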
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/util.py
|
import os
from . import returncodes
DRIVER_DIR = os.path.abspath(os.path.dirname(__file__))
REPO_ROOT_DIR = os.path.dirname(DRIVER_DIR)
BUILDS_DIR = os.path.join(REPO_ROOT_DIR, "builds")
def get_elapsed_time():
"""
Return the CPU time taken by the python process and its child
processes.
"""
if os.name == "nt":
# The child time components of os.times() are 0 on Windows.
raise NotImplementedError("cannot use get_elapsed_time() on Windows")
return sum(os.times()[:4])
def find_domain_filename(task_filename):
"""
Find domain filename for the given task using automatic naming rules.
"""
dirname, basename = os.path.split(task_filename)
domain_basenames = [
"domain.pddl",
basename[:3] + "-domain.pddl",
"domain_" + basename,
"domain-" + basename,
]
for domain_basename in domain_basenames:
domain_filename = os.path.join(dirname, domain_basename)
if os.path.exists(domain_filename):
return domain_filename
returncodes.exit_with_driver_input_error(
"Error: Could not find domain file using automatic naming rules.")
| 1,166 |
Python
| 26.785714 | 77 | 0.647513 |
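For instance, the naming rules in find_domain_filename above check the following candidates, in order, next to the task file (illustrative sketch only):

import os.path

task_filename = "misc/tests/benchmarks/gripper/prob01.pddl"
dirname, basename = os.path.split(task_filename)
candidates = [
    "domain.pddl",                   # generic name in the same directory
    basename[:3] + "-domain.pddl",   # "pro-domain.pddl"
    "domain_" + basename,            # "domain_prob01.pddl"
    "domain-" + basename,            # "domain-prob01.pddl"
]
print([os.path.join(dirname, c) for c in candidates])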
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/main.py
|
from __future__ import print_function
import logging
import os
import sys
from . import aliases
from . import arguments
from . import cleanup
from . import run_components
from . import __version__
def main():
args = arguments.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format="%(levelname)-8s %(message)s",
stream=sys.stdout)
logging.debug("processed args: %s" % args)
if args.version:
print(__version__)
sys.exit()
if args.show_aliases:
aliases.show_aliases()
sys.exit()
if args.cleanup:
cleanup.cleanup_temporary_files(args)
sys.exit()
exitcode = None
for component in args.components:
if component == "translate":
(exitcode, continue_execution) = run_components.run_translate(args)
elif component == "search":
(exitcode, continue_execution) = run_components.run_search(args)
if not args.keep_sas_file:
print("Remove intermediate file {}".format(args.sas_file))
os.remove(args.sas_file)
elif component == "validate":
(exitcode, continue_execution) = run_components.run_validate(args)
else:
assert False, "Error: unhandled component: {}".format(component)
print("{component} exit code: {exitcode}".format(**locals()))
print()
if not continue_execution:
print("Driver aborting after {}".format(component))
break
# Exit with the exit code of the last component that ran successfully.
# This means for example that if no plan was found, validate is not run,
# and therefore the return code is that of the search.
sys.exit(exitcode)
if __name__ == "__main__":
main()
| 1,832 |
Python
| 30.067796 | 79 | 0.611354 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/limits.py
|
from . import returncodes
from . import util
try:
import resource
except ImportError:
resource = None
import sys
"""
Notes on limits: On Windows, the resource module does not exist and hence we
cannot enforce any limits there. Furthermore, while the module exists on macOS,
memory limits are not enforced by that OS and hence we do not support imposing
memory limits there.
"""
CANNOT_LIMIT_MEMORY_MSG = "Setting memory limits is not supported on your platform."
CANNOT_LIMIT_TIME_MSG = "Setting time limits is not supported on your platform."
def can_set_time_limit():
return resource is not None
def can_set_memory_limit():
return resource is not None and sys.platform != "darwin"
def set_time_limit(time_limit):
if time_limit is None:
return
if not can_set_time_limit():
raise NotImplementedError(CANNOT_LIMIT_TIME_MSG)
# Reaching the soft time limit leads to a (catchable) SIGXCPU signal,
# which we catch to gracefully exit. Reaching the hard limit leads to
# a SIGKILL, which is unpreventable. We set a hard limit one second
# higher than the soft limit to make sure we abort also in cases where
# the graceful shutdown doesn't work, or doesn't work reasonably
# quickly.
try:
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit + 1))
except ValueError:
# If the previous call failed, we try again without the extra second.
# In particular, this is necessary if there already exists an external
# hard limit equal to time_limit.
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit))
def set_memory_limit(memory):
"""*memory* must be given in bytes or None."""
if memory is None:
return
if not can_set_memory_limit():
raise NotImplementedError(CANNOT_LIMIT_MEMORY_MSG)
resource.setrlimit(resource.RLIMIT_AS, (memory, memory))
def convert_to_mb(num_bytes):
return num_bytes / (1024 * 1024)
def get_memory_limit(component_limit, overall_limit):
"""
Return the minimum of the component and overall limits or None if neither is set.
"""
limits = [limit for limit in [component_limit, overall_limit] if limit is not None]
return min(limits) if limits else None
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit
| 2,854 |
Python
| 32.197674 | 87 | 0.69096 |
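A short sketch of how a caller might combine the limit helpers above (illustrative only; assumes the repository root is on sys.path and a POSIX platform):

from driver import limits

# Component limit of 1800 s and overall limit of 2000 s: the effective limit is
# the smaller of 1800 and whatever is left of the overall budget, never negative.
effective_time = limits.get_time_limit(1800, 2000)

# 3 GiB component limit, no overall limit.
memory = limits.get_memory_limit(3 * 1024 ** 3, None)
if memory is not None and limits.can_set_memory_limit():
    limits.set_memory_limit(memory)  # skipped on Windows and macOS by the guard above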
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/tests.py
|
"""
Test module for Fast Downward driver script. Run with
py.test driver/tests.py
"""
import os
import subprocess
import sys
import pytest
from .aliases import ALIASES, PORTFOLIOS
from .arguments import EXAMPLES
from . import limits
from . import returncodes
from .util import REPO_ROOT_DIR, find_domain_filename
def translate():
"""Create translated task."""
cmd = [sys.executable, "fast-downward.py", "--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def cleanup():
subprocess.check_call([sys.executable, "fast-downward.py", "--cleanup"],
cwd=REPO_ROOT_DIR)
def run_driver(parameters):
cleanup()
translate()
cmd = [sys.executable, "fast-downward.py"] + parameters
return subprocess.check_call(cmd, cwd=REPO_ROOT_DIR)
def test_commandline_args():
for description, cmd in EXAMPLES:
parameters = [x.strip('"') for x in cmd]
run_driver(parameters)
def test_aliases():
for alias, config in ALIASES.items():
parameters = ["--alias", alias, "output.sas"]
run_driver(parameters)
def test_show_aliases():
run_driver(["--show-aliases"])
def test_portfolios():
for name, portfolio in PORTFOLIOS.items():
parameters = ["--portfolio", portfolio,
"--search-time-limit", "30m", "output.sas"]
run_driver(parameters)
@pytest.mark.skipif(not limits.can_set_time_limit(), reason="Cannot set time limits on this system")
def test_hard_time_limit():
def preexec_fn():
limits.set_time_limit(10)
driver = [sys.executable, "fast-downward.py"]
parameters = [
"--translate", "--translate-time-limit",
"10s", "misc/tests/benchmarks/gripper/prob01.pddl"]
subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
parameters = [
"--translate", "--translate-time-limit",
"20s", "misc/tests/benchmarks/gripper/prob01.pddl"]
with pytest.raises(subprocess.CalledProcessError) as exception_info:
subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
assert exception_info.value.returncode == returncodes.DRIVER_INPUT_ERROR
def test_automatic_domain_file_name_computation():
benchmarks_dir = os.path.join(REPO_ROOT_DIR, "benchmarks")
for dirpath, dirnames, filenames in os.walk(benchmarks_dir):
for filename in filenames:
if "domain" not in filename:
assert find_domain_filename(os.path.join(dirpath, filename))
| 2,596 |
Python
| 28.850574 | 100 | 0.665254 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/arguments.py
|
import argparse
import os.path
import re
import sys
from . import aliases
from . import returncodes
from . import util
DESCRIPTION = """Fast Downward driver script.
Input files can be either a PDDL problem file (with an optional PDDL domain
file), in which case the driver runs both planner components (translate and
search), or a SAS+ translator output file, in which case the driver runs just
the search component. You can override this default behaviour by selecting
components manually with the flags below. The first component to be run
determines the required input files:
--translate: [DOMAIN] PROBLEM
--search: TRANSLATE_OUTPUT
Arguments given before the specified input files are interpreted by the driver
script ("driver options"). Arguments given after the input files are passed on
to the planner components ("component options"). In exceptional cases where no
input files are needed, use "--" to separate driver from component options. In
even more exceptional cases where input files begin with "--", use "--" to
separate driver options from input files and also to separate input files from
component options.
By default, component options are passed to the search component. Use
"--translate-options" or "--search-options" within the component options to
override the default for the following options, until overridden again. (See
below for examples.)"""
LIMITS_HELP = """You can limit the time or memory for individual components
or the whole planner. The effective limit for each component is the minimum
of the component, overall, external soft, and external hard limits.
Limits are given in seconds or MiB. You can change the unit by using the
suffixes s, m, h and K, M, G.
By default, all limits are inactive. Only external limits (e.g. set with
ulimit) are respected.
Portfolios require that a time limit is in effect. Portfolio configurations
that exceed their time or memory limit are aborted, and the next
configuration is run."""
EXAMPLE_PORTFOLIO = os.path.relpath(
aliases.PORTFOLIOS["seq-opt-fdss-1"], start=util.REPO_ROOT_DIR)
EXAMPLES = [
("Translate and find a plan with A* + LM-Cut:",
["misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(lmcut())"']),
("Translate and run no search:",
["--translate",
"misc/tests/benchmarks/gripper/prob01.pddl"]),
("Run predefined configuration (LAMA-2011) on translated task:",
["--alias", "seq-sat-lama-2011", "output.sas"]),
("Run a portfolio on a translated task:",
["--portfolio", EXAMPLE_PORTFOLIO,
"--search-time-limit", "30m", "output.sas"]),
("Run the search component in debug mode (with assertions enabled) "
"and validate the resulting plan:",
["--debug", "output.sas", "--search", '"astar(ipdb())"']),
("Pass options to translator and search components:",
["misc/tests/benchmarks/gripper/prob01.pddl",
"--translate-options", "--full-encoding",
"--search-options", "--search", '"astar(lmcut())"']),
("Find a plan and validate it:",
["--validate",
"misc/tests/benchmarks/gripper/prob01.pddl",
"--search", '"astar(cegar())"']),
]
EPILOG = """component options:
--translate-options OPTION1 OPTION2 ...
--search-options OPTION1 OPTION2 ...
pass OPTION1 OPTION2 ... to specified planner component
(default: pass component options to search)
Examples:
%s
""" % "\n\n".join("%s\n%s" % (desc, " ".join([os.path.basename(sys.argv[0])] + parameters)) for desc, parameters in EXAMPLES)
COMPONENTS_PLUS_OVERALL = ["translate", "search", "validate", "overall"]
DEFAULT_SAS_FILE = "output.sas"
"""
Function to emulate the behavior of ArgumentParser.error, but with our
custom exit codes instead of 2.
"""
def print_usage_and_exit_with_driver_input_error(parser, msg):
parser.print_usage()
returncodes.exit_with_driver_input_error("{}: error: {}".format(os.path.basename(sys.argv[0]), msg))
class RawHelpFormatter(argparse.HelpFormatter):
"""Preserve newlines and spacing."""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
def _format_args(self, action, default_metavar):
"""Show explicit help for remaining args instead of "..."."""
if action.nargs == argparse.REMAINDER:
return "INPUT_FILE1 [INPUT_FILE2] [COMPONENT_OPTION ...]"
else:
return argparse.HelpFormatter._format_args(self, action, default_metavar)
def _rindex(seq, element):
"""Like list.index, but gives the index of the *last* occurrence."""
seq = list(reversed(seq))
reversed_index = seq.index(element)
return len(seq) - 1 - reversed_index
def _split_off_filenames(planner_args):
"""Given the list of arguments to be passed on to the planner
components, split it into a prefix of filenames and a suffix of
options. Returns a pair (filenames, options).
If a "--" separator is present, the last such separator serves as
the border between filenames and options. The separator itself is
not returned. (This implies that "--" can be a filename, but never
an option to a planner component.)
If no such separator is present, the first argument that begins
with "-" and consists of at least two characters starts the list
of options, and all previous arguments are filenames."""
if "--" in planner_args:
separator_pos = _rindex(planner_args, "--")
num_filenames = separator_pos
del planner_args[separator_pos]
else:
num_filenames = 0
for arg in planner_args:
# We treat "-" by itself as a filename because by common
# convention it denotes stdin or stdout, and we might want
# to support this later.
if arg.startswith("-") and arg != "-":
break
num_filenames += 1
return planner_args[:num_filenames], planner_args[num_filenames:]
def _split_planner_args(parser, args):
"""Partition args.planner_args, the list of arguments for the
planner components, into args.filenames, args.translate_options
and args.search_options. Modifies args directly and removes the original
args.planner_args list."""
args.filenames, options = _split_off_filenames(args.planner_args)
args.translate_options = []
args.search_options = []
curr_options = args.search_options
for option in options:
if option == "--translate-options":
curr_options = args.translate_options
elif option == "--search-options":
curr_options = args.search_options
else:
curr_options.append(option)
def _check_mutex_args(parser, args, required=False):
for pos, (name1, is_specified1) in enumerate(args):
for name2, is_specified2 in args[pos + 1:]:
if is_specified1 and is_specified2:
print_usage_and_exit_with_driver_input_error(
parser, "cannot combine %s with %s" % (name1, name2))
if required and not any(is_specified for _, is_specified in args):
print_usage_and_exit_with_driver_input_error(
parser, "exactly one of {%s} has to be specified" %
", ".join(name for name, _ in args))
def _looks_like_search_input(filename):
with open(filename) as input_file:
first_line = next(input_file, "").rstrip()
return first_line == "begin_version"
def _set_components_automatically(parser, args):
"""Guess which planner components to run based on the specified
filenames and set args.components accordingly. Currently
implements some simple heuristics:
1. If there is exactly one input file and it looks like a
Fast-Downward-generated file, run search only.
2. Otherwise, run all components."""
if len(args.filenames) == 1 and _looks_like_search_input(args.filenames[0]):
args.components = ["search"]
else:
args.components = ["translate", "search"]
def _set_components_and_inputs(parser, args):
"""Set args.components to the planner components to be run and set
args.translate_inputs and args.search_input to the correct input
filenames.
Rules:
1. If any --run-xxx option is specified, then the union
of the specified components is run.
2. If nothing is specified, use automatic rules. See
separate function."""
args.components = []
if args.translate or args.run_all:
args.components.append("translate")
if args.search or args.run_all:
args.components.append("search")
if not args.components:
_set_components_automatically(parser, args)
# We implicitly activate validation in debug mode. However, for
# validation we need the PDDL input files and a plan, therefore both
# components must be active.
if args.validate or (args.debug and len(args.components) == 2):
args.components.append("validate")
args.translate_inputs = []
assert args.components
first = args.components[0]
num_files = len(args.filenames)
# When passing --help to any of the components (or -h to the
# translator), we don't require input filenames and silently
# swallow any that are provided. This is undocumented to avoid
# cluttering the driver's --help output.
if first == "translate":
if "--help" in args.translate_options or "-h" in args.translate_options:
args.translate_inputs = []
elif num_files == 1:
task_file, = args.filenames
domain_file = util.find_domain_filename(task_file)
args.translate_inputs = [domain_file, task_file]
elif num_files == 2:
args.translate_inputs = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "translator needs one or two input files")
elif first == "search":
if "--help" in args.search_options:
args.search_input = None
elif num_files == 1:
args.search_input, = args.filenames
else:
print_usage_and_exit_with_driver_input_error(
parser, "search needs exactly one input file")
else:
assert False, first
def _set_translator_output_options(parser, args):
if any("--sas-file" in opt for opt in args.translate_options):
print_usage_and_exit_with_driver_input_error(
parser, "Cannot pass the \"--sas-file\" option to translate.py from the "
"fast-downward.py script. Pass it directly to fast-downward.py instead.")
args.search_input = args.sas_file
args.translate_options += ["--sas-file", args.search_input]
def _get_time_limit_in_seconds(limit, parser):
match = re.match(r"^(\d+)(s|m|h)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed time limit parameter: {}".format(limit))
time = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "m":
time *= 60
elif suffix == "h":
time *= 3600
return time
def _get_memory_limit_in_bytes(limit, parser):
match = re.match(r"^(\d+)(k|m|g)?$", limit, flags=re.I)
if not match:
print_usage_and_exit_with_driver_input_error(parser, "malformed memory limit parameter: {}".format(limit))
memory = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "k":
memory *= 1024
elif suffix is None or suffix == "m":
memory *= 1024 * 1024
elif suffix == "g":
memory *= 1024 * 1024 * 1024
return memory
def set_time_limit_in_seconds(parser, args, component):
param = component + "_time_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_time_limit_in_seconds(limit, parser))
def set_memory_limit_in_bytes(parser, args, component):
param = component + "_memory_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_memory_limit_in_bytes(limit, parser))
def _convert_limits_to_ints(parser, args):
for component in COMPONENTS_PLUS_OVERALL:
set_time_limit_in_seconds(parser, args, component)
set_memory_limit_in_bytes(parser, args, component)
def parse_args():
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=EPILOG,
formatter_class=RawHelpFormatter,
add_help=False)
help_options = parser.add_argument_group(
title=("driver options that show information and exit "
"(don't run planner)"))
# We manually add the help option because we want to control
# how it is grouped in the output.
help_options.add_argument(
"-h", "--help",
action="help", default=argparse.SUPPRESS,
help="show this help message and exit")
help_options.add_argument(
"-v", "--version", action="store_true",
help="print version number and exit")
help_options.add_argument(
"--show-aliases", action="store_true",
help="show the known aliases (see --alias) and exit")
components = parser.add_argument_group(
title=("driver options selecting the planner components to be run\n"
"(may select several; default: auto-select based on input file(s))"))
components.add_argument(
"--run-all", action="store_true",
help="run all components of the planner")
components.add_argument(
"--translate", action="store_true",
help="run translator component")
components.add_argument(
"--search", action="store_true",
help="run search component")
limits = parser.add_argument_group(
title="time and memory limits", description=LIMITS_HELP)
for component in COMPONENTS_PLUS_OVERALL:
limits.add_argument("--{}-time-limit".format(component))
limits.add_argument("--{}-memory-limit".format(component))
driver_other = parser.add_argument_group(
title="other driver options")
driver_other.add_argument(
"--alias",
help="run a config with an alias (e.g. seq-sat-lama-2011)")
driver_other.add_argument(
"--build",
help="BUILD can be a predefined build name like release "
"(default) and debug, a custom build name, or the path to "
"a directory holding the planner binaries. The driver "
"first looks for the planner binaries under 'BUILD'. If "
"this path does not exist, it tries the directory "
"'<repo>/builds/BUILD/bin', where the build script creates "
"them by default.")
driver_other.add_argument(
"--debug", action="store_true",
help="alias for --build=debug --validate")
driver_other.add_argument(
"--validate", action="store_true",
help='validate plans (implied by --debug); needs "validate" (VAL) on PATH')
driver_other.add_argument(
"--log-level", choices=["debug", "info", "warning"],
default="info",
help="set log level (most verbose: debug; least verbose: warning; default: %(default)s)")
driver_other.add_argument(
"--plan-file", metavar="FILE", default="sas_plan",
help="write plan(s) to FILE (default: %(default)s; anytime configurations append .1, .2, ...)")
driver_other.add_argument(
"--sas-file", metavar="FILE",
help="intermediate file for storing the translator output "
"(implies --keep-sas-file, default: {})".format(DEFAULT_SAS_FILE))
driver_other.add_argument(
"--keep-sas-file", action="store_true",
help="keep translator output file (implied by --sas-file, default: "
"delete file if translator and search component are active)")
driver_other.add_argument(
"--portfolio", metavar="FILE",
help="run a portfolio specified in FILE")
driver_other.add_argument(
"--portfolio-bound", metavar="VALUE", default=None, type=int,
help="exclusive bound on plan costs (only supported for satisficing portfolios)")
driver_other.add_argument(
"--portfolio-single-plan", action="store_true",
help="abort satisficing portfolio after finding the first plan")
driver_other.add_argument(
"--cleanup", action="store_true",
help="clean up temporary files (translator output and plan files) and exit")
parser.add_argument(
"planner_args", nargs=argparse.REMAINDER,
help="file names and options passed on to planner components")
# Using argparse.REMAINDER relies on the fact that the first
# argument that doesn't belong to the driver doesn't look like an
# option, i.e., doesn't start with "-". This is usually satisfied
# because the argument is a filename; in exceptional cases, "--"
# can be used as an explicit separator. For example, "./fast-downward.py --
# --help" passes "--help" to the search code.
args = parser.parse_args()
if args.sas_file:
args.keep_sas_file = True
else:
args.sas_file = DEFAULT_SAS_FILE
if args.build and args.debug:
print_usage_and_exit_with_driver_input_error(
parser, "The option --debug is an alias for --build=debug "
"--validate. Do no specify both --debug and --build.")
if not args.build:
if args.debug:
args.build = "debug"
else:
args.build = "release"
_split_planner_args(parser, args)
_check_mutex_args(parser, [
("--alias", args.alias is not None),
("--portfolio", args.portfolio is not None),
("options for search component", bool(args.search_options))])
_set_translator_output_options(parser, args)
_convert_limits_to_ints(parser, args)
if args.alias:
try:
aliases.set_options_for_alias(args.alias, args)
except KeyError:
print_usage_and_exit_with_driver_input_error(
parser, "unknown alias: %r" % args.alias)
if args.portfolio_bound is not None and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound may only be used for portfolios.")
if args.portfolio_bound is not None and args.portfolio_bound < 0:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-bound must not be negative.")
if args.portfolio_single_plan and not args.portfolio:
print_usage_and_exit_with_driver_input_error(
parser, "--portfolio-single-plan may only be used for portfolios.")
if not args.version and not args.show_aliases and not args.cleanup:
_set_components_and_inputs(parser, args)
if "translate" not in args.components or "search" not in args.components:
args.keep_sas_file = True
return args
| 19,013 |
Python
| 38.448133 | 125 | 0.651659 |
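A worked example of the limit-suffix handling implemented above: s/m/h scale time limits to seconds, k/m/g scale memory limits to bytes, and a bare memory value is interpreted as MiB (values only, without calling the private helpers):

time_limits = {"90": 90, "30m": 30 * 60, "2h": 2 * 3600}   # seconds
memory_limits = {
    "512": 512 * 1024 ** 2,   # bare number = MiB
    "768k": 768 * 1024,
    "4g": 4 * 1024 ** 3,
}                             # bytes
assert time_limits["30m"] == 1800
assert memory_limits["4g"] == 4294967296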
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/__init__.py
|
from .version import __version__
| 33 |
Python
| 15.999992 | 32 | 0.727273 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolio_runner.py
|
""" Module for running planner portfolios.
Memory limits: We apply the same memory limit that is given to the
plan script to each planner call. Note that this setup does not work if
the sum of the memory usage of the Python process and the planner calls
is limited. In this case the Python process might get killed although
we would like to kill only the single planner call and continue with
the remaining configurations. If we ever want to support this scenario
we will have to reduce the memory limit of the planner calls by the
amount of memory that the Python process needs. On maia for example
this amounts to 128MB of reserved virtual memory. We can make Python
reserve less space by lowering the soft limit for virtual memory before
the process is started.
"""
__all__ = ["run"]
import subprocess
import sys
from . import call
from . import limits
from . import returncodes
from . import util
DEFAULT_TIMEOUT = 1800
def adapt_heuristic_cost_type(arg, cost_type):
if cost_type == "normal":
transform = "no_transform()"
else:
transform = "adapt_costs({})".format(cost_type)
return arg.replace("H_COST_TRANSFORM", transform)
def adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager):
g_bound = plan_manager.get_next_portfolio_cost_bound()
plan_counter = plan_manager.get_plan_counter()
print("g bound: %s" % g_bound)
print("next plan number: %d" % (plan_counter + 1))
for index, arg in enumerate(args):
if arg == "--evaluator" or arg == "--heuristic":
heuristic = args[index + 1]
heuristic = adapt_heuristic_cost_type(heuristic, heuristic_cost_type)
args[index + 1] = heuristic
elif arg == "--search":
search = args[index + 1]
if "bound=BOUND" not in search:
returncodes.exit_with_driver_critical_error(
"Satisficing portfolios need the string "
"\"bound=BOUND\" in each search configuration. "
"See the FDSS portfolios for examples.")
for name, value in [
("BOUND", g_bound),
("S_COST_TYPE", search_cost_type)]:
search = search.replace(name, str(value))
search = adapt_heuristic_cost_type(search, heuristic_cost_type)
args[index + 1] = search
break
def run_search(executable, args, sas_file, plan_manager, time, memory):
complete_args = [executable] + args + [
"--internal-plan-file", plan_manager.get_plan_prefix()]
print("args: %s" % complete_args)
try:
exitcode = call.check_call(
"search", complete_args, stdin=sas_file,
time_limit=time, memory_limit=memory)
except subprocess.CalledProcessError as err:
exitcode = err.returncode
print("exitcode: %d" % exitcode)
print()
return exitcode
def compute_run_time(timeout, configs, pos):
remaining_time = timeout - util.get_elapsed_time()
print("remaining time: {}".format(remaining_time))
relative_time = configs[pos][0]
remaining_relative_time = sum(config[0] for config in configs[pos:])
print("config {}: relative time {}, remaining {}".format(
pos, relative_time, remaining_relative_time))
# For the last config we have relative_time == remaining_relative_time, so
# we use all of the remaining time at the end.
return remaining_time * relative_time / remaining_relative_time
def run_sat_config(configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory):
run_time = compute_run_time(timeout, configs, pos)
if run_time <= 0:
return None
_, args_template = configs[pos]
args = list(args_template)
adapt_args(args, search_cost_type, heuristic_cost_type, plan_manager)
if not plan_manager.abort_portfolio_after_first_plan():
args.extend([
"--internal-previous-portfolio-plans",
str(plan_manager.get_plan_counter())])
result = run_search(executable, args, sas_file, plan_manager, run_time, memory)
plan_manager.process_new_plans()
return result
def run_sat(configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory):
# If the configuration contains S_COST_TYPE or H_COST_TRANSFORM and the task
# has non-unit costs, we start by treating all costs as one. When we find
# a solution, we rerun the successful config with real costs.
heuristic_cost_type = "one"
search_cost_type = "one"
changed_cost_types = False
while configs:
configs_next_round = []
for pos, (relative_time, args) in enumerate(configs):
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if exitcode == returncodes.SUCCESS:
if plan_manager.abort_portfolio_after_first_plan():
return
configs_next_round.append((relative_time, args))
if (not changed_cost_types and can_change_cost_type(args) and
plan_manager.get_problem_type() == "general cost"):
print("Switch to real costs and repeat last run.")
changed_cost_types = True
search_cost_type = "normal"
heuristic_cost_type = "plusone"
exitcode = run_sat_config(
configs, pos, search_cost_type, heuristic_cost_type,
executable, sas_file, plan_manager, timeout, memory)
if exitcode is None:
return
yield exitcode
if exitcode == returncodes.SEARCH_UNSOLVABLE:
return
if final_config_builder:
print("Build final config.")
final_config = final_config_builder(args)
break
if final_config:
break
# Only run the successful configs in the next round.
configs = configs_next_round
if final_config:
print("Abort portfolio and run final config.")
exitcode = run_sat_config(
[(1, final_config)], 0, search_cost_type,
heuristic_cost_type, executable, sas_file, plan_manager,
timeout, memory)
if exitcode is not None:
yield exitcode
def run_opt(configs, executable, sas_file, plan_manager, timeout, memory):
for pos, (relative_time, args) in enumerate(configs):
run_time = compute_run_time(timeout, configs, pos)
exitcode = run_search(executable, args, sas_file, plan_manager,
run_time, memory)
yield exitcode
if exitcode in [returncodes.SUCCESS, returncodes.SEARCH_UNSOLVABLE]:
break
def can_change_cost_type(args):
return any("S_COST_TYPE" in part or "H_COST_TRANSFORM" in part for part in args)
def get_portfolio_attributes(portfolio):
attributes = {}
with open(portfolio, "rb") as portfolio_file:
content = portfolio_file.read()
try:
exec(content, attributes)
except Exception:
returncodes.exit_with_driver_critical_error(
"The portfolio %s could not be loaded. Maybe it still "
"uses the old portfolio syntax? See the FDSS portfolios "
"for examples using the new syntax." % portfolio)
if "CONFIGS" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define CONFIGS")
if "OPTIMAL" not in attributes:
returncodes.exit_with_driver_critical_error("portfolios must define OPTIMAL")
return attributes
def run(portfolio, executable, sas_file, plan_manager, time, memory):
"""
Run the configs in the given portfolio file.
The portfolio is allowed to run for at most *time* seconds and may
use a maximum of *memory* bytes.
"""
attributes = get_portfolio_attributes(portfolio)
configs = attributes["CONFIGS"]
optimal = attributes["OPTIMAL"]
final_config = attributes.get("FINAL_CONFIG")
final_config_builder = attributes.get("FINAL_CONFIG_BUILDER")
if "TIMEOUT" in attributes:
returncodes.exit_with_driver_input_error(
"The TIMEOUT attribute in portfolios has been removed. "
"Please pass a time limit to fast-downward.py.")
if time is None:
if sys.platform == "win32":
returncodes.exit_with_driver_unsupported_error(limits.CANNOT_LIMIT_TIME_MSG)
else:
returncodes.exit_with_driver_input_error(
"Portfolios need a time limit. Please pass --search-time-limit "
"or --overall-time-limit to fast-downward.py.")
timeout = util.get_elapsed_time() + time
if optimal:
exitcodes = run_opt(
configs, executable, sas_file, plan_manager, timeout, memory)
else:
exitcodes = run_sat(
configs, executable, sas_file, plan_manager, final_config,
final_config_builder, timeout, memory)
return returncodes.generate_portfolio_exitcode(list(exitcodes))
| 9,492 |
Python
| 38.719665 | 88 | 0.623894 |
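A worked example of the time allocation in compute_run_time above: each remaining configuration gets a share of the remaining budget proportional to its relative time, so the last configuration receives everything that is left (sketch with made-up numbers):

def allocate(remaining_time, relative_times, pos):
    # Same formula as compute_run_time(): remaining * relative / sum of remaining relatives.
    return remaining_time * relative_times[pos] / sum(relative_times[pos:])

relative_times = [1, 1, 2]                 # hypothetical portfolio of three configs
print(allocate(100, relative_times, 0))    # 25.0: 1 of 4 remaining relative units
print(allocate(75, relative_times, 1))     # 25.0: 1 of 3 remaining relative units
print(allocate(50, relative_times, 2))     # 50.0: the last config gets all remaining time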
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/version.py
|
# This file is auto-generated by the scripts in misc/release.
# Do not modify it.
__version__ = "20.06+"
| 106 |
Python
| 20.399996 | 61 | 0.679245 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/aliases.py
|
import os
from .util import DRIVER_DIR
PORTFOLIO_DIR = os.path.join(DRIVER_DIR, "portfolios")
ALIASES = {}
ALIASES["seq-sat-fd-autotune-1"] = [
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--evaluator", "hcea=cea()",
"--evaluator", "hcg=cg(transform=adapt_costs(plusone))",
"--evaluator", "hgc=goalcount()",
"--evaluator", "hAdd=add()",
"--search", """iterated([
lazy(alt([single(sum([g(),weight(hff,10)])),
single(sum([g(),weight(hff,10)]),pref_only=true)],
boost=2000),
preferred=[hff],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hAdd,7)])),
single(sum([g(),weight(hAdd,7)]),pref_only=true),
single(sum([g(),weight(hcg,7)])),
single(sum([g(),weight(hcg,7)]),pref_only=true),
single(sum([g(),weight(hcea,7)])),
single(sum([g(),weight(hcea,7)]),pref_only=true),
single(sum([g(),weight(hgc,7)])),
single(sum([g(),weight(hgc,7)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([tiebreaking([sum([g(),weight(hAdd,3)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,3)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,3)]),hcg]),
tiebreaking([sum([g(),weight(hcg,3)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,3)]),hcea]),
tiebreaking([sum([g(),weight(hcea,3)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,3)]),hgc]),
tiebreaking([sum([g(),weight(hgc,3)]),hgc],pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=false,cost_type=normal),
eager(alt([tiebreaking([sum([g(),weight(hAdd,10)]),hAdd]),
tiebreaking([sum([g(),weight(hAdd,10)]),hAdd],pref_only=true),
tiebreaking([sum([g(),weight(hcg,10)]),hcg]),
tiebreaking([sum([g(),weight(hcg,10)]),hcg],pref_only=true),
tiebreaking([sum([g(),weight(hcea,10)]),hcea]),
tiebreaking([sum([g(),weight(hcea,10)]),hcea],pref_only=true),
tiebreaking([sum([g(),weight(hgc,10)]),hgc]),
tiebreaking([sum([g(),weight(hgc,10)]),hgc],pref_only=true)],
boost=500),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal)
],repeat_last=true,continue_on_fail=true)"""]
ALIASES["seq-sat-fd-autotune-2"] = [
"--evaluator", "hcea=cea(transform=adapt_costs(plusone))",
"--evaluator", "hcg=cg(transform=adapt_costs(one))",
"--evaluator", "hgc=goalcount(transform=adapt_costs(plusone))",
"--evaluator", "hff=ff()",
"--search", """iterated([
ehc(hcea,preferred=[hcea],preferred_usage=0,cost_type=normal),
lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),
single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcg,3)])),
single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hcea,3)])),
single(sum([weight(g(),2),weight(hcea,3)]),pref_only=true),
single(sum([weight(g(),2),weight(hgc,3)])),
single(sum([weight(g(),2),weight(hgc,3)]),pref_only=true)],
boost=200),
preferred=[hcea,hgc],reopen_closed=false,cost_type=one),
lazy(alt([single(sum([g(),weight(hff,5)])),
single(sum([g(),weight(hff,5)]),pref_only=true),
single(sum([g(),weight(hcg,5)])),
single(sum([g(),weight(hcg,5)]),pref_only=true),
single(sum([g(),weight(hcea,5)])),
single(sum([g(),weight(hcea,5)]),pref_only=true),
single(sum([g(),weight(hgc,5)])),
single(sum([g(),weight(hgc,5)]),pref_only=true)],
boost=5000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=normal),
lazy(alt([single(sum([g(),weight(hff,2)])),
single(sum([g(),weight(hff,2)]),pref_only=true),
single(sum([g(),weight(hcg,2)])),
single(sum([g(),weight(hcg,2)]),pref_only=true),
single(sum([g(),weight(hcea,2)])),
single(sum([g(),weight(hcea,2)]),pref_only=true),
single(sum([g(),weight(hgc,2)])),
single(sum([g(),weight(hgc,2)]),pref_only=true)],
boost=1000),
preferred=[hcea,hgc],reopen_closed=true,cost_type=one)
],repeat_last=true,continue_on_fail=true)"""]
def _get_lama(**kwargs):
return [
"--if-unit-cost",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref={pref})".format(**kwargs),
"--evaluator", "hff=ff()",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref={pref})".format(**kwargs),
"--evaluator", "hff1=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref={pref})".format(**kwargs),
"--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
# Append --always to be on the safe side if we want to append
# additional options later.
"--always"]
ALIASES["seq-sat-lama-2011"] = _get_lama(pref="true")
ALIASES["lama"] = _get_lama(pref="false")
ALIASES["lama-first"] = [
"--evaluator",
"hlm=lmcount(lm_factory=lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]
ALIASES["seq-opt-bjolp"] = [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]
ALIASES["seq-opt-lmcut"] = [
"--search", "astar(lmcut())"]
PORTFOLIOS = {}
for portfolio in os.listdir(PORTFOLIO_DIR):
name, ext = os.path.splitext(portfolio)
assert ext == ".py", portfolio
PORTFOLIOS[name.replace("_", "-")] = os.path.join(PORTFOLIO_DIR, portfolio)
def show_aliases():
for alias in sorted(list(ALIASES) + list(PORTFOLIOS)):
print(alias)
def set_options_for_alias(alias_name, args):
"""
If alias_name is an alias for a configuration, set args.search_options
to the corresponding command-line arguments. If it is an alias for a
portfolio, set args.portfolio to the path to the portfolio file.
Otherwise raise KeyError.
"""
assert not args.search_options
assert not args.portfolio
if alias_name in ALIASES:
args.search_options = [x.replace(" ", "").replace("\n", "")
for x in ALIASES[alias_name]]
elif alias_name in PORTFOLIOS:
args.portfolio = PORTFOLIOS[alias_name]
else:
raise KeyError(alias_name)
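# Usage sketch (not part of the original module): given an argparse-style
# namespace with empty search_options/portfolio attributes, a known alias
# fills in the corresponding field, e.g.
#   args = argparse.Namespace(search_options=[], portfolio=None)
#   set_options_for_alias("seq-opt-lmcut", args)
#   # args.search_options == ["--search", "astar(lmcut())"]
# Unknown names raise KeyError.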
| 7,900 |
Python
| 43.892045 | 119 | 0.567975 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/run_components.py
|
import errno
import logging
import os.path
import subprocess
import sys
from . import call
from . import limits
from . import portfolio_runner
from . import returncodes
from . import util
from .plan_manager import PlanManager
# TODO: We might want to turn translate into a module and call it with "python3 -m translate".
REL_TRANSLATE_PATH = os.path.join("translate", "translate.py")
if os.name == "posix":
REL_SEARCH_PATH = "downward"
VALIDATE = "validate"
elif os.name == "nt":
REL_SEARCH_PATH = "downward.exe"
VALIDATE = "validate.exe"
else:
returncodes.exit_with_driver_unsupported_error("Unsupported OS: " + os.name)
def get_executable(build, rel_path):
# First, consider 'build' to be a path directly to the binaries.
# The path can be absolute or relative to the current working
# directory.
build_dir = build
if not os.path.exists(build_dir):
# If build is not a full path to the binaries, it might be the
# name of a build in our standard directory structure.
# In this case, the binaries are in
# '<repo-root>/builds/<buildname>/bin'.
build_dir = os.path.join(util.BUILDS_DIR, build, "bin")
if not os.path.exists(build_dir):
returncodes.exit_with_driver_input_error(
"Could not find build '{build}' at {build_dir}. "
"Please run './build.py {build}'.".format(**locals()))
abs_path = os.path.join(build_dir, rel_path)
if not os.path.exists(abs_path):
returncodes.exit_with_driver_input_error(
"Could not find '{rel_path}' in build '{build}'. "
"Please run './build.py {build}'.".format(**locals()))
return abs_path
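# Usage sketch (assumes a build named "release" has already been compiled):
# both a build name and a direct path to the binaries are accepted, e.g.
#   get_executable("release", REL_SEARCH_PATH)
#   get_executable("builds/release/bin", REL_SEARCH_PATH)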
def run_translate(args):
logging.info("Running translator.")
time_limit = limits.get_time_limit(
args.translate_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.translate_memory_limit, args.overall_memory_limit)
translate = get_executable(args.build, REL_TRANSLATE_PATH)
assert sys.executable, "Path to interpreter could not be found"
cmd = [sys.executable] + [translate] + args.translate_inputs + args.translate_options
stderr, returncode = call.get_error_output_and_returncode(
"translator",
cmd,
time_limit=time_limit,
memory_limit=memory_limit)
# We collect stderr of the translator and print it here, unless
# the translator ran out of memory and all output in stderr is
# related to MemoryError.
do_print_on_stderr = True
if returncode == returncodes.TRANSLATE_OUT_OF_MEMORY:
output_related_to_memory_error = True
if not stderr:
output_related_to_memory_error = False
for line in stderr.splitlines():
if "MemoryError" not in line:
output_related_to_memory_error = False
break
if output_related_to_memory_error:
do_print_on_stderr = False
if do_print_on_stderr and stderr:
returncodes.print_stderr(stderr)
if returncode == 0:
return (0, True)
elif returncode == 1:
# Unlikely case that the translator crashed without raising an
# exception.
return (returncodes.TRANSLATE_CRITICAL_ERROR, False)
else:
# Pass on any other exit code, including in particular signals or
# exit codes such as running out of memory or time.
return (returncode, False)
def run_search(args):
logging.info("Running search (%s)." % args.build)
time_limit = limits.get_time_limit(
args.search_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(
args.search_memory_limit, args.overall_memory_limit)
executable = get_executable(args.build, REL_SEARCH_PATH)
plan_manager = PlanManager(
args.plan_file,
portfolio_bound=args.portfolio_bound,
single_plan=args.portfolio_single_plan)
plan_manager.delete_existing_plans()
if args.portfolio:
assert not args.search_options
logging.info("search portfolio: %s" % args.portfolio)
return portfolio_runner.run(
args.portfolio, executable, args.search_input, plan_manager,
time_limit, memory_limit)
else:
if not args.search_options:
returncodes.exit_with_driver_input_error(
"search needs --alias, --portfolio, or search options")
if "--help" not in args.search_options:
args.search_options.extend(["--internal-plan-file", args.plan_file])
try:
call.check_call(
"search",
[executable] + args.search_options,
stdin=args.search_input,
time_limit=time_limit,
memory_limit=memory_limit)
except subprocess.CalledProcessError as err:
# TODO: if we ever add support for SEARCH_PLAN_FOUND_AND_* directly
# in the planner, this assertion no longer holds. Furthermore, we
# would need to return (err.returncode, True) if the returncode is
# in [0..10].
# Negative exit codes are allowed for passing out signals.
assert err.returncode >= 10 or err.returncode < 0, "got returncode < 10: {}".format(err.returncode)
return (err.returncode, False)
else:
return (0, True)
def run_validate(args):
logging.info("Running validate.")
num_files = len(args.filenames)
if num_files == 1:
task, = args.filenames
domain = util.find_domain_filename(task)
elif num_files == 2:
domain, task = args.filenames
else:
returncodes.exit_with_driver_input_error("validate needs one or two PDDL input files.")
plan_files = list(PlanManager(args.plan_file).get_existing_plans())
if not plan_files:
print("Not running validate since no plans found.")
return (0, True)
validate_inputs = [domain, task] + plan_files
try:
call.check_call(
"validate",
[VALIDATE] + validate_inputs,
time_limit=args.validate_time_limit,
memory_limit=args.validate_memory_limit)
except OSError as err:
if err.errno == errno.ENOENT:
returncodes.exit_with_driver_input_error("Error: {} not found. Is it on the PATH?".format(VALIDATE))
else:
returncodes.exit_with_driver_critical_error(err)
else:
return (0, True)
| 6,502 |
Python
| 36.808139 | 112 | 0.633651 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/call.py
|
"""Make subprocess calls with time and memory limits."""
from . import limits
from . import returncodes
import logging
import os
import subprocess
import sys
try:
from shlex import quote
except ImportError:
from pipes import quote
def print_call_settings(nick, cmd, stdin, time_limit, memory_limit):
if stdin is not None:
stdin = quote(stdin)
logging.info("{} stdin: {}".format(nick, stdin))
if time_limit is not None:
time_limit = str(time_limit) + "s"
logging.info("{} time limit: {}".format(nick, time_limit))
if memory_limit is not None:
memory_limit = int(limits.convert_to_mb(memory_limit))
memory_limit = str(memory_limit) + " MB"
logging.info("{} memory limit: {}".format(nick, memory_limit))
escaped_cmd = [quote(x) for x in cmd]
if stdin is not None:
escaped_cmd.extend(["<", quote(stdin)])
logging.info("{} command line string: {}".format(nick, " ".join(escaped_cmd)))
def _get_preexec_function(time_limit, memory_limit):
def set_limits():
def _try_or_exit(function, description):
def fail(exception, exitcode):
returncodes.print_stderr("{} failed: {}".format(description, exception))
os._exit(exitcode)
try:
function()
except NotImplementedError as err:
fail(err, returncodes.DRIVER_UNSUPPORTED)
except OSError as err:
fail(err, returncodes.DRIVER_CRITICAL_ERROR)
except ValueError as err:
fail(err, returncodes.DRIVER_INPUT_ERROR)
_try_or_exit(lambda: limits.set_time_limit(time_limit), "Setting time limit")
_try_or_exit(lambda: limits.set_memory_limit(memory_limit), "Setting memory limit")
if time_limit is None and memory_limit is None:
return None
else:
return set_limits
def check_call(nick, cmd, stdin=None, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, stdin, time_limit, memory_limit)
kwargs = {"preexec_fn": _get_preexec_function(time_limit, memory_limit)}
sys.stdout.flush()
if stdin:
with open(stdin) as stdin_file:
return subprocess.check_call(cmd, stdin=stdin_file, **kwargs)
else:
return subprocess.check_call(cmd, **kwargs)
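# Usage sketch (hypothetical command, not part of the original module):
#   check_call("search", ["./downward", "--help"], time_limit=60)
# runs the command under a 60 second time limit and raises
# subprocess.CalledProcessError on a nonzero exit code.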
def get_error_output_and_returncode(nick, cmd, time_limit=None, memory_limit=None):
print_call_settings(nick, cmd, None, time_limit, memory_limit)
preexec_fn = _get_preexec_function(time_limit, memory_limit)
sys.stdout.flush()
p = subprocess.Popen(cmd, preexec_fn=preexec_fn, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr, p.returncode
| 2,720 |
Python
| 32.592592 | 91 | 0.643382 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/returncodes.py
|
from __future__ import print_function
import sys
"""
We document Fast Downward exit codes at
http://www.fast-downward.org/ExitCodes. Please update this documentation when
making changes below.
"""
SUCCESS = 0
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY = 1
SEARCH_PLAN_FOUND_AND_OUT_OF_TIME = 2
SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME = 3
TRANSLATE_UNSOLVABLE = 10
SEARCH_UNSOLVABLE = 11
SEARCH_UNSOLVED_INCOMPLETE = 12
TRANSLATE_OUT_OF_MEMORY = 20
TRANSLATE_OUT_OF_TIME = 21
SEARCH_OUT_OF_MEMORY = 22
SEARCH_OUT_OF_TIME = 23
SEARCH_OUT_OF_MEMORY_AND_TIME = 24
TRANSLATE_CRITICAL_ERROR = 30
TRANSLATE_INPUT_ERROR = 31
SEARCH_CRITICAL_ERROR = 32
SEARCH_INPUT_ERROR = 33
SEARCH_UNSUPPORTED = 34
DRIVER_CRITICAL_ERROR = 35
DRIVER_INPUT_ERROR = 36
DRIVER_UNSUPPORTED = 37
def print_stderr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def is_unrecoverable(exitcode):
# Exit codes in the range from 30 to 39 represent unrecoverable failures.
return 30 <= exitcode < 40
def exit_with_driver_critical_error(msg):
print_stderr(msg)
sys.exit(DRIVER_CRITICAL_ERROR)
def exit_with_driver_input_error(msg):
print_stderr(msg)
sys.exit(DRIVER_INPUT_ERROR)
def exit_with_driver_unsupported_error(msg):
print_stderr(msg)
sys.exit(DRIVER_UNSUPPORTED)
def generate_portfolio_exitcode(exitcodes):
"""A portfolio's exitcode is determined as follows:
There is exactly one type of unexpected exit code -> use it.
There are multiple types of unexpected exit codes -> SEARCH_CRITICAL_ERROR.
[..., SUCCESS, ...] -> SUCCESS
[..., SEARCH_UNSOLVABLE, ...] -> SEARCH_UNSOLVABLE
[..., SEARCH_UNSOLVED_INCOMPLETE, ...] -> SEARCH_UNSOLVED_INCOMPLETE
[..., SEARCH_OUT_OF_MEMORY, ..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_MEMORY_AND_TIME
[..., SEARCH_OUT_OF_TIME, ...] -> SEARCH_OUT_OF_TIME
[..., SEARCH_OUT_OF_MEMORY, ...] -> SEARCH_OUT_OF_MEMORY
"""
print("Exit codes: {}".format(exitcodes))
exitcodes = set(exitcodes)
unrecoverable_codes = [code for code in exitcodes if is_unrecoverable(code)]
# There are unrecoverable exit codes.
if unrecoverable_codes:
print("Error: Unexpected exit codes: {}".format(unrecoverable_codes))
if len(unrecoverable_codes) == 1:
return (unrecoverable_codes[0], False)
else:
return (SEARCH_CRITICAL_ERROR, False)
# At least one plan was found.
if SUCCESS in exitcodes:
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY_AND_TIME, True)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY, True)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_PLAN_FOUND_AND_OUT_OF_TIME, True)
else:
return (SUCCESS, True)
# A config proved unsolvability or did not find a plan.
for code in [SEARCH_UNSOLVABLE, SEARCH_UNSOLVED_INCOMPLETE]:
if code in exitcodes:
return (code, False)
# No plan was found due to hitting resource limits.
if SEARCH_OUT_OF_MEMORY in exitcodes and SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_MEMORY_AND_TIME, False)
elif SEARCH_OUT_OF_MEMORY in exitcodes:
return (SEARCH_OUT_OF_MEMORY, False)
elif SEARCH_OUT_OF_TIME in exitcodes:
return (SEARCH_OUT_OF_TIME, False)
assert False, "Error: Unhandled exit codes: {}".format(exitcodes)
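# Worked example (sketch): one config found a plan while another ran out of
# memory, so the portfolio reports a plan plus the resource failure:
#   generate_portfolio_exitcode([SUCCESS, SEARCH_OUT_OF_MEMORY])
#   # -> (SEARCH_PLAN_FOUND_AND_OUT_OF_MEMORY, True)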
| 3,500 |
Python
| 31.119266 | 94 | 0.681143 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/cleanup.py
|
from itertools import count
import os
def _try_remove(f):
try:
os.remove(f)
except OSError:
return False
return True
def cleanup_temporary_files(args):
_try_remove(args.sas_file)
_try_remove(args.plan_file)
for i in count(1):
if not _try_remove("%s.%s" % (args.plan_file, i)):
break
| 346 |
Python
| 18.277777 | 58 | 0.595376 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2.py
|
OPTIMAL = False
CONFIGS = [
# eager_greedy_ff
(330, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff
(411, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea
(213, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea
(57, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_add
(204, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cg
(208, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cg
(109, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_add
(63, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
def FINAL_CONFIG_BUILDER(successful_args):
# This assumes that CONFIGS only contains "simple" configurations.
new_args = list(successful_args)
for pos, arg in enumerate(successful_args):
if arg == "--search":
orig_search = successful_args[pos + 1]
sub_searches = []
for weight in (5, 3, 2, 1):
if orig_search.startswith("lazy"):
sub_search = \
"lazy_wastar([h],preferred=[h],w=%d,cost_type=S_COST_TYPE)" % weight
else:
sub_search = \
"eager(single(sum([g(),weight(h,%d)])),preferred=[h],cost_type=S_COST_TYPE)" % weight
sub_searches.append(sub_search)
sub_search_string = ",".join(sub_searches)
new_search = "iterated([%s],bound=BOUND,repeat_last=true)" % sub_search_string
new_args[pos + 1] = new_search
break
return new_args
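# Example (sketch): for a successful lazy_greedy configuration, the builder
# replaces the search with a sequence of weighted searches (w=5,3,2,1), e.g.
# "lazy_greedy([h],...)" becomes
# "iterated([lazy_wastar([h],preferred=[h],w=5,cost_type=S_COST_TYPE),...],bound=BOUND,repeat_last=true)".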
| 2,502 |
Python
| 42.155172 | 109 | 0.551159 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2018.py
|
"""
This is the "Fast Downward Stone Soup 2018" sequential portfolio that participated in the IPC 2018
satisficing and bounded-cost tracks. For more information, see the planner abstract:
Jendrik Seipp and Gabriele Röger.
Fast Downward Stone Soup 2018.
In Ninth International Planning Competition (IPC 2018), Deterministic Part, pp. 80-82. 2018.
https://ai.dmi.unibas.ch/papers/seipp-roeger-ipc2018.pdf
"""
OPTIMAL = False
CONFIGS = [
(26, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true),type_based([hff,g()])],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=false,bound=BOUND)"]),
(25, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=false)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=0),preferred=[hlm],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(135, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=false,preferred_successors_first=true,bound=BOUND)"]),
(59, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(23, [
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(hff),single(hff,pref_only=true),single(hlm),single(hlm,pref_only=true)],boost=1000),preferred=[hff,hlm],cost_type=one,reopen_closed=false,randomize_successors=true,preferred_successors_first=true,bound=BOUND)"]),
(57, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=false)",
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=1000),preferred=[hlm,hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(17, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(12, [
"--evaluator",
"hadd=add(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(hadd),single(hadd,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hadd,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true)],boost=2000),preferred=[hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true),single(hlm),single(hlm,pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=true,use_orders=true)",
"--evaluator",
"hcea=cea(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true),single(hcea),single(hcea,pref_only=true)],boost=0),preferred=[hlm,hcea],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(88, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=one,bound=BOUND)"]),
(8, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=100),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hgoalcount,10)])),single(sum([g(),weight(hgoalcount,10)]),pref_only=true)],boost=2000),preferred=[hff,hgoalcount],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(24, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"eager(alt([type_based([g()]),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=one,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=false,use_orders=true)",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--evaluator",
"hblind=blind()",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hblind,2)]),pref_only=true),single(sum([g(),weight(hlm,2)])),single(sum([g(),weight(hlm,2)]),pref_only=true),single(sum([g(),weight(hff,2)])),single(sum([g(),weight(hff,2)]),pref_only=true)],boost=4419),preferred=[hlm],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(30, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy_wastar([hff],w=3,preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(58, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=one,bound=BOUND)"]),
(26, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([single(sum([g(),weight(hblind,10)])),single(sum([g(),weight(hblind,10)]),pref_only=true),single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hcea,10)])),single(sum([g(),weight(hcea,10)]),pref_only=true)],boost=536),preferred=[hff],reopen_closed=false,bound=BOUND)"]),
(27, [
"--evaluator",
"hcea=cea(transform=adapt_costs(one))",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=one,bound=BOUND)"]),
(50, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([type_based([g()]),single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(29, [
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(one))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hcg=cg()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hcg,3)])),single(sum([weight(g(),2),weight(hcg,3)]),pref_only=true),single(sum([weight(g(),2),weight(hgoalcount,3)])),single(sum([weight(g(),2),weight(hgoalcount,3)]),pref_only=true)],boost=3662),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([single(sum([g(),weight(hcg,10)])),single(sum([g(),weight(hcg,10)]),pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(21, [
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=one,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_reasonable_orders_hps(lm_rhw(only_causal_landmarks=true,disjunctive_landmarks=true,use_orders=true))",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hlm=lmcount(lmg,admissible=false,pref=true,transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hblind,3)])),single(sum([weight(g(),2),weight(hblind,3)]),pref_only=true),single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true),single(sum([weight(g(),2),weight(hlm,3)])),single(sum([weight(g(),2),weight(hlm,3)]),pref_only=true),single(sum([weight(g(),2),weight(hadd,3)])),single(sum([weight(g(),2),weight(hadd,3)]),pref_only=true)],boost=2474),preferred=[hadd],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(28, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hadd=add()",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hhmax=hmax()",
"--search",
"eager(alt([tiebreaking([sum([g(),weight(hblind,7)]),hblind]),tiebreaking([sum([g(),weight(hhmax,7)]),hhmax]),tiebreaking([sum([g(),weight(hadd,7)]),hadd]),tiebreaking([sum([g(),weight(hcg,7)]),hcg])],boost=2142),preferred=[],reopen_closed=true,bound=BOUND)"]),
(28, [
"--evaluator",
"hadd=add(transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff]),tiebreaking([sum([weight(g(),4),weight(hff,5)]),hff],pref_only=true),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd]),tiebreaking([sum([weight(g(),4),weight(hadd,5)]),hadd],pref_only=true)],boost=2537),preferred=[hff,hadd],reopen_closed=true,bound=BOUND)"]),
(53, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,transform=transform=adapt_costs(plusone))",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=5000),preferred=[hlm],reopen_closed=false,bound=BOUND)"]),
(29, [
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"lazy(alt([single(sum([weight(g(),2),weight(hff,3)])),single(sum([weight(g(),2),weight(hff,3)]),pref_only=true)],boost=5000),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(27, [
"--evaluator",
"hblind=blind()",
"--evaluator",
"hff=ff(transform=adapt_costs(one))",
"--search",
"eager(alt([single(sum([g(),weight(hblind,2)])),single(sum([g(),weight(hff,2)]))],boost=4480),preferred=[],reopen_closed=true,bound=BOUND)"]),
(29, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([type_based([g()]),single(hlm),single(hlm,pref_only=true),single(hff),single(hff,pref_only=true)],boost=1000),preferred=[hlm,hff],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(54, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=true,use_orders=true,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--evaluator",
"hff=ff()",
"--search",
"lazy(alt([tiebreaking([sum([g(),weight(hlm,10)]),hlm]),tiebreaking([sum([g(),weight(hlm,10)]),hlm],pref_only=true),tiebreaking([sum([g(),weight(hff,10)]),hff]),tiebreaking([sum([g(),weight(hff,10)]),hff],pref_only=true)],boost=200),preferred=[hlm],reopen_closed=true,cost_type=plusone,bound=BOUND)"]),
(87, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=false,use_orders=false,m=1)",
"--evaluator",
"hcg=cg(transform=adapt_costs(one))",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(hlm),single(hlm,pref_only=true),single(hcg),single(hcg,pref_only=true)],boost=0),preferred=[hcg],reopen_closed=false,cost_type=one,bound=BOUND)"]),
(30, [
"--landmarks",
"lmg=lm_exhaust(only_causal_landmarks=false)",
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--evaluator",
"hhmax=hmax()",
"--evaluator",
"hblind=blind()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false,transform=adapt_costs(one))",
"--search",
"lazy(alt([type_based([g()]),single(sum([g(),weight(hblind,3)])),single(sum([g(),weight(hblind,3)]),pref_only=true),single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true),single(sum([g(),weight(hhmax,3)])),single(sum([g(),weight(hhmax,3)]),pref_only=true)],boost=3052),preferred=[hff],reopen_closed=true,bound=BOUND)"]),
(56, [
"--evaluator",
"hff=ff(transform=adapt_costs(plusone))",
"--search",
"lazy(alt([tiebreaking([sum([g(),hff]),hff]),tiebreaking([sum([g(),hff]),hff],pref_only=true)],boost=432),preferred=[hff],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(19, [
"--landmarks",
"lmg=lm_merged([lm_rhw(only_causal_landmarks=false,disjunctive_landmarks=false,use_orders=true),lm_hm(m=1,conjunctive_landmarks=true,use_orders=true)])",
"--evaluator",
"hff=ff()",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"lazy(alt([single(sum([g(),weight(hff,10)])),single(sum([g(),weight(hff,10)]),pref_only=true),single(sum([g(),weight(hlm,10)])),single(sum([g(),weight(hlm,10)]),pref_only=true)],boost=500),preferred=[hff],reopen_closed=false,cost_type=plusone,bound=BOUND)"]),
(56, [
"--landmarks",
"lmg=lm_exhaust(only_causal_landmarks=false)",
"--evaluator",
"hgoalcount=goalcount(transform=adapt_costs(plusone))",
"--evaluator",
"hlm=lmcount(lmg,admissible=false)",
"--evaluator",
"hff=ff()",
"--evaluator",
"hblind=blind()",
"--search",
"eager(alt([tiebreaking([sum([weight(g(),8),weight(hblind,9)]),hblind]),tiebreaking([sum([weight(g(),8),weight(hlm,9)]),hlm]),tiebreaking([sum([weight(g(),8),weight(hff,9)]),hff]),tiebreaking([sum([weight(g(),8),weight(hgoalcount,9)]),hgoalcount])],boost=2005),preferred=[],reopen_closed=true,bound=BOUND)"]),
(24, [
"--landmarks",
"lmg=lm_zg(use_orders=false)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true,pref=false)",
"--search",
"eager(single(sum([g(),weight(hlm,3)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
(81, [
"--landmarks",
"lmg=lm_hm(conjunctive_landmarks=true,use_orders=false,m=1)",
"--evaluator",
"hlm=lmcount(lmg,admissible=true)",
"--search",
"eager(single(sum([g(),weight(hlm,5)])),preferred=[],reopen_closed=true,cost_type=one,bound=BOUND)"]),
]
| 19,090 |
Python
| 55.315634 | 525 | 0.598009 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_opt_fdss_2.py
|
OPTIMAL = True
CONFIGS = [
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
(1, ["--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search",
"astar(lmc,lazy_evaluator=lmc)"]),
(1, ["--search",
"astar(lmcut())"]),
(1, ["--search",
"astar(blind())"]),
]
| 948 |
Python
| 40.260868 | 116 | 0.60654 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_2014.py
|
OPTIMAL = False
CONFIGS = [
# add_lm_lazy_greedy
(114, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_greedy
(187, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_eager_greedy
(33, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hadd,hlm],preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_greedy
(35, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hff,hlm],preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_greedy
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_greedy([hcea,hlm],preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_eager_greedy
(120, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_eager_greedy
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hcg,hff],preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_ff_lazy_greedy
(17, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hff],preferred=[hadd,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_cg_lazy_greedy
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hadd,hcg],preferred=[hadd,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_lm_lazy_wastar
(79, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hadd,hlm],w=3,preferred=[hadd,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_lazy_wastar
(159, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hff,hlm],w=3,preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcea,hlm],w=3,preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_greedy
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager_greedy([hcg,hlm],preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_lazy_wastar
(39, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcea,hff],w=3,preferred=[hcea,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lm_eager_wastar
(37, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(), weight(hcea, 3)])),single(sum([g(),weight(hcea,3)]),pref_only=true),single(sum([g(), weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcea,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_ff_lazy_wastar
(40, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--search",
"lazy_wastar([hcg,hff],w=3,preferred=[hcg,hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lm_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# add_eager_wastar
(77, ["--evaluator", "hadd=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hadd, 3)])),single(sum([g(), weight(hadd,3)]),pref_only=true)]),preferred=[hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_ff_eager_wastar
(40, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)", "--heuristic",
"hcea=cea(transform=H_COST_TRANSFORM)", "--search",
"eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hcea,3)])),single(sum([g(),weight(hcea,3)]),pref_only=true)]),preferred=[hff,hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_eager_wastar
(78, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"eager(alt([single(sum([g(),weight(hcg,3)])),single(sum([g(),weight(hcg,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_eager_greedy
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hcea],preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lm_lazy_wastar
(39, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--heuristic",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))", "--search",
"lazy_wastar([hcg,hlm],w=3,preferred=[hcg,hlm],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cea_lazy_wastar
(40, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hcea], w=3, preferred=[hcea],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_eager_wastar
(72, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hff, 3)])),single(sum([g(),weight(hff,3)]),pref_only=true)]),preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_eager_wastar
(38, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(alt([single(sum([g(), weight(hcg, 3)])),single(sum([g(),weight(hcg,3)]),pref_only=true)]),preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# ff_lazy_wastar
(38, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([hff], w=3, preferred=[hff],cost_type=S_COST_TYPE,bound=BOUND)"]),
# cg_lazy_greedy
(116, ["--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([hcg],preferred=[hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
# ff_lm_eager_wastar
FINAL_CONFIG = [
"--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator",
"hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone))",
"--search",
"iterated([eager(alt([single(sum([g(),weight(hff,3)])),single(sum([g(),weight(hff,3)]),pref_only=true),single(sum([g(),weight(hlm,3)])),single(sum([g(),weight(hlm,3)]),pref_only=true)]),preferred=[hff,hlm],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 8,552 |
Python
| 64.792307 | 277 | 0.615996 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_sat_fdss_1.py
|
OPTIMAL = False
CONFIGS = [
# alt_lazy_ff_cg
(49, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_ff_1
(171, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_lazy_cea_cg
(27, ["--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"lazy_greedy([hcea,hcg],preferred=[hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_ff_1
(340, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cg
(76, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hcg],preferred=[hff,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_ff_1
(88, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_add
(90, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hadd=add(transform=H_COST_TRANSFORM)", "--search",
"eager_greedy([hff,hadd],preferred=[hff,hadd],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_greedy_cea_1
(56, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"lazy_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# alt_eager_ff_cea_cg
(73, ["--evaluator", "hff=ff(transform=H_COST_TRANSFORM)",
"--evaluator", "hcea=cea(transform=H_COST_TRANSFORM)",
"--evaluator", "hcg=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([hff,hcea,hcg],preferred=[hff,hcea,hcg],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_add_1
(50, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_greedy_cea_1
(84, ["--evaluator", "h=cea(transform=H_COST_TRANSFORM)",
"--search",
"eager_greedy([h],preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_add_1
(166, ["--evaluator", "h=add(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_ff_1
(87, ["--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# lazy_wa3_cg_1
(73, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"lazy_wastar([h],w=3,preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
# eager_wa3_cg_1
(89, ["--evaluator", "h=cg(transform=H_COST_TRANSFORM)",
"--search",
"eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)"]),
]
FINAL_CONFIG = [
"--evaluator", "h=ff(transform=H_COST_TRANSFORM)",
"--search",
"iterated([eager(single(sum([g(),weight(h,3)])),preferred=[h],cost_type=S_COST_TYPE,bound=BOUND)],bound=BOUND,repeat_last=true)"]
| 3,544 |
Python
| 48.23611 | 133 | 0.582957 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/driver/portfolios/seq_opt_merge_and_shrink.py
|
OPTIMAL = True
CONFIGS = [
(800, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=true),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=infinity,threshold_before_merge=1))"]),
(1000, ["--search",
"astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),"
"shrink_strategy=shrink_bisimulation(greedy=false),"
"label_reduction=exact(before_shrinking=true,before_merging=false),"
"max_states=200000))"]),
]
| 706 |
Python
| 46.13333 | 118 | 0.655807 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/utils.py
|
from __future__ import print_function
import math
import os
import pickle
import shutil
import sys
import time
import random
import cProfile
import pstats
import io
from collections import defaultdict, deque, Counter, namedtuple
from itertools import count
from heapq import heappush, heappop
import numpy as np
INF = float('inf')
SEPARATOR = '\n' + 80*'-' + '\n'
try:
user_input = raw_input
except NameError:
user_input = input
inf_generator = count
##################################################
def int_ceil(f):
return int(math.ceil(f))
def get_python_version():
return sys.version_info[0]
def read(filename):
with open(filename, 'r') as f:
return f.read()
def write(filename, string):
with open(filename, 'w') as f:
f.write(string)
def write_pickle(filename, data):
# Cannot pickle lambda or nested functions
with open(filename, 'wb') as f:
pickle.dump(data, f)
def read_pickle(filename):
# Can sometimes read pickle3 from python2 by calling twice
with open(filename, 'rb') as f:
try:
return pickle.load(f)
except UnicodeDecodeError as e:
return pickle.load(f, encoding='latin1')
def safe_remove(p):
if os.path.exists(p):
os.remove(p)
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def safe_rm_dir(d):
if os.path.exists(d):
shutil.rmtree(d)
def clear_dir(d):
safe_rm_dir(d)
ensure_dir(d)
def get_file_path(file, rel_path):
directory = os.path.dirname(os.path.abspath(file))
return os.path.join(directory, rel_path)
def open_pdf(filename):
import subprocess
# import os
# import webbrowser
subprocess.Popen('open {}'.format(filename), shell=True)
# os.system(filename)
# webbrowser.open(filename)
user_input('Display?')
# safe_remove(filename)
# TODO: close output
##################################################
def elapsed_time(start_time):
return time.time() - start_time
def safe_zip(sequence1, sequence2):
assert len(sequence1) == len(sequence2)
return zip(sequence1, sequence2)
def get_mapping(sequence1, sequence2):
return dict(safe_zip(sequence1, sequence2))
def apply_mapping(sequence, mapping):
return tuple(mapping.get(e, e) for e in sequence)
def safe_apply_mapping(sequence, mapping):
# TODO: flip arguments order
return tuple(mapping[e] for e in sequence)
def negate_test(test):
return lambda *args, **kwargs: not test(*args, **kwargs)
def flatten(iterable_of_iterables):
return (item for iterables in iterable_of_iterables for item in iterables)
def find(test, sequence):
for item in sequence:
if test(item):
return item
return None
def find_unique(test, sequence):
found, value = False, None
for item in sequence:
if test(item):
if found:
raise RuntimeError('Both elements {} and {} satisfy the test'.format(value, item))
found, value = True, item
if not found:
raise RuntimeError('Unable to find an element satisfying the test')
return value
def implies(a, b):
return not a or b
def irange(start, end=None, step=1):
# TODO: combine with my other infinite generator
if end is None:
end = start
start = 0
n = start
while n < end:
yield n
n += step
def argmin(fn, iterable):
return min(iterable, key=fn)
def argmax(fn, iterable):
return max(iterable, key=fn)
def invert_dict(d):
return {v: k for k, v in d.items()}
def randomize(iterable):
sequence = list(iterable)
random.shuffle(sequence)
return sequence
##################################################
BYTES_PER_KILOBYTE = math.pow(2, 10)
BYTES_PER_GIGABYTE = math.pow(2, 30)
KILOBYTES_PER_GIGABYTE = BYTES_PER_GIGABYTE / BYTES_PER_KILOBYTE
def get_peak_memory_in_kb():
# TODO: use psutil instead
import psutil
# https://pypi.org/project/psutil/
# https://psutil.readthedocs.io/en/latest/
#rss: aka "Resident Set Size", this is the non-swapped physical memory a process has used. (bytes)
#vms: aka "Virtual Memory Size", this is the total amount of virtual memory used by the process. (bytes)
#shared: (Linux) memory that could be potentially shared with other processes.
#text (Linux, BSD): aka TRS (text resident set) the amount of memory devoted to executable code.
#data (Linux, BSD): aka DRS (data resident set) the amount of physical memory devoted to other than executable code.
#lib (Linux): the memory used by shared libraries.
#dirty (Linux): the number of dirty pages.
#pfaults (macOS): number of page faults.
#pageins (macOS): number of actual pageins.
process = psutil.Process(os.getpid())
#process.pid()
#process.ppid()
pmem = process.memory_info() # this seems to actually get the current memory!
memory_in_kb = pmem.vms / BYTES_PER_KILOBYTE
return memory_in_kb
#print(process.memory_full_info())
#print(process.memory_percent())
# process.rlimit(psutil.RLIMIT_NOFILE) # set resource limits (Linux only)
#print(psutil.virtual_memory())
#print(psutil.swap_memory())
#print(psutil.pids())
#try:
# # This will only work on Linux systems.
# with open("/proc/self/status") as status_file:
# for line in status_file:
# parts = line.split()
# if parts[0] == "VmPeak:":
# return float(parts[1])
#except IOError:
# pass
#return 0.
def check_memory(max_memory):
if max_memory == INF:
return True
peak_memory = get_peak_memory_in_kb()
#print('Peak memory: {} | Max memory: {}'.format(peak_memory, max_memory))
if peak_memory <= max_memory:
return True
print('Peak memory of {} KB exceeds memory limit of {} KB'.format(
int(peak_memory), int(max_memory)))
return False
##################################################
class Saver(object):
# TODO: contextlib
def save(self):
raise NotImplementedError()
def restore(self):
raise NotImplementedError()
def __enter__(self):
# TODO: move the saving to enter?
self.save()
return self
def __exit__(self, type, value, traceback):
self.restore()
class Profiler(Saver):
fields = ['tottime', 'cumtime']
def __init__(self, field='tottime', num=10):
assert field in self.fields
self.field = field
self.num = num
self.pr = cProfile.Profile()
def save(self):
self.pr.enable()
return self.pr
def restore(self):
self.pr.disable()
if self.num is None:
return None
stream = None
#stream = io.StringIO()
stats = pstats.Stats(self.pr, stream=stream).sort_stats(self.field) # TODO: print multiple
stats.print_stats(self.num)
return stats
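# Usage sketch (hypothetical call site): the Saver protocol makes Profiler a
# context manager, e.g.
#   with Profiler(field='cumtime', num=5):
#       run_something_expensive()
# prints the five most expensive entries when the block exits.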
class Verbose(Saver): # TODO: use DisableOutput
def __init__(self, verbose=False):
self.verbose = verbose
def save(self):
if self.verbose:
return
self.stdout = sys.stdout
self.devnull = open(os.devnull, 'w')
sys.stdout = self.devnull
#self.stderr = sys.stderr
#self.devnull = open(os.devnull, 'w')
#sys.stderr = self.stderr
def restore(self):
if self.verbose:
return
sys.stdout = self.stdout
self.devnull.close()
#sys.stderr = self.stderr
#self.devnull.close()
class TmpCWD(Saver):
def __init__(self, temp_cwd):
self.tmp_cwd = temp_cwd
def save(self):
self.old_cwd = os.getcwd()
os.chdir(self.tmp_cwd)
def restore(self):
os.chdir(self.old_cwd)
##################################################
class Comparable(object):
def __lt__(self, other):
raise NotImplementedError()
def __eq__(self, other):
return not (self < other) and not (other < self)
def __ne__(self, other):
return (self < other) or (other < self)
def __gt__(self, other):
return other < self
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not other < self
class MockSet(object):
def __init__(self, test=lambda item: True):
self.test = test
def __contains__(self, item):
return self.test(item)
class Score(Comparable): # tuple
def __init__(self, *args):
# TODO: convert to float
#super(Score, self).__init__(args)
self.values = tuple(args)
def check_other(self, other):
return isinstance(other, Score) and (len(self.values) == len(other.values))
def __lt__(self, other):
assert self.check_other(other)
return self.values < other.values
def __iter__(self):
return iter(self.values)
def __neg__(self):
return self.__class__(*(type(value).__neg__(value) for value in self.values))
def __add__(self, other):
return self.__class__(*(self.values + other.values))
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, self.values)
class HeapElement(Comparable):
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, other):
return self.key < other.key
def __iter__(self):
return iter([self.key, self.value])
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.key, self.value)
##################################################
def sorted_str_from_list(obj, **kwargs):
return '[{}]'.format(', '.join(sorted(str_from_object(item, **kwargs) for item in obj)))
def str_from_object(obj, ndigits=None): # str_object
if type(obj) in [list]: #, np.ndarray):
return '[{}]'.format(', '.join(str_from_object(item, ndigits) for item in obj))
if type(obj) == tuple:
return '({})'.format(', '.join(str_from_object(item, ndigits) for item in obj))
#if isinstance(obj, dict):
if type(obj) in [dict, defaultdict, Counter]:
return '{{{}}}'.format(', '.join('{}: {}'.format(str_from_object(key, ndigits), str_from_object(obj[key], ndigits)) \
for key in sorted(obj.keys(), key=lambda k: str_from_object(k, ndigits))))
if type(obj) in [set, frozenset]:
return '{{{}}}'.format(', '.join(sorted(str_from_object(item, ndigits) for item in obj)))
if (ndigits is not None) and (type(obj) in [float, np.float64]):
obj = round(obj, ndigits=ndigits)
if obj == 0.:
obj = 0. # NOTE - catches -0.0 bug
return '{0:.{1}f}'.format(obj, ndigits)
#if isinstance(obj, types.FunctionType):
# return obj.__name__
return str(obj)
#return repr(obj)
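# Example (sketch): containers are rendered recursively with sorted dict keys
# and optional float rounding, e.g.
#   str_from_object({'b': 0.5, 'a': [1, 2]}, ndigits=2)  ->  '{a: [1, 2], b: 0.50}'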
##################################################
def incoming_from_edges(edges):
incoming_vertices = defaultdict(set)
for v1, v2 in edges:
incoming_vertices[v2].add(v1)
return incoming_vertices
def outgoing_from_edges(edges):
outgoing_vertices = defaultdict(set)
for v1, v2 in edges:
outgoing_vertices[v1].add(v2)
return outgoing_vertices
def neighbors_from_orders(orders):
return incoming_from_edges(orders), \
outgoing_from_edges(orders)
def adjacent_from_edges(edges):
undirected_edges = defaultdict(set)
for v1, v2 in edges:
undirected_edges[v1].add(v2)
undirected_edges[v2].add(v1)
return undirected_edges
##################################################
def filter_orders(vertices, orders):
# TODO: rename to filter edges?
return [order for order in orders if all(v in vertices for v in order)]
def is_valid_topological_sort(vertices, orders, solution):
orders = filter_orders(vertices, orders)
if Counter(vertices) != Counter(solution):
return False
index_from_vertex = {v: i for i, v in enumerate(solution)}
for v1, v2 in orders:
if index_from_vertex[v1] >= index_from_vertex[v2]:
return False
return True
def dfs_topological_sort(vertices, orders, priority_fn=lambda v: 0):
# TODO: DFS for all topological sorts
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
def dfs(history, visited):
reverse_ordering = []
v1 = history[-1]
if v1 in visited:
return reverse_ordering
visited.add(v1)
for v2 in sorted(outgoing_edges[v1], key=priority_fn, reverse=True):
if v2 in history:
return None # Contains a cycle
result = dfs(history + [v2], visited)
if result is None:
return None
reverse_ordering.extend(result)
reverse_ordering.append(v1)
return reverse_ordering
visited = set()
reverse_order = []
for v0 in sorted(vertices, key=priority_fn, reverse=True):
if not incoming_edges[v0]:
result = dfs([v0], visited)
if result is None:
return None
reverse_order.extend(result)
ordering = reverse_order[::-1]
assert(is_valid_topological_sort(vertices, orders, ordering))
return ordering
def topological_sort(vertices, orders, priority_fn=lambda v: 0):
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
ordering = []
queue = []
for v in vertices:
if not incoming_edges[v]:
heappush(queue, HeapElement(priority_fn(v), v))
while queue:
priority, v1 = heappop(queue) # Lowest to highest
ordering.append(v1)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1)
if not incoming_edges[v2]:
heappush(queue, HeapElement(priority_fn(v2), v2))
if len(ordering) != len(vertices):
return None
assert is_valid_topological_sort(vertices, orders, ordering)
return ordering
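# Illustrative sketch (not part of the original module): topological_sort on a small
# diamond-shaped DAG; a cyclic order relation makes it return None instead.
def _example_topological_sort():
    vertices = ['a', 'b', 'c', 'd']
    orders = [('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')]
    ordering = topological_sort(vertices, orders)  # e.g. ['a', 'b', 'c', 'd']
    assert is_valid_topological_sort(vertices, orders, ordering)
    assert topological_sort(['x', 'y'], [('x', 'y'), ('y', 'x')]) is None
    return ordering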
def layer_sort(vertices, orders): # priority_fn=lambda v: 0
# TODO: more efficient hypergraph/layer distance (h_max)
orders = filter_orders(vertices, orders)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
visited = {}
queue = []
for v in vertices:
if not incoming_edges[v]:
visited[v] = 0
heappush(queue, HeapElement(visited[v], v))
while queue:
g, v1 = heappop(queue)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1) # TODO: non-uniform cost function for max
if not incoming_edges[v2] and (v2 not in visited):
visited[v2] = g + 1
heappush(queue, HeapElement(visited[v2], v2))
return visited
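# Illustrative sketch (not part of the original module): layer_sort assigns each reachable
# vertex a layer index such that every predecessor lies in a strictly earlier layer.
def _example_layer_sort():
    vertices = ['a', 'b', 'c', 'd']
    orders = [('a', 'b'), ('a', 'c'), ('b', 'd')]
    return layer_sort(vertices, orders)  # {'a': 0, 'b': 1, 'c': 1, 'd': 2}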
def is_acyclic(vertices, orders):
return topological_sort(vertices, orders) is not None
def sample_topological_sort(vertices, orders):
# https://stackoverflow.com/questions/38551057/random-topological-sorting-with-uniform-distribution-in-near-linear-time
# https://www.geeksforgeeks.org/all-topological-sorts-of-a-directed-acyclic-graph/
priorities = {v: random.random() for v in vertices}
return topological_sort(vertices, orders, priority_fn=priorities.get)
def transitive_closure(vertices, orders):
# Warshall's algorithm
orders = filter_orders(vertices, orders)
closure = set(orders)
for k in vertices:
for i in vertices:
for j in vertices:
if ((i, j) not in closure) and ((i, k) in closure) and ((k, j) in closure):
closure.add((i, j))
return closure
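# Illustrative sketch (not part of the original module): the closure of a chain
# a -> b -> c also contains the implied pair (a, c).
def _example_transitive_closure():
    closure = transitive_closure(['a', 'b', 'c'], [('a', 'b'), ('b', 'c')])
    assert closure == {('a', 'b'), ('b', 'c'), ('a', 'c')}
    return closure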
##################################################
def grow_component(sources, edges, disabled=set()):
processed = set(disabled)
cluster = []
queue = deque()
def add_cluster(v):
if v in processed:
return
processed.add(v)
cluster.append(v)
queue.append(v)
for v0 in sources:
add_cluster(v0)
while queue:
# TODO: add clusters here to ensure proper BFS
v1 = queue.popleft()
for v2 in edges[v1]:
add_cluster(v2)
return cluster
def breadth_first_search(source, edges, **kwargs):
return grow_component([source], edges, **kwargs)
def get_ancestors(source, edges):
return set(breadth_first_search(source, incoming_from_edges(edges))) - {source}
def get_descendants(source, edges):
return set(breadth_first_search(source, outgoing_from_edges(edges))) - {source}
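# Illustrative sketch (not part of the original module): ancestors follow the edges
# backward and descendants forward; the source vertex itself is excluded.
def _example_ancestry():
    edges = [('a', 'b'), ('b', 'c')]
    assert get_ancestors('c', edges) == {'a', 'b'}
    assert get_descendants('a', edges) == {'b', 'c'}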
def get_connected_components(vertices, edges):
edges = filter_orders(vertices, edges)
undirected_edges = adjacent_from_edges(edges)
clusters = []
processed = set()
for v0 in vertices:
if v0 in processed:
continue
cluster = grow_component({v0}, undirected_edges, processed)
processed.update(cluster)
if cluster:
clusters.append([v for v in vertices if v in cluster])
return clusters
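# Illustrative sketch (not part of the original module): edges are treated as undirected
# and each cluster preserves the input vertex order.
def _example_connected_components():
    clusters = get_connected_components([1, 2, 3, 4, 5], [(2, 1), (4, 5)])
    assert clusters == [[1, 2], [3], [4, 5]]
    return clusters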
##################################################
SearchNode = namedtuple('Node', ['g', 'parent'])
def dijkstra(sources, edges, op=sum): # sum | max
if not isinstance(edges, dict):
edges = {edge: 1 for edge in edges}
_, outgoing_edges = neighbors_from_orders(edges)
visited = {}
queue = []
for v0 in sources:
visited[v0] = SearchNode(g=0, parent=None)
queue.append(HeapElement(visited[v0].g, v0))
while queue:
current_g, current_v = heappop(queue)
if visited[current_v].g < current_g:
continue
for next_v in outgoing_edges[current_v]:
next_g = op([current_g, edges[(current_v, next_v)]])
if (next_v not in visited) or (next_g < visited[next_v].g):
visited[next_v] = SearchNode(next_g, current_v)
heappush(queue, HeapElement(next_g, next_v))
return visited
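# Illustrative sketch (not part of the original module): dijkstra over a weighted edge
# dict; each reached vertex maps to a SearchNode(g=cost, parent=predecessor).
def _example_dijkstra():
    edges = {('s', 'a'): 1, ('a', 'g'): 1, ('s', 'g'): 5}
    visited = dijkstra(['s'], edges)
    assert (visited['g'].g, visited['g'].parent) == (2, 'a')
    return visited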
##################################################
def is_hashable(value):
#return isinstance(value, Hashable) # TODO: issue with hashable and numpy 2.7.6
try:
hash(value)
except TypeError:
return False
return True
# def hash_or_id(value):
# if is_hashable(value):
# return hash(value)
# return id(value)
def value_or_id(value):
if is_hashable(value):
return value
return id(value)
def is_64bits():
#return sys.maxsize > 2**32
import platform
bit, _ = platform.architecture()
return bit == '64bit'
def inclusive_range(start, stop, step=1):
sequence = list(np.arange(start, stop, step))
    if sequence and (sequence[-1] != stop): # append the stop value when arange excludes it
sequence.append(stop)
return sequence
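# Illustrative sketch (not part of the original module): the helper is intended to include
# the stop value itself, unlike np.arange.
def _example_inclusive_range():
    return inclusive_range(0, 1, step=0.5)  # [0.0, 0.5, 1.0]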
def read_pddl(this_file, pddl_filename):
directory = os.path.dirname(os.path.abspath(this_file))
return read(os.path.join(directory, pddl_filename))
def lowercase(*strings):
return [string.lower() for string in strings]
def str_eq(s1, s2, ignore_case=True):
if ignore_case:
s1 = s1.lower()
s2 = s2.lower()
return s1 == s2
def clip(value, lower=-INF, upper=+INF):
return min(max(lower, value), upper)
| 19,236 |
Python
| 28.686728 | 125 | 0.60236 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/optimizer.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args, get_parameter_name, is_parameter, Minimize
from hsr_tamp.pddlstream.language.conversion import substitute_expression, list_from_conjunction
from hsr_tamp.pddlstream.language.external import parse_lisp_list, get_procedure_fn
from hsr_tamp.pddlstream.language.function import PredicateResult, FunctionResult
from hsr_tamp.pddlstream.language.object import Object, SharedOptValue
from hsr_tamp.pddlstream.language.stream import StreamInfo, Stream, StreamInstance, StreamResult, \
PartialInputs, NEGATIVE_SUFFIX, WildOutput
from hsr_tamp.pddlstream.language.generator import get_next
from hsr_tamp.pddlstream.utils import INF, get_mapping, safe_zip, str_from_object
from hsr_tamp.pddlstream.algorithms.reorder import get_stream_plan_components, get_partial_orders
DEFAULT_SIMULTANEOUS = False
DEFAULT_UNIQUE = True # TODO: would it ever even make sense to do shared here?
# TODO: revert to my previous specification where streams can simply be fused
VARIABLES = ':variables'
CONSTRAINT = ':constraint'
UNSATISFIABLE = 'unsatisfiable{}'.format(NEGATIVE_SUFFIX)
##################################################
class OptimizerOutput(object):
def __init__(self, assignments=[], facts=[], infeasible=[]): # infeasible=None
self.assignments = list(assignments)
self.facts = list(facts)
self.infeasible = list(map(frozenset, infeasible))
def to_wild(self):
return WildOutput(self.assignments, self.facts)
def __bool__(self):
return bool(self.assignments)
__nonzero__ = __bool__
def __repr__(self):
#return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
return str_from_object(self.__dict__)
class Optimizer(object):
def __init__(self, name, procedure, info):
self.name = name
self.procedure = procedure
self.info = info
self.variables = []
self.constraints = []
self.objectives = []
self.streams = []
def get_streams(self):
return self.variables + self.constraints
def __repr__(self):
return '{}'.format(self.name) #, self.streams)
class ComponentStream(Stream):
def __init__(self, optimizer, *args):
self.optimizer = optimizer
super(ComponentStream, self).__init__(*args)
##################################################
def get_list_gen_fn(procedure, inputs, outputs, certified, hint={}):
# TODO: prevent outputs of the sampler from being used as inputs (only consider initial values)
def list_gen_fn(*input_values):
mapping = get_mapping(inputs, input_values)
targets = substitute_expression(certified, mapping)
return procedure(outputs, targets, hint=hint)
return list_gen_fn
def get_effort_fn(optimizer_name):
    # TODO: higher effort if the variable cannot be free for the testing process
# This might happen if the variable is certified to have a property after construction
def effort_fn(*input_values):
# parameter_indices = [i for i, value in enumerate(input_values) if is_parameter(value)]
# optimizer_indices = [i for i, value in enumerate(input_values) if isinstance(value, SharedOptValue)
# if input_values[i].stream.startswith(optimizer_name)]
#if not parameter_indices and not optimizer_indices:
# return INF
return 1
return effort_fn
def prune_dominated(collections):
for i, collection1 in enumerate(collections):
if all((i == j) or not (collection2 <= collection1)
for j, collection2 in enumerate(collections)):
yield collection1
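# Illustrative sketch (not part of the original module): only set-minimal collections
# survive; any strict superset of another collection is dominated and dropped.
def _example_prune_dominated():
    collections = [frozenset({1, 2}), frozenset({1}), frozenset({3})]
    return list(prune_dominated(collections))  # [frozenset({1}), frozenset({3})]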
##################################################
class OptimizerInfo(StreamInfo):
def __init__(self, planable=False, p_success=None, overhead=None):
super(OptimizerInfo, self).__init__(p_success=p_success, overhead=overhead)
self.planable = planable # TODO: this isn't currently used
# TODO: post-processing
class VariableStream(ComponentStream):
# TODO: allow generation of two variables
def __init__(self, optimizer, variables, inputs, domain, certified, infos):
name = '{}-{}'.format(optimizer.name, '-'.join(map(get_parameter_name, variables)))
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, variables, certified)
# TODO: need to convert OptimizerOutput
#gen_fn = empty_gen()
#info = StreamInfo(effort=get_effort_fn(optimizer_name, inputs, outputs))
#info = StreamInfo(opt_gen_fn=PartialInputs(unique=DEFAULT_UNIQUE, num=DEFAULT_NUM))
info = infos.get(name, None)
if info is None:
info = StreamInfo(opt_gen_fn=PartialInputs(unique=DEFAULT_UNIQUE),
simultaneous=DEFAULT_SIMULTANEOUS)
super(VariableStream, self).__init__(optimizer, name, gen_fn, inputs, domain,
variables, certified, info)
class ConstraintStream(ComponentStream):
def __init__(self, optimizer, constraint, domain, infos):
# TODO: could support fluents and compile them into conditional effects
inputs = get_args(constraint)
outputs = []
certified = [constraint]
name = '{}-{}'.format(optimizer.name, get_prefix(constraint))
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, outputs, certified)
#gen_fn = empty_gen()
info = infos.get(name, None)
if info is None:
info = StreamInfo(effort=get_effort_fn(optimizer.name),
simultaneous=DEFAULT_SIMULTANEOUS)
super(ConstraintStream, self).__init__(optimizer, name, gen_fn, inputs, domain,
outputs, certified, info)
##################################################
def parse_variable(optimizer, lisp_list, infos):
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {VARIABLES, ':inputs', ':domain', ':graph'}
return VariableStream(optimizer,
value_from_attribute[VARIABLES], # TODO: assume unique?
value_from_attribute.get(':inputs', []),
list_from_conjunction(value_from_attribute.get(':domain')),
list_from_conjunction(value_from_attribute.get(':graph')),
infos)
def parse_constraint(optimizer, lisp_list, infos):
# TODO: constraints with the same name
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {CONSTRAINT, ':necessary'} # , ':fluents'}
return ConstraintStream(optimizer,
value_from_attribute[CONSTRAINT],
list_from_conjunction(value_from_attribute[':necessary']),
infos)
# TODO: convert optimizer into a set of streams? Already present within test stream
def parse_optimizer(lisp_list, procedures, infos):
_, optimizer_name = lisp_list[:2]
procedure = get_procedure_fn(procedures, optimizer_name)
optimizer_info = infos.get(optimizer_name, OptimizerInfo())
optimizer = Optimizer(optimizer_name, procedure, optimizer_info)
for sub_list in lisp_list[2:]:
form = sub_list[0]
if form == VARIABLES:
optimizer.variables.append(parse_variable(optimizer, sub_list, infos))
elif form == CONSTRAINT:
optimizer.constraints.append(parse_constraint(optimizer, sub_list, infos))
elif form == ':objective':
optimizer.objectives.append(sub_list[1])
else:
raise ValueError(form)
return optimizer.get_streams()
##################################################
class OptimizerResult(StreamResult):
def get_components(self):
return self.external.stream_plan
def get_objectives(self):
return substitute_expression(self.external.objectives, self.mapping)
def get_unsatisfiable(self):
return self.instance.get_unsatisfiable()
class OptimizerInstance(StreamInstance):
_Result = OptimizerResult
def __init__(self, stream, input_objects, fluent_facts):
super(OptimizerInstance, self).__init__(stream, input_objects, fluent_facts)
all_constraints = frozenset(range(len(self.external.certified)))
        #print(all_constraints)
self.infeasible = {all_constraints}
# TODO: might need to block separate clusters at once in order to ensure that it captures the true behavior
# TODO: connected components on facts
# TODO: cluster connected components in the infeasible set
# TODO: compute things dependent on a stream and treat like an optimizer
# Also make an option to just treat everything like an optimizer
def _next_wild(self):
output, self.enumerated = get_next(self._generator, default=[])
if not isinstance(output, OptimizerOutput):
output = OptimizerOutput(assignments=output)
self.infeasible.update(output.infeasible)
# TODO: instead replace each time
return output.to_wild()
def get_unsatisfiable(self):
constraints = substitute_expression(self.external.certified, self.external.mapping)
index_from_constraint = {c: i for i, c in enumerate(constraints)}
# TODO: compute connected components
result_from_index = defaultdict(set)
for result in self.external.stream_plan:
for fact in result.get_certified():
if fact in index_from_constraint:
result_from_index[index_from_constraint[fact]].add(result)
# TODO: add implied results
#orders = get_partial_orders(self.external.stream_plan)
return [{result for index in cluster for result in result_from_index[index]}
for cluster in prune_dominated(self.infeasible)]
class OptimizerStream(Stream):
_Instance = OptimizerInstance
def __init__(self, optimizer, external_plan):
optimizer.streams.append(self)
self.optimizer = optimizer
self.stream_plan, self.function_plan = partition_external_plan(external_plan)
inputs, domain, outputs, certified, functions, self.macro_from_micro, \
self.input_objects, self.output_objects, self.fluent_facts = get_cluster_values(external_plan)
hint = self.create_hint()
self.objectives = certified + functions
gen_fn = get_list_gen_fn(optimizer.procedure, inputs, outputs, self.objectives, hint=hint)
#assert len(self.get_cluster_plans()) == 1
super(OptimizerStream, self).__init__(optimizer.name, gen_fn, inputs, domain, outputs,
certified, optimizer.info)
def create_hint(self):
hint = {}
for result, mapping in safe_zip(self.stream_plan, self.macro_from_micro):
if isinstance(result, StreamResult):
for param, obj in safe_zip(result.external.outputs, result.output_objects):
if isinstance(obj, Object):
hint[mapping[param]] = obj.value
return hint
@property
def mapping(self):
return get_mapping(self.inputs + self.outputs,
self.input_objects + self.output_objects)
def get_cluster_plans(self):
# TODO: split the optimizer into clusters when provably independent
return get_stream_plan_components(self.stream_plan + self.function_plan)
@property
def instance(self):
return self.get_instance(self.input_objects, fluent_facts=self.fluent_facts)
##################################################
def add_result_inputs(result, param_from_obj, local_mapping, inputs, input_objects):
for param, obj in zip(result.instance.external.inputs, result.instance.input_objects):
# TODO: only do optimistic parameters?
if obj not in param_from_obj:
param_from_obj[obj] = '?i{}'.format(len(inputs)) # '?_i{}'
inputs.append(param_from_obj[obj])
input_objects.append(obj)
local_mapping[param] = param_from_obj[obj]
def add_result_outputs(result, param_from_obj, local_mapping, outputs, output_objects):
for param, obj in zip(result.instance.external.outputs, result.output_objects):
if obj not in param_from_obj:
param_from_obj[obj] = '?o{}'.format(len(outputs))
outputs.append(param_from_obj[obj])
output_objects.append(obj)
local_mapping[param] = param_from_obj[obj]
def get_cluster_values(stream_plan):
param_from_obj = {}
macro_from_micro = []
inputs, domain, outputs, certified, functions = [], set(), [], set(), set()
input_objects, output_objects = [], []
fluent_facts = []
for result in stream_plan:
local_mapping = {} # global_from_local
stream = result.instance.external
add_result_inputs(result, param_from_obj, local_mapping, inputs, input_objects)
domain.update(set(substitute_expression(stream.domain, local_mapping)) - certified)
if isinstance(result, PredicateResult):
# functions.append(Equal(stream.head, result.value))
# TODO: do I need the new mapping here?
mapping = {inp: param_from_obj[inp] for inp in result.instance.input_objects}
functions.update(substitute_expression(result.get_certified(), mapping))
elif isinstance(result, FunctionResult):
functions.add(substitute_expression(Minimize(stream.head), local_mapping))
else:
fluent_facts.extend(result.instance.fluent_facts)
add_result_outputs(result, param_from_obj, local_mapping, outputs, output_objects)
certified.update(substitute_expression(stream.certified, local_mapping))
macro_from_micro.append(local_mapping) # TODO: append for functions as well?
#assert not fluent_facts
return inputs, sorted(domain), outputs, sorted(certified), sorted(functions), \
macro_from_micro, input_objects, output_objects, fluent_facts
| 14,249 |
Python
| 48.307958 | 115 | 0.645168 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/rule.py
|
from collections import deque
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.stream import Stream, StreamInfo
from hsr_tamp.pddlstream.language.external import parse_lisp_list
from hsr_tamp.pddlstream.language.generator import from_test, universe_test
from hsr_tamp.pddlstream.language.conversion import list_from_conjunction, substitute_expression
# TODO: could signal a rule by making its gen_fn just the constant True
# TODO: could apply the rule in the initial state once but then couldn't support unexpected facts
# TODO: prune unnecessary preconditions using rules
from hsr_tamp.pddlstream.utils import get_mapping
RULES = [] # TODO: no global
def parse_rule(lisp_list, stream_map, stream_info):
value_from_attribute = parse_lisp_list(lisp_list[1:])
assert set(value_from_attribute) <= {':inputs', ':domain', ':certified'}
# TODO: if len(certified) == 1, augment existing streams
RULES.append(Stream(name='rule{}'.format(len(RULES)),
gen_fn=from_test(universe_test),
inputs=value_from_attribute.get(':inputs', []),
domain=list_from_conjunction(value_from_attribute.get(':domain', [])),
fluents=[],
outputs=[],
certified=list_from_conjunction(value_from_attribute.get(':certified', [])),
info=StreamInfo(eager=True, p_success=1, overhead=0, verbose=False)))
return RULES[-1]
# TODO: could make p_success=0 to prevent use in search
##################################################
def apply_rules_to_streams(rules, streams):
    # TODO: can actually do this with multiple conditions if the stream's certified facts contain all of them
    # TODO: also do this when there are no domain conditions
processed_rules = deque(rules)
while processed_rules:
rule = processed_rules.popleft()
if len(rule.domain) != 1:
continue
[rule_fact] = rule.domain
rule.info.p_success = 0 # Need not be applied
for stream in streams:
if not isinstance(stream, Stream):
continue
for certified_fact in stream.certified:
if get_prefix(rule_fact) == get_prefix(certified_fact):
mapping = get_mapping(get_args(rule_fact), get_args(certified_fact))
new_facts = set(substitute_expression(rule.certified, mapping)) - set(stream.certified)
stream.certified = stream.certified + tuple(new_facts)
if new_facts and (stream in rules):
processed_rules.append(stream)
| 2,680 |
Python
| 50.557691 | 107 | 0.633582 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/fluent.py
|
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.exogenous import replace_literals
from hsr_tamp.pddlstream.language.external import get_domain_predicates
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import find_unique, get_mapping, safe_apply_mapping
def get_predicate_map(state_streams):
predicate_map = {}
for state_stream in state_streams:
for fact in state_stream.certified:
predicate = get_prefix(fact)
if predicate in predicate_map:
# TODO: could make a disjunctive condition instead
raise NotImplementedError('Only one fluent stream can certify a predicate: {}'.format(predicate))
predicate_map[predicate] = state_stream
return predicate_map
def remap_certified(literal, stream):
certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
mapping = get_mapping(get_args(certified), literal.args)
if not all(arg in mapping for arg in stream.inputs): # Certified must contain all inputs
return None
return mapping
def compile_fluent_streams(domain, externals):
state_streams = set(filter(lambda e: isinstance(e, Stream) and e.is_special, externals))
predicate_map = get_predicate_map(state_streams)
if not predicate_map:
return state_streams
    # TODO: allow usage as long as it is within the same action (e.g. for cost functions)
# TODO: could create a separate action per control parameter
if get_domain_predicates(externals) & set(predicate_map):
raise RuntimeError('Fluent streams certified facts cannot be domain facts')
# TODO: could make free parameters free
# TODO: could treat like a normal stream that generates values (but with no inputs required/needed)
import pddl
def fn(literal, action):
if literal.predicate not in predicate_map:
return literal
# TODO: other checks on only inputs
stream = predicate_map[literal.predicate]
mapping = remap_certified(literal, stream)
if mapping is None:
# TODO: this excludes typing. This is not entirely safe
return literal
output_args = set(mapping[arg] for arg in stream.outputs)
if isinstance(action, pddl.Action): # TODO: unified Action/Axiom effects
for effect in action.effects:
if isinstance(effect, pddl.Effect) and (output_args & set(effect.literal.args)):
raise RuntimeError('Fluent stream outputs cannot be in action effects: {}'.format(
effect.literal.predicate))
elif not stream.is_negated:
axiom = action
raise RuntimeError('Fluent stream outputs cannot be in an axiom: {}'.format(axiom.name))
blocked_args = safe_apply_mapping(stream.inputs, mapping)
blocked_literal = literal.__class__(stream.blocked_predicate, blocked_args).negate()
if stream.is_negated:
conditions = [blocked_literal]
conditions.extend(pddl.Atom(get_prefix(fact), safe_apply_mapping(get_args(fact), mapping)) # fd_from_fact
for fact in stream.domain) # TODO: be careful when using imply
return pddl.Conjunction(conditions) # TODO: prune redundant conditions
return pddl.Conjunction([literal, blocked_literal])
for action in domain.actions:
action.precondition = replace_literals(fn, action.precondition, action).simplified()
for effect in action.effects:
effect.condition = replace_literals(fn, effect.condition, action).simplified()
for axiom in domain.axioms:
axiom.condition = replace_literals(fn, axiom.condition, axiom).simplified()
return state_streams
| 3,832 |
Python
| 50.797297 | 117 | 0.68476 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/stream.py
|
import time
from collections import Counter
try:
    from collections.abc import Sequence # Python >= 3.3
except ImportError:
    from collections import Sequence # Python 2 fallback
from hsr_tamp.pddlstream.algorithms.common import INTERNAL_EVALUATION, add_fact
from hsr_tamp.pddlstream.algorithms.downward import make_axiom
from hsr_tamp.pddlstream.language.constants import AND, get_prefix, get_args, is_parameter, Fact, concatenate, StreamAction, Output
from hsr_tamp.pddlstream.language.conversion import list_from_conjunction, substitute_expression, \
get_formula_operators, values_from_objects, obj_from_value_expression, evaluation_from_fact, \
objects_from_values, substitute_fact
from hsr_tamp.pddlstream.language.external import ExternalInfo, Result, Instance, External, DEBUG, SHARED_DEBUG, DEBUG_MODES, \
get_procedure_fn, parse_lisp_list, select_inputs, convert_constants
from hsr_tamp.pddlstream.language.generator import get_next, from_fn, universe_test, from_test, BoundedGenerator
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject, UniqueOptValue, SharedOptValue, DebugValue, SharedDebugValue
from hsr_tamp.pddlstream.utils import str_from_object, get_mapping, irange, apply_mapping, safe_apply_mapping, safe_zip
VERBOSE_FAILURES = True
VERBOSE_WILD = False
DEFAULT_UNIQUE = False
NEGATIVE_BLOCKED = True
NEGATIVE_SUFFIX = '-negative'
CACHE_OPTIMISTIC = True
# TODO: could also make only wild facts and automatically identify output tuples satisfying certified
# TODO: default effort cost of streams with more inputs to be higher (but negated are free)
# TODO: automatically convert to test streams on inputs
##################################################
def get_empty_fn():
return lambda *input_values: None
def get_constant_fn(constant):
return lambda *input_values: constant
def get_identity_fn(indices):
return lambda *input_values: tuple(input_values[i] for i in indices)
##################################################
class PartialInputs(object):
def __init__(self, inputs='', unique=DEFAULT_UNIQUE, test=universe_test): #, num=1):
self.inputs = tuple(inputs.split())
self.unique = unique # TODO: refactor this
self.test = test
#self.num = num
self.stream = None
#def register(self, stream):
# assert self.stream is None
# self.stream = stream
# if self.unique:
# self.inputs = tuple(stream.inputs)
# assert set(self.inputs) <= set(stream.inputs)
#def __call__(self, *input_values):
# assert self.stream is not None
# if not self.test(*input_values):
# return
# input_objects = stream_instance.input_objects
# mapping = get_mapping(self.stream.inputs, input_objects)
# selected_objects = safe_apply_mapping(self.inputs, mapping)
# # for _ in irange(self.num):
# for _ in irange(stream_instance.num_optimistic):
# yield [tuple(SharedOptValue(self.stream.name, self.inputs, selected_objects, out)
# for out in self.stream.outputs)]
def get_opt_gen_fn(self, instance):
# TODO: just condition on the external
external = instance.external
inputs = external.inputs if self.unique else self.inputs
assert set(inputs) <= set(external.inputs)
unique = (set(inputs) == set(external.inputs))
# TODO: ensure no scoping errors with inputs
def gen_fn(*input_values):
if not self.test(*input_values):
return
# TODO: recover input_objects from input_values
selected_objects = select_inputs(instance, inputs)
for idx in irange(instance.num_optimistic): # self.num
# if len(inputs) == len(external.inputs):
# yield [tuple(UniqueOptValue(instance, idx, out)
# for out in external.outputs)]
# else:
if unique:
outputs = tuple(UniqueOptValue(instance, idx, out)
for out in external.outputs)
else:
outputs = tuple(SharedOptValue(external.name, inputs, selected_objects, out)
for out in external.outputs)
yield [outputs]
return gen_fn
def __repr__(self):
return repr(self.__dict__)
def get_constant_gen_fn(stream, constant):
def gen_fn(*input_values):
assert (len(stream.inputs) == len(input_values))
yield [tuple(constant for _ in range(len(stream.outputs)))]
return gen_fn
# def get_unique_fn(stream):
# # TODO: this should take into account the output number...
# def fn(*input_values):
# #input_objects = map(opt_obj_from_value, input_values)
# #stream_instance = stream.get_instance(input_objects)
# #output_values = tuple(UniqueOpt(stream_instance, i) for i in range(len(stream.outputs)))
# output_values = tuple(object() for _ in range(len(stream.outputs)))
# return [output_values]
# return fn
def get_debug_gen_fn(stream, shared=True):
if shared:
return from_fn(lambda *args, **kwargs: tuple(SharedDebugValue(stream.name, o) for o in stream.outputs))
return from_fn(lambda *args, **kwargs: tuple(DebugValue(stream.name, args, o) for o in stream.outputs))
##################################################
class WildOutput(object):
def __init__(self, values=[], facts=[], actions=[], enumerated=False, replan=False):
self.values = values
self.facts = facts
self.actions = actions
if self.actions:
raise NotImplementedError()
self.enumerated = enumerated
self.replan = replan # Reports back whether the problem has changed substantially
def __iter__(self):
return iter([self.values, self.facts])
class FluentOutput(object): # TODO: unify with OptimizerOutput
# TODO: allow fluent streams to report the subset of facts that caused failure
def __init__(self, assignments=[], facts=[], infeasible=[]):
self.assignments = list(assignments)
self.facts = list(facts)
self.infeasible = list(map(frozenset, infeasible))
def to_wild(self):
return WildOutput(self.assignments, self.facts)
def __bool__(self):
return bool(self.assignments)
__nonzero__ = __bool__
def __repr__(self):
#return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
return str_from_object(self.__dict__)
class StreamInfo(ExternalInfo):
def __init__(self, opt_gen_fn=None, negate=False, simultaneous=False,
verbose=True, **kwargs): # TODO: set negate to None to express no user preference
# TODO: could change frequency/priority for the incremental algorithm
# TODO: maximum number of evaluations per iteration of adaptive
super(StreamInfo, self).__init__(**kwargs)
# TODO: call this an abstraction instead
self.opt_gen_fn = PartialInputs() if opt_gen_fn is None else opt_gen_fn
self.negate = negate
self.simultaneous = simultaneous
self.verbose = verbose
# TODO: make this false by default for negated test streams
#self.order = 0
##################################################
class StreamResult(Result):
def __init__(self, instance, output_objects, opt_index=None,
call_index=None, list_index=None, optimistic=True):
super(StreamResult, self).__init__(instance, opt_index, call_index, optimistic)
self.output_objects = tuple(output_objects)
assert len(self.output_objects) == len(self.external.outputs)
self.list_index = list_index
self._mapping = None
self._certified = None
self._stream_fact = None
@property
def mapping(self):
if self._mapping is None:
self._mapping = get_mapping(self.external.outputs, self.output_objects)
self._mapping.update(self.instance.mapping)
return self._mapping
@property
def stream_fact(self):
if self._stream_fact is None:
self._stream_fact = substitute_expression(self.external.stream_fact, self.mapping)
return self._stream_fact
@property
def certified(self):
if self._certified is None:
self._certified = substitute_expression(self.external.certified, self.mapping)
return self._certified
def get_certified(self):
return self.certified
def get_action(self):
return StreamAction(self.name, self.input_objects, self.output_objects)
def get_optimistic(self):
raise NotImplementedError()
index = 0
#index = self.call_index
return self.instance.opt_results[index]
def remap_inputs(self, bindings):
new_instance = self.instance.remap_inputs(bindings)
return self.__class__(new_instance, self.output_objects, self.opt_index,
self.call_index, self.list_index, self.optimistic)
# def remap_outputs(self, bindings):
# new_instance = self.instance.remap_inputs(bindings)
# output_objects = apply_mapping(self.output_objects, bindings)
# return self.__class__(new_instance, output_objects, self.opt_index,
# self.call_index, self.list_index, self.optimistic)
def is_successful(self):
return True
def __repr__(self):
return '{}:{}->{}'.format(self.external.name,
str_from_object(self.instance.input_objects),
str_from_object(self.output_objects))
##################################################
class StreamInstance(Instance):
_Result = StreamResult
def __init__(self, stream, input_objects, fluent_facts):
super(StreamInstance, self).__init__(stream, input_objects)
self._generator = None
self.fluent_facts = frozenset(fluent_facts)
self.opt_gen_fns = [opt_gen_fn.get_opt_gen_fn(self) if isinstance(opt_gen_fn, PartialInputs) else opt_gen_fn
for opt_gen_fn in self.external.opt_gen_fns]
self.opt_gens = len(self.opt_gen_fns)*[None]
self._axiom_predicate = None
self._disabled_axiom = None
# TODO: keep track of unique outputs to prune repeated ones
def _check_output_values(self, new_values):
if not isinstance(new_values, Sequence):
raise ValueError('An output list for stream [{}] is not a sequence: {}'.format(self.external.name, new_values))
for output_values in new_values:
if not isinstance(output_values, Sequence):
raise ValueError('An output tuple for stream [{}] is not a sequence: {}'.format(
self.external.name, output_values))
if len(output_values) != len(self.external.outputs):
raise ValueError('An output tuple for stream [{}] has length {} instead of {}: {}'.format(
self.external.name, len(output_values), len(self.external.outputs), output_values))
def _check_wild_facts(self, new_facts):
if not isinstance(new_facts, Sequence):
raise ValueError('Output wild facts for wild stream [{}] is not a sequence: {}'.format(
self.external.name, new_facts))
def reset(self):
super(StreamInstance, self).reset()
self.previous_outputs = set()
self.num_optimistic = 1
#########################
def get_result(self, output_objects, opt_index=None, list_index=None, optimistic=True):
# TODO: rename to create_result because not unique
# TODO: ideally would increment a flag per stream for each failure
call_index = self.num_calls
#call_index = self.successes # Only counts iterations that return results for complexity
return self._Result(instance=self, output_objects=tuple(output_objects), opt_index=opt_index,
call_index=call_index, list_index=list_index, optimistic=optimistic)
def get_all_input_objects(self): # TODO: lazily compute
return set(self.input_objects) | {o for f in self.fluent_facts for o in get_args(f)}
def get_fluent_values(self):
return [Fact(get_prefix(f), values_from_objects(get_args(f))) for f in self.fluent_facts]
def _create_generator(self):
if self._generator is None:
input_values = self.get_input_values()
if self.external.is_fluent: # self.fluent_facts
self._generator = self.external.gen_fn(*input_values, fluents=self.get_fluent_values())
else:
self._generator = self.external.gen_fn(*input_values)
return self._generator
def _next_wild(self):
output, self.enumerated = get_next(self._generator, default=[])
if not isinstance(output, WildOutput):
output = WildOutput(values=output)
return output
def _next_outputs(self):
# TODO: deprecate
self._create_generator()
# TODO: shuffle history
# TODO: return all test stream outputs at once
if self.num_calls == len(self.history):
self.history.append(self._next_wild())
return self.history[self.num_calls]
def dump_new_values(self, new_values=[]):
if (not new_values and VERBOSE_FAILURES) or \
(new_values and self.info.verbose):
print('iter={}, outs={}) {}:{}->{}'.format(
self.get_iteration(), len(new_values), self.external.name,
str_from_object(self.get_input_values()), str_from_object(new_values)))
def dump_new_facts(self, new_facts=[]):
if VERBOSE_WILD and new_facts:
# TODO: format all_new_facts
print('iter={}, facts={}) {}:{}->{}'.format(
self.get_iteration(), self.external.name, str_from_object(self.get_input_values()),
new_facts, len(new_facts)))
def next_results(self, verbose=False):
assert not self.enumerated
start_time = time.time()
start_history = len(self.history)
new_values, new_facts = self._next_outputs()
self._check_output_values(new_values)
self._check_wild_facts(new_facts)
if verbose:
self.dump_new_values(new_values)
self.dump_new_facts(new_facts)
objects = [objects_from_values(output_values) for output_values in new_values]
new_objects = list(filter(lambda o: o not in self.previous_outputs, objects))
self.previous_outputs.update(new_objects) # Only counting new outputs as successes
new_results = [self.get_result(output_objects, list_index=list_index, optimistic=False)
for list_index, output_objects in enumerate(new_objects)]
if start_history <= len(self.history) - 1:
self.update_statistics(start_time, new_results)
new_facts = list(map(obj_from_value_expression, new_facts))
self.successful |= any(r.is_successful() for r in new_results)
self.num_calls += 1 # Must be after get_result
#if self.external.is_test and self.successful:
# # Set of possible test stream outputs is exhausted (excluding wild)
# self.enumerated = True
return new_results, new_facts
#########################
def get_representative_optimistic(self):
for opt_gn in self.opt_gens:
if (opt_gn is not None) and opt_gn.history and opt_gn.history[0]:
return opt_gn.history[0][0]
return None
def wrap_optimistic(self, output_values, call_index):
output_objects = []
representative_outputs = self.get_representative_optimistic()
assert representative_outputs is not None
for name, value, rep in zip(self.external.outputs, output_values, representative_outputs):
# TODO: retain the value from a custom opt_gen_fn but use unique
#unique = UniqueOptValue(instance=self, sequence_index=call_index, output=name) # object()
#param = unique if (self.opt_index == 0) else value
param = value
value = rep
output_objects.append(OptimisticObject.from_opt(value, param))
return tuple(output_objects)
def _create_opt_generator(self, opt_index=None):
# TODO: automatically refine opt_index based on self.opt_gens
if opt_index is None:
opt_index = self.opt_index
if self.opt_gens[opt_index] is None:
self.opt_gens[opt_index] = BoundedGenerator(self.opt_gen_fns[opt_index](*self.get_input_values()))
opt_gen = self.opt_gens[opt_index]
try:
next(opt_gen) # next | list
except StopIteration:
pass
return self.opt_gens[opt_index]
def next_optimistic(self):
if self.enumerated or self.disabled:
return []
opt_gen = self._create_opt_generator(self.opt_index)
# TODO: how do I distinguish between real and not real verifications of things?
output_set = set()
opt_results = []
for output_list in opt_gen.history:
self._check_output_values(output_list)
for output_values in output_list:
call_index = len(opt_results)
output_objects = self.wrap_optimistic(output_values, call_index)
if output_objects not in output_set:
output_set.add(output_objects) # No point returning the exact thing here...
opt_results.append(self._Result(instance=self, output_objects=output_objects,
opt_index=self.opt_index, call_index=call_index, list_index=0))
return opt_results
def get_blocked_fact(self):
if self.external.is_fluent:
assert self._axiom_predicate is not None
return Fact(self._axiom_predicate, self.input_objects)
return Fact(self.external.blocked_predicate, self.input_objects)
def _disable_fluent(self, evaluations, domain):
assert self.external.is_fluent
if self.successful or (self._axiom_predicate is not None):
return
self.disabled = True
index = len(self.external.disabled_instances)
self.external.disabled_instances.append(self)
self._axiom_predicate = '_ax{}-{}'.format(self.external.blocked_predicate, index)
add_fact(evaluations, self.get_blocked_fact(), result=INTERNAL_EVALUATION,
complexity=self.compute_complexity(evaluations))
# TODO: allow reporting back minimum unsatisfiable subset
static_fact = Fact(self._axiom_predicate, self.external.inputs)
preconditions = [static_fact] + list(self.fluent_facts)
derived_fact = Fact(self.external.blocked_predicate, self.external.inputs)
self._disabled_axiom = make_axiom(
parameters=self.external.inputs,
preconditions=preconditions,
derived=derived_fact)
domain.axioms.append(self._disabled_axiom)
def _disable_negated(self, evaluations):
assert self.external.is_negated
if self.successful:
return
self.disabled = True
add_fact(evaluations, self.get_blocked_fact(), result=INTERNAL_EVALUATION,
complexity=self.compute_complexity(evaluations))
def disable(self, evaluations, domain):
#assert not self.disabled
#super(StreamInstance, self).disable(evaluations, domain)
if self.external.is_fluent:
self._disable_fluent(evaluations, domain)
elif self.external.is_negated:
self._disable_negated(evaluations)
else:
self.disabled = True
def enable(self, evaluations, domain):
if not self.disabled:
return
#if self._disabled_axiom is not None:
# self.external.disabled_instances.remove(self)
# domain.axioms.remove(self._disabled_axiom)
# self._disabled_axiom = None
#super(StreamInstance, self).enable(evaluations, domain) # TODO: strange infinite loop bug if enabled?
evaluations.pop(evaluation_from_fact(self.get_blocked_fact()), None)
def remap_inputs(self, bindings):
# TODO: speed this procedure up
#if not any(o in bindings for o in self.get_all_input_objects()):
# return self
input_objects = apply_mapping(self.input_objects, bindings)
fluent_facts = [substitute_fact(f, bindings) for f in self.fluent_facts]
new_instance = self.external.get_instance(input_objects, fluent_facts=fluent_facts)
new_instance.opt_index = self.opt_index
return new_instance
def __repr__(self):
return '{}:{}->{}'.format(self.external.name, self.input_objects, self.external.outputs)
##################################################
class Stream(External):
_Instance = StreamInstance
def __init__(self, name, gen_fn, inputs, domain, outputs, certified, info=StreamInfo(), fluents=[]):
super(Stream, self).__init__(name, info, inputs, domain)
self.outputs = tuple(outputs)
self.certified = tuple(map(convert_constants, certified))
self.constants.update(a for i in certified for a in get_args(i) if not is_parameter(a))
self.fluents = fluents
#self.fluents = [] if (gen_fn in DEBUG_MODES) else fluents
for p, c in Counter(self.outputs).items():
if not is_parameter(p):
raise ValueError('Output [{}] for stream [{}] is not a parameter'.format(p, name))
if c != 1:
raise ValueError('Output [{}] for stream [{}] is not unique'.format(p, name))
for p in set(self.inputs) & set(self.outputs):
raise ValueError('Parameter [{}] for stream [{}] is both an input and output'.format(p, name))
certified_parameters = {a for i in certified for a in get_args(i) if is_parameter(a)}
for p in (certified_parameters - set(self.inputs + self.outputs)):
raise ValueError('Parameter [{}] for stream [{}] is not included within outputs'.format(p, name))
for p in (set(self.outputs) - certified_parameters):
print('Warning! Output [{}] for stream [{}] is not covered by a certified condition'.format(p, name))
# TODO: automatically switch to unique if only used once
self.gen_fn = gen_fn # DEBUG_MODES
if gen_fn == DEBUG:
self.gen_fn = get_debug_gen_fn(self, shared=False) # TODO: list of abstractions that is considered in turn
elif gen_fn == SHARED_DEBUG:
self.gen_fn = get_debug_gen_fn(self, shared=True)
assert callable(self.gen_fn)
self.opt_gen_fns = [PartialInputs(unique=True)]
if not self.is_test and not self.is_special and not \
(isinstance(self.info.opt_gen_fn, PartialInputs) and self.info.opt_gen_fn.unique):
self.opt_gen_fns.append(self.info.opt_gen_fn)
if NEGATIVE_BLOCKED:
self.blocked_predicate = '~{}{}'.format(self.name, NEGATIVE_SUFFIX) # Args are self.inputs
else:
self.blocked_predicate = '~{}'.format(self.name)
self.disabled_instances = [] # For tracking disabled axioms
self.stream_fact = Fact('_{}'.format(name), concatenate(inputs, outputs)) # TODO: just add to certified?
if self.is_negated:
if self.outputs:
raise ValueError('Negated streams cannot have outputs: {}'.format(self.outputs))
#assert len(self.certified) == 1 # TODO: is it okay to have more than one fact?
for certified in self.certified:
if not (set(self.inputs) <= set(get_args(certified))):
raise ValueError('Negated streams must have certified facts including all input parameters')
#def reset(self):
# super(Stream, self).reset()
# self.disabled_instances = []
@property
def num_opt_fns(self):
return len(self.opt_gen_fns) - 1
@property
def has_outputs(self):
return bool(self.outputs)
@property
def is_test(self):
return not self.has_outputs
@property
def is_fluent(self):
return bool(self.fluents)
@property
def is_negated(self):
return self.info.negate
@property
def is_function(self):
return False
def get_instance(self, input_objects, fluent_facts=frozenset()):
input_objects = tuple(input_objects)
fluent_facts = frozenset(fluent_facts)
assert all(isinstance(obj, Object) or isinstance(obj, OptimisticObject) for obj in input_objects)
key = (input_objects, fluent_facts)
if key not in self.instances:
self.instances[key] = self._Instance(self, input_objects, fluent_facts)
return self.instances[key]
def as_test_stream(self):
# TODO: method that converts a stream into a test stream (possibly from ss)
raise NotImplementedError()
def __repr__(self):
return '{}:{}->{}'.format(self.name, self.inputs, self.outputs)
##################################################
def create_equality_stream():
return Stream(name='equality', gen_fn=from_test(universe_test),
inputs=['?o'], domain=[('Object', '?o')],
outputs=[], certified=[('=', '?o', '?o')],
info=StreamInfo(eager=True), fluents=[])
def create_inequality_stream():
#from hsr_tamp.pddlstream.algorithms.downward import IDENTICAL
return Stream(name='inequality', gen_fn=from_test(lambda o1, o2: o1 != o2),
inputs=['?o1', '?o2'], domain=[('Object', '?o1'), ('Object', '?o2')],
outputs=[], certified=[('=', '?o1', '?o2')],
info=StreamInfo(eager=True), fluents=[])
##################################################
def parse_stream(lisp_list, stream_map, stream_info):
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {':stream', ':inputs', ':domain', ':fluents', ':outputs', ':certified'}
name = value_from_attribute[':stream']
domain = value_from_attribute.get(':domain', None)
# TODO: dnf_from_positive_formula(value_from_attribute.get(':domain', []))
if not (get_formula_operators(domain) <= {AND}):
# TODO: allow positive DNF
raise ValueError('Stream [{}] domain must be a conjunction'.format(name))
certified = value_from_attribute.get(':certified', None)
if not (get_formula_operators(certified) <= {AND}):
raise ValueError('Stream [{}] certified must be a conjunction'.format(name))
return Stream(name, get_procedure_fn(stream_map, name),
value_from_attribute.get(':inputs', []),
list_from_conjunction(domain),
value_from_attribute.get(':outputs', []),
list_from_conjunction(certified),
stream_info.get(name, StreamInfo()),
fluents=value_from_attribute.get(':fluents', []))
| 27,086 |
Python
| 46.68838 | 134 | 0.618401 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/function.py
|
import time
from hsr_tamp.pddlstream.language.conversion import substitute_expression, list_from_conjunction, str_from_head
from hsr_tamp.pddlstream.language.constants import Not, Equal, get_prefix, get_args, is_head, FunctionAction
from hsr_tamp.pddlstream.language.external import ExternalInfo, Result, Instance, External, DEBUG_MODES, get_procedure_fn
from hsr_tamp.pddlstream.utils import str_from_object, apply_mapping
# https://stackoverflow.com/questions/847936/how-can-i-find-the-number-of-arguments-of-a-python-function
#try:
# from inspect import getfullargspec as get_arg_spec
#except ImportError:
# from inspect import getargspec as get_arg_spec
#from inspect import getargspec as get_arg_spec
#from inspect import signature
##################################################
def add_opt_function(name, base_fn, stream_map, stream_info, constant=0., coefficient=1., **external_kwargs):
stream_fn = lambda *args, **kwargs: constant + coefficient*base_fn(*args, **kwargs)
stream_map[name] = stream_fn
opt_fn = lambda *args, **kwargs: constant
info = FunctionInfo(opt_fn=opt_fn, **external_kwargs)
stream_info[name] = info
return stream_map, stream_info
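# Illustrative sketch (not part of the original module): registering a scaled cost function
# whose optimistic estimate is just the constant term. The 'distance' head and the local
# stream_map/stream_info dicts below are hypothetical.
def _example_add_opt_function():
    stream_map, stream_info = {}, {}
    add_opt_function('distance', lambda q1, q2: abs(q2 - q1),
                     stream_map, stream_info, constant=1., coefficient=10.)
    assert stream_map['distance'](0., 2.) == 21.  # 1 + 10*|2 - 0|
    assert stream_info['distance'].opt_fn(0., 2.) == 1.  # optimistic value ignores the arguments
    return stream_map, stream_info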
##################################################
class FunctionInfo(ExternalInfo):
_default_eager = True
def __init__(self, opt_fn=None, eager=_default_eager, verbose=True, **kwargs): # Setting eager=True as a heuristic
super(FunctionInfo, self).__init__(eager=eager, **kwargs)
self.opt_fn = opt_fn
self.verbose = verbose # TODO: move to ExternalInfo
#self.order = 0
class FunctionResult(Result):
def __init__(self, instance, value, optimistic=True):
super(FunctionResult, self).__init__(instance, opt_index=0, call_index=0, optimistic=optimistic)
self.instance = instance
self.value = value
self._certified = None
# TODO: could add empty output_objects tuple
@property
def certified(self):
if self._certified is None:
self._certified = [Equal(self.instance.head, self.value)]
return self._certified
def get_certified(self):
return self.certified
def get_action(self):
return FunctionAction(self.name, self.input_objects)
def remap_inputs(self, bindings):
#if not any(o in bindings for o in self.instance.get_all_input_objects()):
# return self
input_objects = apply_mapping(self.instance.input_objects, bindings)
new_instance = self.external.get_instance(input_objects)
return self.__class__(new_instance, self.value, self.optimistic)
def is_successful(self):
return True
def __repr__(self):
#from hsr_tamp.pddlstream.algorithms.downward import get_cost_scale
#value = math.log(self.value) # TODO: number of digits to display
return '{}={:.3f}'.format(str_from_head(self.instance.head), self.value)
class FunctionInstance(Instance):
_Result = FunctionResult
#_opt_value = 0
def __init__(self, external, input_objects):
super(FunctionInstance, self).__init__(external, input_objects)
self._head = None
@property
def head(self):
if self._head is None:
self._head = substitute_expression(self.external.head, self.mapping)
return self._head
@property
def value(self):
assert len(self.history) == 1
return self.history[0]
def _compute_output(self):
self.enumerated = True
self.num_calls += 1
if self.history:
return self.value
input_values = self.get_input_values()
value = self.external.fn(*input_values)
# TODO: cast the inputs and test whether still equal?
# if not (type(self.value) is self.external._codomain):
# if not isinstance(self.value, self.external.codomain):
if value < 0:
raise ValueError('Function [{}] produced a negative value [{}]'.format(self.external.name, value))
self.history.append(self.external.codomain(value))
return self.value
def next_results(self, verbose=False):
assert not self.enumerated
start_time = time.time()
start_history = len(self.history)
value = self._compute_output()
new_results = [self._Result(self, value, optimistic=False)]
new_facts = []
if (value is not False) and verbose:
# TODO: str(new_results[-1])
print('iter={}, outs={}) {}{}={:.3f}'.format(
self.get_iteration(), len(new_results), get_prefix(self.external.head),
str_from_object(self.get_input_values()), value))
if start_history <= len(self.history) - 1:
self.update_statistics(start_time, new_results)
self.successful |= any(r.is_successful() for r in new_results)
return new_results, new_facts
def next_optimistic(self):
if self.enumerated or self.disabled:
return []
# TODO: cache this value
opt_value = self.external.opt_fn(*self.get_input_values())
self.opt_results = [self._Result(self, opt_value, optimistic=True)]
return self.opt_results
def __repr__(self):
return '{}=?{}'.format(str_from_head(self.head), self.external.codomain.__name__)
class Function(External):
"""
An external nonnegative function F(i1, ..., ik) -> 0 <= int
External functions differ from streams in that their output isn't an object
"""
codomain = float # int | float
_Instance = FunctionInstance
#_default_p_success = 0.99 # 0.99 | 1 # Might be pruned using cost threshold
def __init__(self, head, fn, domain, info):
# TODO: function values that act as preconditions (cost must be below threshold)
if info is None:
# TODO: move the defaults to FunctionInfo in the event that an optimistic fn is specified
info = FunctionInfo() #p_success=self._default_p_success)
super(Function, self).__init__(get_prefix(head), info, get_args(head), domain)
self.head = head
opt_fn = lambda *args: self.codomain()
self.fn = opt_fn if (fn in DEBUG_MODES) else fn
#arg_spec = get_arg_spec(self.fn)
#if len(self.inputs) != len(arg_spec.args):
# raise TypeError('Function [{}] expects inputs {} but its procedure has inputs {}'.format(
# self.name, list(self.inputs), arg_spec.args))
self.opt_fn = opt_fn if (self.info.opt_fn is None) else self.info.opt_fn
self.num_opt_fns = 0 # TODO: support multiple opt_fns
@property
def function(self):
return get_prefix(self.head)
@property
def has_outputs(self):
return False
@property
def is_fluent(self):
return False
@property
def is_negated(self):
return False
@property
def is_function(self):
return True
@property
def is_cost(self):
return True
def __repr__(self):
return '{}=?{}'.format(str_from_head(self.head), self.codomain.__name__)
##################################################
class PredicateInfo(FunctionInfo):
_default_eager = False
class PredicateResult(FunctionResult):
def get_certified(self):
# TODO: cache these results
expression = self.instance.head
return [expression if self.value else Not(expression)]
def is_successful(self):
opt_value = self.external.opt_fn(*self.instance.get_input_values())
return self.value == opt_value
class PredicateInstance(FunctionInstance):
_Result = PredicateResult
#_opt_value = True # True | False | Predicate._codomain()
#def was_successful(self, results):
# #self.external.opt_fn(*input_values)
# return any(r.value for r in results)
class Predicate(Function):
"""
An external predicate P(i1, ..., ik) -> {False, True}
External predicates do not make the closed world assumption
"""
_Instance = PredicateInstance
codomain = bool
#def is_negative(self):
# return self._Instance._opt_value is False
def __init__(self, head, fn, domain, info):
if info is None:
info = PredicateInfo()
super(Predicate, self).__init__(head, fn, domain, info)
assert(self.info.opt_fn is None)
self.blocked_predicate = self.name
@property
def predicate(self):
return self.function
@property
def is_negated(self):
return True
@property
def is_cost(self):
return False
##################################################
def parse_common(lisp_list, stream_map, stream_info):
assert (2 <= len(lisp_list) <= 3)
head = tuple(lisp_list[1])
assert (is_head(head))
name = get_prefix(head)
fn = get_procedure_fn(stream_map, name)
domain = []
if len(lisp_list) == 3:
domain = list_from_conjunction(lisp_list[2])
info = stream_info.get(name, None)
return head, fn, domain, info
def parse_function(*args):
return Function(*parse_common(*args))
def parse_predicate(*args):
return Predicate(*parse_common(*args))
| 9,145 |
Python
| 38.593073 | 121 | 0.627337 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/temporal.py
|
from __future__ import print_function
#from os.path import expanduser
import os
import re
import subprocess
import time
import sys
import traceback
from collections import namedtuple
from hsr_tamp.pddlstream.algorithms.downward import TEMP_DIR, DOMAIN_INPUT, PROBLEM_INPUT, make_effects, \
parse_sequential_domain, get_conjunctive_parts, write_pddl, make_action, make_parameters, make_object, fd_from_fact, Domain, make_effects
from hsr_tamp.pddlstream.language.constants import DurativeAction, Fact, Not
from hsr_tamp.pddlstream.utils import INF, ensure_dir, write, user_input, safe_rm_dir, read, elapsed_time, find_unique, safe_zip
PLANNER = 'tfd' # tfd | tflap | optic | tpshe | cerberus
# tflap: no conditional effects, no derived predicates
# optic: no negative preconditions, no conditional effects, no goal derived predicates
# TODO: previously slow instantiation was due to a missing precondition on move
# TODO: installing coin broke FD compilation so I uninstalled it
# sudo apt-get install cmake coinor-libcbc-dev coinor-libclp-dev
# sudo apt-get install coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev doxygen libbz2-dev bison flex
# sudo apt-get install coinor-cbc
# sudo apt-get install -y g++ make flex bison cmake doxygen coinor-clp coinor-libcbc-dev coinor-libclp-dev coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev libbz2-dev libgsl-dev libz-dev
# sudo apt-get remove coinor-libcbc-dev coinor-libclp-dev
# sudo apt-get remove coinor-libcoinutils-dev coinor-libosi-dev coinor-libcgl-dev
##################################################
# /home/caelan/Programs/VAL
ENV_VAR = 'TFD_PATH'
#TFD_PATH = '/home/caelan/Programs/tfd-src-0.4/downward'
#TFD_PATH = '/home/caelan/Programs/TemPorAl/src/src/TFD'
#TFD_PATH = '/home/caelan/Programs/TemPorAl/src/src/temporal-FD'
MAX_TIME = '{max_planner_time}'
PLAN_FILE = 'plan'
#TFD_TRANSLATE = os.path.join(TFD_PATH, 'downward/translate/') # TFD
# TODO: the search produces unsound plans when it prints the full state-space
# TODO: still occasionally does this with the current settings
TFD_OPTIONS = {
'a': False, # anytime search
't': MAX_TIME, # success timeout
'T': MAX_TIME, # failure timeout
'g': False, # greedy search
'l': True, # disable lazy evaluation (slow when using the makespan heuristic)
'v': True, # disable verbose
'y+Y': True, # CEA heuristic
'x+X': False, # makespan heuristic
'G': 'm', # g-value evaluation (m, c, t, w)
'Q': 'p', # queue (r, p, h)
'r': True, # reschedule # TODO: reschedule doesn't seem to work well with conditional effects
#'O': 1, # num ordered preferred ops, TFD doesn't support
#'C': 1, # num cheapest preferred ops, TFD doesn't support
#'E': 1000, # num expensive preferred ops
#'R': 1000, # num random preferred ops,
'e': True, # epsilon internally
'f': False, # epsilon externally
#'b': True, # reset after solution, TFD doesn't support
}
def create_planner(anytime=False, greedy=False, lazy=False, h_cea=False, h_makespan=False, reschedule=False):
planner = dict(TFD_OPTIONS)
planner.update({
'a': anytime, # anytime search
'g': greedy, # greedy search
'l': not lazy, # disable lazy evaluation (slow when using the makespan heuristic)
'y+Y': h_cea, # CEA heuristic
'x+X': h_makespan, # makespan heuristic
'r': reschedule, # reschedule
})
return planner
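# Usage sketch (illustrative): create_planner(greedy=True, h_cea=True) returns a copy of
# TFD_OPTIONS with 'g' and 'y+Y' enabled; solve_tfd() below then collapses the dict into a
# '+'-separated option string via format_option() and splices it into TFD_COMMAND.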
# https://github.com/caelan/TemporalFastDownward/blob/020da65a39d3f44c821cc2062d1006ccb0fcd7e5/downward/search/best_first_search.cc#L376
# best_first_search
# makespan seems to be computed using timestep plus longest action
def format_option(pair):
key, value = pair
if value is True:
return key
if value is False:
return None
return '{}+{}'.format(key, value)
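# Example sketch of how individual options are rendered:
#   format_option(('l', True))  -> 'l'     (flag enabled)
#   format_option(('a', False)) -> None    (dropped by the filter in solve_tfd)
#   format_option(('t', 60))    -> 't+60'  (key+value pair)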
# Contains universal conditions: 1
# Disabling rescheduling because of universal conditions in original task!
# TODO: convert finite quantifiers
# /home/caelan/Programs/VAL/validate /home/caelan/Programs/pddlstream/temp/domain.pddl /home/caelan/Programs/pddlstream/temp/problem.pddl /home/caelan/Programs/pddlstream/temp/plan
# Parameters just used in search (and split by +)
#TFD_COMMAND = 'plan.py n {} {} {}' # Default in plannerParameters.h
#TFD_COMMAND = 'plan.py y+Y+a+e+r+O+1+C+1+b {} {} {}' # Default in ./plan
#TFD_COMMAND = 'plan.py y+Y+e+O+1+C+1+b {} {} {}'
#TFD_COMMAND = 'plan.py +x+X+e+O+1+C+1+b+G+m+T+10+Q+p {} {} {}'
TFD_COMMAND = 'plan.py %s {} {} {}'
# TODO: TFD sometimes returns incorrect plans
# ./VAL/validate pddlstream/temp/domain.pddl pddlstream/temp/problem.pddl pddlstream/temp/plan
# Finds a plan and then retimes it
"""
Usage: search <option characters> (input read from stdin)
Options are:
a - enable anytime search (otherwise finish on first plan found)
t <timeout secs> - total timeout in seconds for anytime search (when plan found)
T <timeout secs> - total timeout in seconds for anytime search (when no plan found)
m <monitor file> - monitor plan, validate a given plan
g - perform greedy search (follow heuristic)
l - disable lazy evaluation (Lazy = use parent's f instead of child's)
v - disable verbose printouts
y - cyclic cg CEA heuristic
Y - cyclic cg CEA heuristic - preferred operators
x - cyclic cg makespan heuristic
X - cyclic cg makespan heuristic - preferred operators
G [m|c|t|w] - G value evaluation, one of m - makespan, c - pathcost, t - timestamp, w [weight] - weighted / Note: One of those has to be set!
Q [r|p|h] - queue mode, one of r - round robin, p - priority, h - hierarchical
K - use tss known filtering (might crop search space)!
n - no_heuristic
r - reschedule_plans
O [n] - prefOpsOrderedMode, with n being the number of pref ops used
C [n] - prefOpsCheapestMode, with n being the number of pref ops used
E [n] - prefOpsMostExpensiveMode, with n being the number of pref ops used
e - epsilonize internally
f - epsilonize externally
p <plan file> - plan filename prefix
M v - monitoring: verify timestamps
u - do not use caching in heuristic
"""
# b - reset_after_solution_was_found
# p - plan_name
# i - reward_only_pref_op_queue
# S - pref_ops_concurrent_mode
# R - number_pref_ops_rand_mode
# K use_known_by_logical_state_only=True
# Default parameters (plan.py n {} {} {})
"""
Planner Parameters:
Anytime Search: Disabled
Timeout if plan was found: 0 seconds (no timeout)
Timeout while no plan was found: 0 seconds (no timeout)
Greedy Search: Disabled
Verbose: Enabled
Lazy Heuristic Evaluation: Enabled
Use caching in heuristic.
Cyclic CG heuristic: Disabled Preferred Operators: Disabled
Makespan heuristic: Disabled Preferred Operators: Disabled
No Heuristic: Enabled
Cg Heuristic Zero Cost Waiting Transitions: Enabled
Cg Heuristic Fire Waiting Transitions Only If Local Problems Matches State: Disabled
PrefOpsOrderedMode: Disabled with 1000 goals
PrefOpsCheapestMode: Disabled with 1000 goals
PrefOpsMostExpensiveMode: Disabled with 1000 goals
PrefOpsRandMode: Disabled with 1000 goals
PrefOpsConcurrentMode: Disabled
Reset after solution was found: Disabled
Reward only preferred operators queue: Disabled
GValues by: Timestamp
Queue management mode: Priority based
Known by logical state only filtering: Disabled
use_subgoals_to_break_makespan_ties: Disabled
Reschedule plans: Disabled
Epsilonize internally: Disabled
Epsilonize externally: Disabled
Keep original plans: Enabled
Plan name: "/home/caelan/Programs/pddlstream/temp/plan"
Plan monitor file: "" (no monitoring)
Monitoring verify timestamps: Disabled
"""
# plannerParameters.h
"""
anytime_search = false;
timeout_while_no_plan_found = 0;
timeout_if_plan_found = 0;
greedy = false;
lazy_evaluation = true;
verbose = true;
insert_let_time_pass_only_when_running_operators_not_empty = false;
cyclic_cg_heuristic = false;
cyclic_cg_preferred_operators = false;
makespan_heuristic = false;
makespan_heuristic_preferred_operators = false;
no_heuristic = false;
cg_heuristic_zero_cost_waiting_transitions = true;
cg_heuristic_fire_waiting_transitions_only_if_local_problems_matches_state = false;
use_caching_in_heuristic = true;
g_values = GTimestamp;
g_weight = 0.5;
queueManagementMode = BestFirstSearchEngine::PRIORITY_BASED;
use_known_by_logical_state_only = false;
use_subgoals_to_break_makespan_ties = false;
reschedule_plans = false;
epsilonize_internally = false;
epsilonize_externally = false;
keep_original_plans = true;
pref_ops_ordered_mode = false;
pref_ops_cheapest_mode = false;
pref_ops_most_expensive_mode = false;
pref_ops_rand_mode = false;
pref_ops_concurrent_mode = false;
number_pref_ops_ordered_mode = 1000;
number_pref_ops_cheapest_mode = 1000;
number_pref_ops_most_expensive_mode = 1000;
number_pref_ops_rand_mode = 1000;
reset_after_solution_was_found = false;
reward_only_pref_op_queue = false;
plan_name = "sas_plan";
planMonitorFileName = "";
monitoring_verify_timestamps = false;
"""
##################################################
TFLAP_PATH = '/home/caelan/Programs/tflap/src'
# Usage: tflap <domain_file> <problem_file> <output_file> [-ground] [-static] [-mutex] [-trace]
# -ground: generates the GroundedDomain.pddl and GroundedProblem.pddl files.
# -static: keeps the static data in the planning task.
# -nsas: does not make translation to SAS (finite-domain variables).
# -mutex: generates the mutex.txt file with the list of static mutex facts.
# -trace: generates the trace.txt file with the search tree.
TFLAP_COMMAND = 'tflap {} {} {}'
#TFLAP_COMMAND = 'tflap {} {} {} -trace' # Seems to repeatedly fail
##################################################
OPTIC_PATH = '/home/caelan/Programs/optic2018/src/optic/src/optic'
OPTIC_COMMAND = 'optic-clp -N {} {} | tee {}'
"""
Usage: optic/src/optic/optic-clp [OPTIONS] domainfile problemfile [planfile, if -r specified]
Options are:
-N Don't optimise solution quality (ignores preferences and costs);
-0 Abstract out timed initial literals that represent recurrent windows;
-n<lim> Optimise solution quality, capping cost at <lim>;
-citation Display citation to relevant papers;
-b Disable best-first search - if EHC fails, abort;
-E Skip EHC: go straight to best-first search;
-e Use standard EHC instead of steepest descent;
-h Disable helpful-action pruning;
-k Disable compression-safe action detection;
-c Enable the tie-breaking in RPG that favour actions that slot into the partial order earlier;
-S Sort initial layer facts in RPG by availability order (only use if using -c);
-m Disable the tie-breaking in search that favours plans with shorter makespans;
-F Full FF helpful actions (rather than just those in the RP applicable in the current state);
-r Read in a plan instead of planning;
-T Rather than building a partial order, build a total-order
-v<n> Verbose to degree n (n defaults to 1 if not specified).
-L<n> LP verbose to degree n (n defaults to 1 if not specified).
"""
"""
Unfortunately, at present, the planner does not fully support ADL
unless in the rules for derived predicates. Only two aspects of
ADL can be used in action definitions:
- forall conditions, containing a simple conjunct of propositional and
numeric facts;
- Conditional (when... ) effects, and then only with numeric conditions
and numeric consequences on values which do not appear in the
preconditions of actions.
"""
##################################################
# TODO: tpshe seems to be broken
"""
usage: plan.py [-h] [--generator GENERATOR] [--time TIME] [--memory MEMORY]
[--iterated] [--no-iterated] [--plan-file PLANFILE]
[--validate] [--no-validate]
planner domain problem
"""
TPSHE_PATH = '/home/caelan/Programs/temporal-planning/'
#TPSHE_COMMAND = 'python {}bin/plan.py she {} {} --time {} --no-iterated'
TPSHE_COMMAND = 'bin/plan.py she {} {} --iterated'
#TPSHE_COMMAND = 'python {}bin/plan.py she {} {} --time {}'
#TPSHE_COMMAND = 'python {}bin/plan.py tempo-3 {} {} --time {}'
#TPSHE_COMMAND = 'python {}bin/plan.py stp-3 {} {} --time {}'
#temp_path = '.'
TPSHE_OUTPUT_PATH = 'tmp_sas_plan'
##################################################
CERB_PATH = '/home/caelan/Programs/cerberus'
#CERB_PATH = '/home/caelan/Programs/pddlstream/FastDownward'
#CERB_COMMAND = 'fast-downward.py {} {}'
CERB_COMMAND = 'plan.py {} {} {}'
# https://ipc2018-classical.bitbucket.io/planner-abstracts/teams_15_16.pdf
##################################################
def parse_temporal_solution(solution):
makespan = 0.0
plan = []
# TODO: this regex doesn't work for @
    regex = r'(\d+\.\d+):\s+' \
            r'\(\s*(\w+(?: \S+)*)\s*\)\s+' \
            r'\[(\d+\.\d+)\]'
for start, action, duration in re.findall(regex, solution):
entries = action.lower().split(' ')
action = DurativeAction(entries[0], tuple(entries[1:]), float(start), float(duration))
plan.append(action)
makespan = max(action.start + action.duration, makespan)
return plan, makespan
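# Example sketch: a TFD plan line such as
#   '0.000: (move robot loc1 loc2) [1.000]'
# is parsed into DurativeAction('move', ('robot', 'loc1', 'loc2'), 0.0, 1.0),
# and the makespan is the maximum of start + duration over all actions.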
def parse_plans(temp_path, plan_files):
best_plan, best_makespan = None, INF
for plan_file in plan_files:
solution = read(os.path.join(temp_path, plan_file))
plan, makespan = parse_temporal_solution(solution)
if makespan < best_makespan:
best_plan, best_makespan = plan, makespan
return best_plan, best_makespan
##################################################
def get_end(action):
return action.start + action.duration
def compute_start(plan):
if not plan:
return 0.
return min(action.start for action in plan)
def compute_end(plan):
if not plan:
return 0.
return max(map(get_end, plan))
def compute_duration(plan):
return compute_end(plan) - compute_start(plan)
def apply_start(plan, new_start):
if not plan:
return plan
old_start = compute_start(plan)
delta_start = new_start - old_start
return [DurativeAction(name, args, start + delta_start, duration)
for name, args, start, duration in plan]
def retime_plan(plan, duration=1):
if plan is None:
return plan
# TODO: duration per action
return [DurativeAction(name, args, i * duration, duration)
for i, (name, args) in enumerate(plan)]
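# Example sketch: retime_plan([('pick', ('b1',)), ('place', ('b1',))])
# -> [DurativeAction('pick', ('b1',), 0, 1), DurativeAction('place', ('b1',), 1, 1)],
# i.e. an untimed (name, args) plan is stretched into back-to-back unit-duration actions.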
def reverse_plan(plan):
if plan is None:
return None
makespan = compute_duration(plan)
return [DurativeAction(action.name, action.args, makespan - get_end(action), action.duration)
for action in plan]
##################################################
TemporalDomain = namedtuple('TemporalDomain', ['name', 'requirements', 'types', 'constants',
'predicates', 'functions', 'actions', 'durative_actions', 'axioms'])
# TODO: rename SimplifiedDomain
SimplifiedDomain = namedtuple('SimplifiedDomain', ['name', 'requirements', 'types', 'type_dict', 'constants',
'predicates', 'predicate_dict', 'functions', 'actions', 'axioms',
'durative_actions', 'pddl'])
def get_tfd_path():
if ENV_VAR not in os.environ:
raise RuntimeError('Environment variable {} is not defined!'.format(ENV_VAR))
return os.path.join(os.environ[ENV_VAR], 'downward/')
def parse_temporal_domain(domain_pddl):
translate_path = os.path.join(get_tfd_path(), 'translate/') # tfd & temporal-FD
prefixes = ['pddl', 'normalize']
deleted = delete_imports(prefixes)
sys.path.insert(0, translate_path)
import pddl
import normalize
temporal_domain = TemporalDomain(*pddl.tasks.parse_domain(pddl.parser.parse_nested_list(domain_pddl.splitlines())))
name, requirements, constants, predicates, types, functions, actions, durative_actions, axioms = temporal_domain
fluents = normalize.get_fluent_predicates(temporal_domain)
sys.path.remove(translate_path)
delete_imports(prefixes)
sys.modules.update(deleted) # This is important otherwise classes are messed up
import pddl
import pddl_parser
assert not actions
simple_from_durative = simple_from_durative_action(durative_actions, fluents)
simple_actions = [action for triplet in simple_from_durative.values() for action in triplet]
requirements = pddl.Requirements([])
types = [pddl.Type(ty.name, ty.basetype_name) for ty in types]
pddl_parser.parsing_functions.set_supertypes(types)
predicates = [pddl.Predicate(p.name, p.arguments) for p in predicates]
constants = convert_parameters(constants)
axioms = list(map(convert_axiom, axioms))
return SimplifiedDomain(name, requirements, types, {ty.name: ty for ty in types}, constants,
predicates, {p.name: p for p in predicates}, functions,
simple_actions, axioms, simple_from_durative, domain_pddl)
DURATIVE_ACTIONS = ':durative-actions'
def parse_domain(domain_pddl):
try:
return parse_sequential_domain(domain_pddl)
except AssertionError as e:
if str(e) == DURATIVE_ACTIONS:
return parse_temporal_domain(domain_pddl)
raise e
##################################################
def delete_imports(prefixes=['pddl']):
deleted = {}
for name in list(sys.modules):
if not name.startswith('pddlstream') and any(name.startswith(prefix) for prefix in prefixes):
deleted[name] = sys.modules.pop(name)
return deleted
#def simple_action_stuff(name, parameters, condition, effects):
# import pddl
# parameters = [pddl.TypedObject(param.name, param.type) for param in parameters]
# return pddl.Action(name, parameters, len(parameters), condition, effects, None)
def convert_args(args):
return [var.name for var in args]
def convert_condition(condition):
import pddl
class_name = condition.__class__.__name__
# TODO: compare class_name to the pddl class name
if class_name in ('Truth', 'FunctionComparison'):
# TODO: currently ignoring numeric conditions
return pddl.Truth()
elif class_name == 'Atom':
return pddl.Atom(condition.predicate, convert_args(condition.args))
elif class_name == 'NegatedAtom':
return pddl.NegatedAtom(condition.predicate, convert_args(condition.args))
elif class_name == 'Conjunction':
return pddl.conditions.Conjunction(list(map(convert_condition, condition.parts)))
elif class_name == 'Disjunction':
return pddl.Disjunction(list(map(convert_condition, condition.parts)))
elif class_name == 'ExistentialCondition':
return pddl.ExistentialCondition(convert_parameters(condition.parameters),
list(map(convert_condition, condition.parts)))
elif class_name == 'UniversalCondition':
return pddl.UniversalCondition(convert_parameters(condition.parameters),
list(map(convert_condition, condition.parts)))
raise NotImplementedError(class_name)
def convert_effects(effects):
import pddl
new_effects = make_effects([('_noop',)]) # To ensure the action has at least one effect
for effect in effects:
class_name = effect.__class__.__name__
if class_name == 'Effect':
peffect_name = effect.peffect.__class__.__name__
if peffect_name in ('Increase', 'Decrease'):
# TODO: currently ignoring numeric conditions
continue
new_effects.append(pddl.Effect(convert_parameters(effect.parameters),
pddl.Conjunction(list(map(convert_condition, effect.condition))).simplified(),
convert_condition(effect.peffect)))
else:
raise NotImplementedError(class_name)
return new_effects
def convert_axiom(axiom):
import pddl
parameters = convert_parameters(axiom.parameters)
return pddl.Axiom(axiom.name, parameters, len(parameters),
convert_condition(axiom.condition).simplified())
def convert_parameters(parameters):
import pddl
return [pddl.TypedObject(param.name, param.type) for param in parameters]
SIMPLE_TEMPLATE = '{}-{}'
def expand_condition(condition):
import pddl
return [part for part in get_conjunctive_parts(convert_condition(condition).simplified())
if not isinstance(part, pddl.Truth)]
def convert_durative(durative_actions, fluents):
# TODO: if static, apply as a condition to all
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
import pddl
wait_action = make_action(
name='wait',
parameters=['?t1', '?t2'],
preconditions=[
('time', '?t1'), ('time', '?t2'),
('attime', '?t1'),
#('CanMove',),
],
effects=[
('attime', '?t2'),
            Not(('attime', '?t1')),  # time advances from ?t1 to ?t2
#Not(('CanMove',)),
],
#cost=None,
)
#asdf = Fact('sum', ['?t1', '?t2'])
# TODO: need to connect the function
actions = [wait_action]
for action in durative_actions:
#print(type(action.duration))
static_condition = pddl.Conjunction(list({
part for condition in action.condition for part in get_conjunctive_parts(convert_condition(condition).simplified())
if not isinstance(part, pddl.Truth) and not (get_predicates(part) & fluents)}))
parameters = convert_parameters(action.parameters)
#start_cond, over_cond, end_cond = list(map(expand_condition, action.condition))
start_cond, over_cond, end_cond = list(map(convert_condition, action.condition))
#assert not over_cond
start_effects, end_effects = list(map(convert_effects, action.effects))
#start_effects, end_effects = action.effects
durative_predicate = 'durative-{}'.format(action.name)
fact = Fact(durative_predicate, ['?t2'] + [p.name for p in parameters])
start_parameters = [make_object(t) for t in ['?t1', '?dt', '?t2']] + parameters
start_action = pddl.Action('start-{}'.format(action.name), start_parameters, len(start_parameters),
pddl.Conjunction([pddl.Atom('sum', ['?t1', '?dt', '?t2']), pddl.Atom('attime', ['?t1']),
static_condition, start_cond, over_cond]).simplified(),
make_effects([fact]) + start_effects, None) # static_condition
# TODO: case matters
end_parameters = [make_object('?t2')] + parameters
end_action = pddl.Action('stop-{}'.format(action.name), end_parameters, len(end_parameters),
pddl.Conjunction([pddl.Atom('time', ['?t2']), pddl.Atom('attime', ['?t2']),
fd_from_fact(fact), static_condition, end_cond, over_cond]).simplified(),
make_effects([Not(fact)]) + end_effects, None) # static_condition
actions.extend([start_action, end_action])
for action in actions:
action.dump()
return actions
def simple_from_durative_action(durative_actions, fluents):
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
import pddl
simple_actions = {}
for action in durative_actions:
parameters = convert_parameters(action.parameters)
conditions = list(map(convert_condition, action.condition))
start_effects, end_effects = action.effects
over_effects = []
effects = list(map(convert_effects, [start_effects, over_effects, end_effects]))
static_condition = pddl.Conjunction(list({
part for condition in conditions for part in get_conjunctive_parts(condition.simplified())
if not isinstance(part, pddl.Truth) and not (get_predicates(part) & fluents)}))
# TODO: deal with case where there are fluents
actions = []
for i, (condition, effect) in enumerate(safe_zip(conditions, effects)):
# TODO: extract the durations by pretending they are action costs
actions.append(pddl.Action(SIMPLE_TEMPLATE.format(action.name, i), parameters, len(parameters),
pddl.Conjunction([static_condition, condition]).simplified(), effect, None))
#actions[-1].dump()
simple_actions[action] = actions
return simple_actions
def sequential_from_temporal_plan(plan):
if plan is None:
return plan
over_actions = []
state_changes = [DurativeAction(None, [], 0, 0)]
for durative_action in plan:
args = durative_action.args
start, end = durative_action.start, get_end(durative_action)
start_action, over_action, end_action = [SIMPLE_TEMPLATE.format(durative_action.name, i) for i in range(3)]
state_changes.append(DurativeAction(start_action, args, start, end - start))
#state_changes.append(DurativeAction(start_action, args, start, 0))
over_actions.append(DurativeAction(over_action, args, start, end - start))
state_changes.append(DurativeAction(end_action, args, end, 0))
state_changes = sorted(state_changes, key=lambda a: a.start)
sequence = []
for i in range(1, len(state_changes)):
# Technically should check the state change points as well
start_action = state_changes[i-1]
end_action = state_changes[i]
for over_action in over_actions:
if (over_action.start < end_action.start) and (start_action.start < get_end(over_action)): # Exclusive
sequence.append(over_action)
sequence.append(end_action)
return sequence
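# Example sketch: a single DurativeAction('move', args, start=0.0, duration=2.0)
# expands into the simple actions 'move-0' (start), 'move-1' (over all) and 'move-2' (end),
# ordered by their timestamps so overlapping durative actions interleave correctly in the
# returned sequence.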
##################################################
def solve_tfd(domain_pddl, problem_pddl, planner=TFD_OPTIONS, max_planner_time=60, debug=False, **kwargs):
if PLANNER == 'tfd':
root = get_tfd_path()
# TODO: make a function for this
args = '+'.join(sorted(filter(lambda s: s is not None, map(format_option, planner.items()))))
template = TFD_COMMAND % args.format(max_planner_time=max_planner_time)
elif PLANNER == 'cerberus':
root, template = CERB_PATH, CERB_COMMAND
elif PLANNER == 'tflap':
root, template = TFLAP_PATH, TFLAP_COMMAND
elif PLANNER == 'optic':
root, template = OPTIC_PATH, OPTIC_COMMAND
elif PLANNER == 'tpshe':
root, template = TPSHE_PATH, TPSHE_COMMAND
else:
raise ValueError(PLANNER)
start_time = time.time()
domain_path, problem_path = write_pddl(domain_pddl, problem_pddl)
plan_path = os.path.join(TEMP_DIR, PLAN_FILE)
#assert not actions, "There shouldn't be any actions - just temporal actions"
paths = [os.path.join(os.getcwd(), p) for p in (domain_path, problem_path, plan_path)]
command = os.path.join(root, template.format(*paths))
print(command)
if debug:
stdout, stderr = None, None
else:
stdout, stderr = open(os.devnull, 'w'), open(os.devnull, 'w')
proc = subprocess.call(command, shell=True, cwd=root, stdout=stdout, stderr=stderr) # timeout=None (python3)
error = proc != 0
print('Error:', error)
# TODO: returns an error when no plan was found
# TODO: close any opened resources
temp_path = os.path.join(os.getcwd(), TEMP_DIR)
plan_files = sorted(f for f in os.listdir(temp_path) if f.startswith(PLAN_FILE))
print('Plans:', plan_files)
best_plan, best_makespan = parse_plans(temp_path, plan_files)
#if not debug:
# safe_rm_dir(TEMP_DIR)
print('Makespan: ', best_makespan)
print('Time:', elapsed_time(start_time))
sequential_plan = sequential_from_temporal_plan(best_plan)
return sequential_plan, best_makespan
| 28,081 |
Python
| 40.850969 | 213 | 0.663153 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/constants.py
|
from __future__ import print_function
import os
from collections import namedtuple
from hsr_tamp.pddlstream.utils import INF, str_from_object, read
EQ = '=' # xnor
AND = 'and'
OR = 'or'
NOT = 'not'
EXISTS = 'exists'
FORALL = 'forall'
WHEN = 'when'
IMPLY = 'imply'
MINIMIZE = 'minimize'
MAXIMIZE = 'maximize'
INCREASE = 'increase'
PARAMETER = '?'
TYPE = '-'
OBJECT = 'object'
TOTAL_COST = 'total-cost' # TotalCost
TOTAL_TIME = 'total-time'
CONNECTIVES = (AND, OR, NOT, IMPLY)
QUANTIFIERS = (FORALL, EXISTS)
OBJECTIVES = (MINIMIZE, MAXIMIZE, INCREASE)
OPERATORS = CONNECTIVES + QUANTIFIERS + (WHEN,) # + OBJECTIVES
# TODO: OPTIMAL
SUCCEEDED = True
FAILED = None
INFEASIBLE = False
NOT_PLAN = [FAILED, INFEASIBLE]
# TODO: rename PDDLProblem
PDDLProblem = namedtuple('PDDLProblem', ['domain_pddl', 'constant_map',
'stream_pddl', 'stream_map', 'init', 'goal'])
Solution = namedtuple('Solution', ['plan', 'cost', 'certificate'])
Certificate = namedtuple('Certificate', ['all_facts', 'preimage_facts'])
OptPlan = namedtuple('OptPlan', ['action_plan', 'preimage_facts'])
# TODO: stream and axiom plans
# TODO: annotate which step each fact is first used via layer
Assignment = namedtuple('Assignment', ['args'])
Action = namedtuple('Action', ['name', 'args'])
DurativeAction = namedtuple('DurativeAction', ['name', 'args', 'start', 'duration'])
StreamAction = namedtuple('StreamAction', ['name', 'inputs', 'outputs'])
FunctionAction = namedtuple('FunctionAction', ['name', 'inputs'])
Head = namedtuple('Head', ['function', 'args'])
Evaluation = namedtuple('Evaluation', ['head', 'value'])
Atom = lambda head: Evaluation(head, True)
NegatedAtom = lambda head: Evaluation(head, False)
##################################################
def Output(*args):
return tuple(args)
def And(*expressions):
if len(expressions) == 1:
return expressions[0]
return (AND,) + tuple(expressions)
def Or(*expressions):
if len(expressions) == 1:
return expressions[0]
return (OR,) + tuple(expressions)
def Not(expression):
return (NOT, expression)
def Imply(expression1, expression2):
return (IMPLY, expression1, expression2)
def Equal(expression1, expression2):
return (EQ, expression1, expression2)
def Minimize(expression):
return (MINIMIZE, expression)
def Type(param, ty):
return (param, TYPE, ty)
def Exists(args, expression):
return (EXISTS, args, expression)
def ForAll(args, expression):
return (FORALL, args, expression)
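# Example sketch: goal formulas are built as nested tuples, e.g.
#   And(('Holding', 'b1'), Not(('On', 'b1', 'table')))
#   -> ('and', ('Holding', 'b1'), ('not', ('On', 'b1', 'table')))
#   Exists(['?p'], And(('Pose', 'b1', '?p'), ('AtPose', 'b1', '?p')))
#   -> ('exists', ['?p'], ('and', ('Pose', 'b1', '?p'), ('AtPose', 'b1', '?p')))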
##################################################
def get_prefix(expression):
return expression[0]
def get_args(head):
return head[1:]
def concatenate(*args):
output = []
for arg in args:
output.extend(arg)
return tuple(output)
def Fact(predicate, args):
return (predicate,) + tuple(args)
def is_parameter(expression):
return isinstance(expression, str) and expression.startswith(PARAMETER)
def get_parameter_name(expression):
if is_parameter(expression):
return expression[len(PARAMETER):]
return expression
def is_head(expression):
return get_prefix(expression) not in OPERATORS
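# Example sketch of the tuple-based fact representation used throughout:
#   fact = Fact('AtPose', ['?b', '?p'])   # -> ('AtPose', '?b', '?p')
#   get_prefix(fact) -> 'AtPose'; get_args(fact) -> ('?b', '?p')
#   is_parameter('?b') -> True; get_parameter_name('?b') -> 'b'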
##################################################
def is_plan(plan):
return not any(plan is status for status in NOT_PLAN)
def get_length(plan):
return len(plan) if is_plan(plan) else INF
def str_from_action(action):
name, args = action[:2]
return '{}{}'.format(name, str_from_object(tuple(args)))
def str_from_plan(plan):
if not is_plan(plan):
return str(plan)
return str_from_object(list(map(str_from_action, plan)))
def print_plan(plan):
if not is_plan(plan):
return
step = 1
for action in plan:
if isinstance(action, DurativeAction):
name, args, start, duration = action
print('{:.2f} - {:.2f}) {} {}'.format(start, start+duration, name,
' '.join(map(str_from_object, args))))
elif isinstance(action, Action):
name, args = action
print('{:2}) {} {}'.format(step, name, ' '.join(map(str_from_object, args))))
#print('{}) {}{}'.format(step, name, str_from_object(tuple(args))))
step += 1
elif isinstance(action, StreamAction):
name, inputs, outputs = action
print(' {}({})->({})'.format(name, ', '.join(map(str_from_object, inputs)),
', '.join(map(str_from_object, outputs))))
elif isinstance(action, FunctionAction):
name, inputs = action
print(' {}({})'.format(name, ', '.join(map(str_from_object, inputs))))
else:
raise NotImplementedError(action)
def print_solution(solution):
plan, cost, evaluations = solution
solved = is_plan(plan)
if plan is None:
num_deferred = 0
else:
num_deferred = len([action for action in plan if isinstance(action, StreamAction)
or isinstance(action, FunctionAction)])
print()
print('Solved: {}'.format(solved))
print('Cost: {:.3f}'.format(cost))
print('Length: {}'.format(get_length(plan) - num_deferred))
print('Deferred: {}'.format(num_deferred))
print('Evaluations: {}'.format(len(evaluations)))
print_plan(plan)
def get_function(term):
if get_prefix(term) in (EQ, MINIMIZE, NOT):
return term[1]
return term
def partition_facts(facts):
functions = []
negated = []
positive = []
for fact in facts:
prefix = get_prefix(fact)
func = get_function(fact)
if prefix in (EQ, MINIMIZE):
functions.append(func)
elif prefix == NOT:
negated.append(func)
else:
positive.append(func)
return positive, negated, functions
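# Example sketch: mixed fact lists are split by prefix, e.g.
#   partition_facts([('On', 'b1', 't'), Not(('Clear', 't')), Minimize(('Dist', '?q1', '?q2'))])
#   -> positive=[('On', 'b1', 't')], negated=[('Clear', 't')], functions=[('Dist', '?q1', '?q2')]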
def is_cost(o):
return get_prefix(o) == MINIMIZE
def get_costs(objectives):
return [o for o in objectives if is_cost(o)]
def get_constraints(objectives):
return [o for o in objectives if not is_cost(o)]
##################################################
DOMAIN_FILE = 'domain.pddl'
PROBLEM_FILE = 'problem.pddl'
STREAM_FILE = 'stream.pddl'
PDDL_FILES = [DOMAIN_FILE, PROBLEM_FILE]
PDDLSTREAM_FILES = [DOMAIN_FILE, STREAM_FILE]
def read_relative(file, relative_path): # file=__file__
directory = os.path.dirname(file)
path = os.path.abspath(os.path.join(directory, relative_path))
    return read(path)  # path is already absolute
def read_relative_dir(file, relative_dir='./', filenames=[]):
return [read_relative(file, os.path.join(relative_dir, filename)) for filename in filenames]
def read_pddl_pair(file, **kwargs):
return read_relative_dir(file, filenames=PDDL_FILES, **kwargs)
def read_pddlstream_pair(file, **kwargs):
return read_relative_dir(file, filenames=PDDLSTREAM_FILES, **kwargs)
| 6,912 |
Python
| 25.898833 | 96 | 0.61849 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/exogenous.py
|
from collections import defaultdict
from itertools import count
from hsr_tamp.pddlstream.algorithms.common import add_fact, INTERNAL_EVALUATION
from hsr_tamp.pddlstream.algorithms.downward import make_predicate, add_predicate, make_action, make_axiom, get_fluents
from hsr_tamp.pddlstream.language.constants import Head, Evaluation, get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, \
is_atom, fact_from_evaluation, substitute_expression, objects_from_values
from hsr_tamp.pddlstream.language.external import get_domain_predicates
from hsr_tamp.pddlstream.language.generator import from_fn
from hsr_tamp.pddlstream.language.stream import Stream
EXOGENOUS_AXIOMS = True
REPLACE_STREAM = True
# TODO: timed initial literals
# TODO: can do this whole story within the focused algorithm as well
class FutureValue(object):
# TODO: use this instead of debug value?
_output_counts = defaultdict(count)
def __init__(self, stream, input_values, output_parameter):
self.stream = stream
self.input_values = input_values
self.output_parameter = output_parameter
self.index = next(self._output_counts[output_parameter])
# TODO: hash this?
def __repr__(self):
return '@{}{}'.format(self.output_parameter[1:], self.index)
class FutureStream(Stream):
def __init__(self, stream, static_domain, fluent_domain, static_certified):
prefix = 'future-' if REPLACE_STREAM else ''
stream_name = '{}{}'.format(prefix, stream.name)
self.original = stream
self.fluent_domain = tuple(fluent_domain)
super(FutureStream, self).__init__(stream_name, stream.gen_fn, stream.inputs, static_domain,
stream.outputs, static_certified, stream.info, stream.fluents)
@property
def pddl_name(self):
return self.original.pddl_name
def get_fluent_domain(result):
# TODO: add to the stream itself
if not isinstance(result.external, FutureStream):
return tuple()
return substitute_expression(result.external.fluent_domain, result.mapping)
##################################################
def create_static_stream(stream, evaluations, fluent_predicates, future_fn):
def static_fn(*input_values):
instance = stream.get_instance(objects_from_values(input_values))
if all(evaluation_from_fact(f) in evaluations for f in instance.get_domain()):
return None
return tuple(FutureValue(stream.name, input_values, o) for o in stream.outputs)
#opt_evaluations = None
def static_opt_gen_fn(*input_values):
instance = stream.get_instance(objects_from_values(input_values))
if all(evaluation_from_fact(f) in evaluations for f in instance.get_domain()):
return
for output_values in stream.opt_gen_fn(*input_values):
yield output_values
# TODO: need to replace regular opt_gen_fn to update opt_evaluations
# if I want to prevent switch from normal to static in opt
# Focused algorithm naturally biases against using future because of axiom layers
fluent_domain = list(filter(lambda a: get_prefix(a) in fluent_predicates, stream.domain))
static_domain = list(filter(lambda a: a not in fluent_domain, stream.domain))
new_domain = list(map(future_fn, static_domain))
stream_atom = ('{}-result'.format(stream.name),) + tuple(stream.inputs + stream.outputs)
new_certified = [stream_atom] + list(map(future_fn, stream.certified))
static_stream = FutureStream(stream, new_domain, fluent_domain, new_certified)
if REPLACE_STREAM:
static_stream.gen_fn = from_fn(static_fn)
static_stream.opt_gen_fn = static_opt_gen_fn
return static_stream
# def replace_gen_fn(stream):
# future_gen_fn = from_fn(lambda *args: tuple(FutureValue(stream.name, args, o) for o in stream.outputs))
# gen_fn = stream.gen_fn
# def new_gen_fn(*input_values):
# if any(isinstance(value, FutureValue) for value in input_values):
# return future_gen_fn(*input_values)
# return gen_fn(*input_values)
# stream.gen_fn = new_gen_fn
##################################################
def augment_evaluations(evaluations, future_map):
for evaluation in list(filter(is_atom, evaluations)):
name = evaluation.head.function
if name in future_map:
new_head = Head(future_map[name], evaluation.head.args)
new_evaluation = Evaluation(new_head, evaluation.value)
add_fact(evaluations, fact_from_evaluation(new_evaluation),
result=INTERNAL_EVALUATION, complexity=0)
def rename_atom(atom, mapping):
name = get_prefix(atom)
if name not in mapping:
return atom
return (mapping[name],) + get_args(atom)
def compile_to_exogenous_actions(evaluations, domain, streams):
# TODO: version of this that operates on fluents of length one?
# TODO: better instantiation when have full parameters
fluent_predicates = get_fluents(domain)
certified_predicates = {get_prefix(a) for s in streams for a in s.certified}
future_map = {p: 'f-{}'.format(p) for p in certified_predicates}
augment_evaluations(evaluations, future_map)
future_fn = lambda a: rename_atom(a, future_map)
new_streams = []
for stream in list(streams):
if not isinstance(stream, Stream):
raise NotImplementedError(stream)
# TODO: could also just have conditions asserting that one of the fluent conditions fails
new_streams.append(create_static_stream(stream, evaluations, fluent_predicates, future_fn))
stream_atom = new_streams[-1].certified[0]
add_predicate(domain, make_predicate(get_prefix(stream_atom), get_args(stream_atom)))
preconditions = [stream_atom] + list(stream.domain)
effort = 1 # TODO: use stream info
#effort = 1 if unit_cost else result.instance.get_effort()
#if effort == INF:
# continue
domain.actions.append(make_action(
name='call-{}'.format(stream.name),
parameters=get_args(stream_atom),
preconditions=preconditions,
effects=stream.certified,
cost=effort))
stream.certified = tuple(set(stream.certified) |
set(map(future_fn, stream.certified)))
if REPLACE_STREAM:
streams.extend(new_streams)
else:
streams[:] = new_streams
##################################################
def get_exogenous_predicates(domain, streams):
return list(get_fluents(domain) & get_domain_predicates(streams))
def replace_literals(replace_fn, expression, *args):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return expression # TODO: replace constants?
if isinstance(expression, pddl.conditions.JunctorCondition):
new_parts = [replace_literals(replace_fn, p, *args) for p in expression.parts]
return expression.__class__(new_parts)
if isinstance(expression, pddl.conditions.QuantifiedCondition):
new_parts = [replace_literals(replace_fn, p, *args) for p in expression.parts]
return expression.__class__(expression.parameters, new_parts)
if isinstance(expression, pddl.conditions.Literal):
return replace_fn(expression, *args)
raise ValueError(expression)
def replace_predicates(predicate_map, expression):
def replace_fn(literal):
new_predicate = predicate_map.get(literal.predicate, literal.predicate)
return literal.__class__(new_predicate, literal.args)
return replace_literals(replace_fn, expression)
##################################################
def compile_to_exogenous_axioms(evaluations, domain, streams):
# TODO: no attribute certified
# TODO: recover the streams that are required
import pddl
fluent_predicates = get_fluents(domain)
certified_predicates = {get_prefix(a) for s in streams for a in s.certified}
future_map = {p: 'f-{}'.format(p) for p in certified_predicates}
augment_evaluations(evaluations, future_map)
future_fn = lambda a: rename_atom(a, future_map)
derived_map = {p: 'd-{}'.format(p) for p in certified_predicates}
derived_fn = lambda a: rename_atom(a, derived_map)
# TODO: could prune streams that don't need this treatment
for action in domain.actions:
action.precondition = replace_predicates(derived_map, action.precondition)
for effect in action.effects:
assert(isinstance(effect, pddl.Effect))
effect.condition = replace_predicates(derived_map, effect.condition)
for axiom in domain.axioms:
axiom.condition = replace_predicates(derived_map, axiom.condition)
#fluent_predicates.update(certified_predicates)
new_streams = []
for stream in list(streams):
if not isinstance(stream, Stream):
raise NotImplementedError(stream)
new_streams.append(create_static_stream(stream, evaluations, fluent_predicates, future_fn))
stream_atom = new_streams[-1].certified[0]
add_predicate(domain, make_predicate(get_prefix(stream_atom), get_args(stream_atom)))
preconditions = [stream_atom] + list(map(derived_fn, stream.domain))
for certified_fact in stream.certified:
derived_fact = derived_fn(certified_fact)
external_params = get_args(derived_fact)
internal_params = tuple(p for p in (stream.inputs + stream.outputs)
if p not in get_args(derived_fact))
domain.axioms.extend([
make_axiom(
parameters=external_params,
preconditions=[certified_fact],
derived=derived_fact),
make_axiom(
parameters=external_params+internal_params,
preconditions=preconditions,
derived=derived_fact),
])
stream.certified = tuple(set(stream.certified) |
set(map(future_fn, stream.certified)))
if REPLACE_STREAM:
streams.extend(new_streams)
else:
streams[:] = new_streams
##################################################
def compile_to_exogenous(evaluations, domain, streams):
exogenous_predicates = get_exogenous_predicates(domain, streams)
if not exogenous_predicates:
return False
print('Warning! The following predicates are mentioned in both action effects '
'and stream domain conditions: {}'.format(exogenous_predicates))
if EXOGENOUS_AXIOMS:
compile_to_exogenous_axioms(evaluations, domain, streams)
else:
compile_to_exogenous_actions(evaluations, domain, streams)
return True
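# Example sketch (hypothetical problem): if a stream such as 'test-visible' has
# ('AtConf', '?q') in its domain while some action also adds or deletes AtConf,
# then AtConf is exogenous here, and the stream is compiled into a
# 'future-test-visible' static stream plus axioms (or call- actions) as above.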
| 10,844 |
Python
| 45.745689 | 119 | 0.658982 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/statistics.py
|
from __future__ import print_function
import os
import pickle
from collections import Counter, namedtuple
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.utils import INF, read_pickle, ensure_dir, write_pickle, get_python_version
LOAD_STATISTICS = True
SAVE_STATISTICS = True
DATA_DIR = 'statistics/py{:d}/'
DEFAULT_SEARCH_OVERHEAD = 1e2 # TODO: update this over time
EPSILON = 1e-6
# Can also include the overhead to process skeletons
Stats = namedtuple('Stats', ['p_success', 'overhead'])
# TODO: ability to "burn in" streams by sampling artificially to get better estimates
def safe_ratio(numerator, denominator, undefined=None):
if denominator == 0:
return undefined
return float(numerator) / denominator
def geometric_cost(cost, p):
return safe_ratio(cost, p, undefined=INF)
def check_effort(effort, max_effort):
if max_effort is None:
return True
return effort < max_effort # Exclusive
def compute_plan_effort(stream_plan, **kwargs):
# TODO: compute effort in the delete relaxation way
if not is_plan(stream_plan):
return INF
if not stream_plan:
return 0
return sum(result.get_effort(**kwargs) for result in stream_plan)
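# Example sketch: safe_ratio(3, 4) -> 0.75, safe_ratio(1, 0, undefined=INF) -> INF.
# geometric_cost(2.0, 0.5) -> 4.0, i.e. the expected total overhead of retrying a
# 2-second stream that succeeds with probability 0.5 until it succeeds.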
##################################################
# TODO: write to a "local" folder containing temp, data2, data3, visualizations
def get_data_path(stream_name):
data_dir = DATA_DIR.format(get_python_version())
file_name = '{}.pkl'.format(stream_name)
return os.path.join(data_dir, file_name)
def load_data(pddl_name):
if not LOAD_STATISTICS:
return {}
filename = get_data_path(pddl_name)
if not os.path.exists(filename):
return {}
#try:
data = read_pickle(filename) # TODO: try/except
#except pickle.UnpicklingError:
#return {}
#print('Loaded:', filename)
return data
def load_stream_statistics(externals):
if not externals:
return
pddl_name = externals[0].pddl_name # TODO: ensure the same
# TODO: fresh restart flag
data = load_data(pddl_name)
for external in externals:
if external.name in data:
external.load_statistics(data[external.name])
##################################################
def dump_online_statistics(externals):
print('\nLocal External Statistics')
overall_calls = 0
overall_overhead = 0
for external in externals:
external.dump_online()
overall_calls += external.online_calls
overall_overhead += external.online_overhead
print('Overall calls: {} | Overall overhead: {:.3f}'.format(overall_calls, overall_overhead))
def dump_total_statistics(externals):
print('\nTotal External Statistics')
for external in externals:
external.dump_total()
# , external.get_effort()) #, data[external.name])
##################################################
def merge_data(external, previous_data):
# TODO: compute distribution of successes given feasible
# TODO: can estimate probability of success given feasible
# TODO: single tail hypothesis testing (probability that came from this distribution)
distribution = []
for instance in external.instances.values():
if instance.results_history:
# attempts = len(instance.results_history)
# successes = sum(map(bool, instance.results_history))
# print(instance, successes, attempts)
# TODO: also first attempt, first success
last_success = -1
for i, results in enumerate(instance.results_history):
if results:
distribution.append(i - last_success)
# successful = (0 <= last_success)
last_success = i
combined_distribution = previous_data.get('distribution', []) + distribution
# print(external, distribution)
# print(external, Counter(combined_distribution))
# TODO: count num failures as well
# Alternatively, keep metrics on the lower bound and use somehow
# Could assume that it is some other distribution beyond that point
return {
'calls': external.total_calls,
'overhead': external.total_overhead,
'successes': external.total_successes,
'distribution': combined_distribution,
}
# TODO: make an instance method
def write_stream_statistics(externals, verbose):
# TODO: estimate conditional to affecting history on skeleton
# TODO: estimate conditional to first & attempt and success
# TODO: relate to success for the full future plan
# TODO: Maximum Likelihood Exponential - average (biased in general)
if not externals:
return
if verbose:
#dump_online_statistics(externals)
dump_total_statistics(externals)
pddl_name = externals[0].pddl_name # TODO: ensure the same
previous_data = load_data(pddl_name)
data = {}
for external in externals:
if not hasattr(external, 'instances'):
continue # TODO: SynthesizerStreams
#total_calls = 0 # TODO: compute these values
previous_statistics = previous_data.get(external.name, {})
data[external.name] = merge_data(external, previous_statistics)
if not SAVE_STATISTICS:
return
filename = get_data_path(pddl_name)
ensure_dir(filename)
write_pickle(filename, data)
if verbose:
print('Wrote:', filename)
##################################################
def hash_object(evaluations, obj):
# TODO: hash an object by the DAG of streams that produced it
# Use this to more finely estimate the parameters of a stream
# Can marginalize over conditional information to recover the same overall statistics
# Can also apply this directly to domain facts
raise NotImplementedError()
##################################################
class PerformanceInfo(object):
def __init__(self, p_success=1-EPSILON, overhead=EPSILON, effort=None, estimate=False):
# TODO: make info just a dict
self.estimate = estimate
if self.estimate:
p_success = overhead = effort = None
if p_success is not None:
assert 0. <= p_success <= 1.
if overhead is not None:
assert 0. <= overhead
#if effort is not None:
# assert 0 <= effort
self.p_success = p_success
self.overhead = overhead
self.effort = effort
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, repr(self.__dict__))
class Performance(object):
def __init__(self, name, info):
self.name = name.lower()
self.info = info
self.initial_calls = 0
self.initial_overhead = 0.
self.initial_successes = 0
# TODO: online learning vs offline learning
self.online_calls = 0
self.online_overhead = 0.
self.online_successes = 0
@property
def total_calls(self):
return self.initial_calls + self.online_calls
@property
def total_overhead(self):
return self.initial_overhead + self.online_overhead
@property
def total_successes(self):
return self.initial_successes + self.online_successes
def load_statistics(self, statistics):
self.initial_calls = statistics['calls']
self.initial_overhead = statistics['overhead']
self.initial_successes = statistics['successes']
def update_statistics(self, overhead, success):
self.online_calls += 1
self.online_overhead += overhead
self.online_successes += success
def _estimate_p_success(self, reg_p_success=1., reg_calls=1):
# TODO: use prior from info instead?
return safe_ratio(self.total_successes + reg_p_success * reg_calls,
self.total_calls + reg_calls,
undefined=reg_p_success)
def _estimate_overhead(self, reg_overhead=1e-6, reg_calls=1):
# TODO: use prior from info instead?
return safe_ratio(self.total_overhead + reg_overhead * reg_calls,
self.total_calls + reg_calls,
undefined=reg_overhead)
def get_p_success(self):
# TODO: could precompute and store
if self.info.p_success is None:
return self._estimate_p_success()
return self.info.p_success
def get_overhead(self):
if self.info.overhead is None:
return self._estimate_overhead()
return self.info.overhead
def could_succeed(self):
return self.get_p_success() > 0
def _estimate_effort(self, search_overhead=DEFAULT_SEARCH_OVERHEAD):
p_success = self.get_p_success()
return geometric_cost(self.get_overhead(), p_success) + \
(1 - p_success) * geometric_cost(search_overhead, p_success)
def get_effort(self, **kwargs):
if self.info.effort is None:
return self._estimate_effort(**kwargs)
elif callable(self.info.effort):
return 0 # This really is a bound on the effort
return self.info.effort
def get_statistics(self, negate=False): # negate=True is for the "worst-case" ordering
sign = -1 if negate else +1
return Stats(p_success=self.get_p_success(), overhead=sign * self.get_overhead())
def dump_total(self):
print('External: {} | n: {:d} | p_success: {:.3f} | overhead: {:.3f}'.format(
self.name, self.total_calls, self._estimate_p_success(), self._estimate_overhead()))
def dump_online(self):
if not self.online_calls:
return
print('External: {} | n: {:d} | p_success: {:.3f} | mean overhead: {:.3f} | overhead: {:.3f}'.format(
self.name, self.online_calls,
safe_ratio(self.online_successes, self.online_calls),
safe_ratio(self.online_overhead, self.online_calls),
self.online_overhead))
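# Worked example (illustrative numbers): with p_success=0.5, overhead=1.0 and the default
# search_overhead=1e2, _estimate_effort() gives
#   geometric_cost(1.0, 0.5) + (1 - 0.5) * geometric_cost(100.0, 0.5) = 2.0 + 0.5 * 200.0 = 102.0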
##################################################
# TODO: cannot easily do Bayesian hypothesis testing because might never receive ground truth when empty
# In some cases, the stream does finish though
# Estimate probability that will generate result
# Need to produce belief that has additional samples
# P(Success | Samples) = estimated parameter
# P(Success | ~Samples) = 0
# T(Samples | ~Samples) = 0
# T(~Samples | Samples) = 1-p
# TODO: estimate a parameter conditioned on successful streams?
# Need a transition fn as well because generating a sample might change state
# Problem with estimating prior. Don't always have data on failed streams
# Goal: estimate P(Success | History)
# P(Success | History) = P(Success | Samples) * P(Samples | History)
# Previously in Instance
# def get_belief(self):
# #return 1.
# #prior = self.external.prior
# prior = 1. - 1e-2
# n = self.num_calls
# p_obs_given_state = self.external.get_p_success()
# p_state = prior
# for i in range(n):
# p_nobs_and_state = (1-p_obs_given_state)*p_state
# p_nobs_and_nstate = (1-p_state)
# p_nobs = p_nobs_and_state + p_nobs_and_nstate
# p_state = p_nobs_and_state/p_nobs
# return p_state
# def update_belief(self, success):
# # Belief that remaining sequence is non-empty
# # Belief only degrades in this case
# nonempty = 0.9
# p_success_nonempty = 0.5
# if success:
# p_success = p_success_nonempty*nonempty
# else:
# p_success = (1-p_success_nonempty)*nonempty + (1-nonempty)
#def get_p_success(self):
#p_success_belief = self.external.get_p_success()
#belief = self.get_belief()
#return p_success_belief*belief
# TODO: use the external as a prior
# TODO: Bayesian estimation of likelihood that has result
# Model hidden state of whether has values or if will produce values?
# TODO: direct estimation of different buckets in which it will finish
# TODO: we have samples from the CDF or something
#def get_p_success(self):
# return self.external.get_p_success()
#
#def get_overhead(self):
# return self.external.get_overhead()
| 12,081 |
Python
| 37.113565 | 109 | 0.634716 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/attachments.py
|
import os
import sys
import copy
from hsr_tamp.pddlstream.algorithms.advanced import get_predicates
from hsr_tamp.pddlstream.algorithms.downward import get_literals, get_conjunctive_parts, fd_from_fact, EQ, make_object, \
pddl_from_instance, DEFAULT_MAX_TIME, get_cost_scale
from hsr_tamp.pddlstream.language.object import Object
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl, substitute_fact
from hsr_tamp.pddlstream.language.fluent import get_predicate_map, remap_certified
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import INF, invert_dict, get_mapping, safe_zip
# Intuition: static facts about whether this state satisfies a condition
# The state can be seen as a hidden parameter with a precondition that you are at it
# TODO: refactor to algorithms
PYPLANNERS_VAR = 'PYPLANNERS_PATH'
PLACEHOLDER_OBJ = Object.from_value('~')
DEFAULT_PYPLANNER = {
'search': 'eager',
'evaluator': 'greedy',
'heuristic': 'ff',
'successors': 'all',
}
def get_pyplanners_path():
return os.environ.get(PYPLANNERS_VAR, None)
def has_attachments(domain):
return any(getattr(action, 'attachments', {}) for action in domain.actions)
##################################################
def compile_fluents_as_attachments(domain, externals):
state_streams = set(filter(lambda e: isinstance(e, Stream) and e.is_fluent, externals)) # TODO: is_negated/is_special
if not state_streams:
return externals
predicate_map = get_predicate_map(state_streams)
if predicate_map and (get_pyplanners_path() is None):
# TODO: fluent streams with outputs
# Could convert the free parameter to a constant
raise NotImplementedError('Algorithm does not support fluent streams: {}'.format(
[stream.name for stream in state_streams]))
import pddl
domain.constants.append(make_object(PLACEHOLDER_OBJ.pddl))
for action in domain.actions:
for effect in action.effects:
# TODO: conditional effects
if any(literal.predicate in predicate_map for literal in get_literals(effect.condition)):
raise ValueError('Attachments cannot be in action effects: {}'.format(effect))
action.attachments = {}
preconditions = set()
for literal in get_conjunctive_parts(action.precondition):
#if not isinstance(literal, pddl.Literal):
# raise NotImplementedError('Only literals are supported: {}'.format(literal))
if not get_predicates(literal) & set(predicate_map):
preconditions.add(literal)
continue
if not isinstance(literal, pddl.Literal):
raise NotImplementedError(literal)
# Drops the original precondition
stream = predicate_map[literal.predicate]
mapping = remap_certified(literal, stream)
assert mapping is not None
action.attachments[literal] = stream
preconditions.update(pddl.Atom(EQ, (mapping[out], PLACEHOLDER_OBJ.pddl))
for out in stream.outputs)
preconditions.update(fd_from_fact(substitute_fact(fact, mapping))
for fact in stream.domain)
action.precondition = pddl.Conjunction(preconditions).simplified()
#fn = lambda l: pddl.Truth() if l.predicate in predicate_map else l
#action.precondition = replace_literals(fn, action.precondition).simplified()
#action.dump()
return [external for external in externals if external not in state_streams]
##################################################
def get_attachment_test(action_instance):
from hsr_tamp.pddlstream.algorithms.scheduling.apply_fluents import get_fluent_instance
from hsr_tamp.pddlstream.language.fluent import remap_certified
# TODO: support for focused (need to resolve after binding)
# TODO: ensure no OptimisticObjects
fd_action_from_state = {}
def test(state):
if state in fd_action_from_state:
return True
#new_instance = action_instance
new_instance = copy.deepcopy(action_instance)
if not hasattr(action_instance.action, 'attachments'):
fd_action_from_state[state] = new_instance
return True
for literal, stream in new_instance.action.attachments.items():
param_from_inp = remap_certified(literal, stream)
input_objects = tuple(obj_from_pddl(
new_instance.var_mapping[param_from_inp[inp]]) for inp in stream.inputs)
stream_instance = get_fluent_instance(stream, input_objects, state) # Output automatically cached
results = stream_instance.first_results(num=1)
#results = stream_instance.all_results()
failure = not results
if literal.negated != failure:
return False
#args = action_instance.name.strip('()').split(' ')
#idx_from_param = {p.name: i for i, p in enumerate(action_instance.action.parameters)}
param_from_out = remap_certified(literal, stream)
result = results[0] # Arbitrary
out_from_obj = invert_dict(result.mapping)
for obj in result.output_objects:
param = param_from_out[out_from_obj[obj]]
new_instance.var_mapping[param] = obj.pddl
# idx = idx_from_param[param]
# args[1+idx] = obj.pddl
#action_instance.name = '({})'.format(' '.join(args))
fd_action_from_state[state] = new_instance
return True
return test, fd_action_from_state
def solve_pyplanners(instantiated, planner=None, max_planner_time=DEFAULT_MAX_TIME, max_cost=INF):
if instantiated is None:
return None, INF
# https://github.mit.edu/caelan/stripstream/blob/c8c6cd1d6bd5e2e8e31cd5603e28a8e0d7bb2cdc/stripstream/algorithms/search/pyplanners.py
pyplanners_path = get_pyplanners_path()
if pyplanners_path is None:
raise RuntimeError('Must clone https://github.com/caelan/pyplanners '
'and set the environment variable {} to its path'.format(PYPLANNERS_VAR))
if pyplanners_path not in sys.path:
sys.path.append(pyplanners_path)
# TODO: could operate on translated SAS instead
from strips.states import State, PartialState
from strips.operators import Action, Axiom
from strips.utils import solve_strips, default_derived_plan
import pddl
# TODO: PLUSONE costs
pyplanner = dict(DEFAULT_PYPLANNER)
if isinstance(planner, dict):
pyplanner.update(planner)
fd_action_from_py_action = {}
py_actions = []
for action in instantiated.actions:
#action.dump()
py_action = Action({'fd_action': action})
py_action.conditions = set(action.precondition)
py_action.effects = set()
for condition, effect in action.del_effects:
assert not condition
py_action.effects.add(effect.negate())
for condition, effect in action.add_effects:
assert not condition
py_action.effects.add(effect)
py_action.cost = action.cost
py_action.test, fd_action_from_py_action[py_action] = get_attachment_test(action)
py_actions.append(py_action)
py_axioms = []
for axiom in instantiated.axioms:
#axiom.dump()
py_axiom = Axiom({'fd_axiom_id': id(axiom)}) # Not hashable for some reason
py_axiom.conditions = set(axiom.condition)
py_axiom.effects = {axiom.effect}
py_axioms.append(py_axiom)
goal = PartialState(instantiated.goal_list)
fluents = {f.positive() for f in goal.conditions}
for py_operator in py_actions + py_axioms:
fluents.update(f.positive() for f in py_operator.conditions)
initial = State(atom for atom in instantiated.task.init
if isinstance(atom, pddl.Atom) and (atom in fluents))
plan, state_space = solve_strips(initial, goal, py_actions, py_axioms,
max_time=max_planner_time, max_cost=max_cost, **pyplanner)
if plan is None:
return None, INF
#fd_plan = [action.fd_action for action in plan.operators]
states = plan.get_states() # get_states | get_derived_states
fd_plan = [fd_action_from_py_action[action][state] for state, action in safe_zip(states[:-1], plan.operators)]
actions = [pddl_from_instance(action) for action in fd_plan]
#print(actions)
cost = plan.cost / get_cost_scale()
return actions, cost
| 8,628 |
Python
| 43.479381 | 137 | 0.653106 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/external.py
|
from collections import Counter
from hsr_tamp.pddlstream.algorithms.common import compute_complexity
from hsr_tamp.pddlstream.language.constants import get_args, is_parameter, get_prefix, Fact
from hsr_tamp.pddlstream.language.conversion import values_from_objects, substitute_fact, obj_from_value_expression
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.statistics import Performance, PerformanceInfo, DEFAULT_SEARCH_OVERHEAD, Stats
from hsr_tamp.pddlstream.utils import elapsed_time, get_mapping, flatten, INF, safe_apply_mapping, Score
DEBUG = 'debug'
SHARED_DEBUG = 'shared_debug'
DEBUG_MODES = [DEBUG, SHARED_DEBUG]
never_defer = lambda *args, **kwargs: False
defer_unique = lambda result, *args, **kwargs: result.is_refined()
defer_shared = lambda *args, **kwargs: True
def select_inputs(instance, inputs):
external = instance.external
assert set(inputs) <= set(external.inputs)
mapping = get_mapping(external.inputs, instance.input_objects)
return safe_apply_mapping(inputs, mapping)
def get_defer_any_unbound(unique=False):
def defer_any_unbound(result, bound_objects=set(), *args, **kwargs):
# The set bound_objects may contain shared objects in which case replanning is required
if unique and not defer_unique(result):
return False
return not all(isinstance(obj, Object) or (obj in bound_objects) for obj in result.input_objects)
return defer_any_unbound
def get_defer_all_unbound(inputs='', unique=False): # TODO: shortcut for all inputs
inputs = tuple(inputs.split())
# Empty implies defer_shared
def defer_all_unbound(result, bound_objects=set(), *args, **kwargs):
if unique and not defer_unique(result):
return False
return not any(isinstance(obj, Object) or (obj in bound_objects)
for obj in select_inputs(result.instance, inputs))
return defer_all_unbound
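# Hedged sketch of how a defer policy is attached: the per-stream info classes
# below (ExternalInfo and its stream-specific subclasses elsewhere in this package)
# take a defer_fn keyword; the choice of policy here is illustrative.
#     info = ExternalInfo(defer_fn=get_defer_any_unbound(unique=False))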
def get_domain_predicates(streams):
return {get_prefix(a) for s in streams for a in s.domain}
def convert_constants(fact):
# TODO: take the constant map as an input
# TODO: throw an error if undefined
return Fact(get_prefix(fact), [p if is_parameter(p) else Object.from_name(p) for p in get_args(fact)])
##################################################
class ExternalInfo(PerformanceInfo):
def __init__(self, eager=False, eager_skeleton=False, defer_fn=never_defer, **kwargs):
super(ExternalInfo, self).__init__(**kwargs)
# TODO: enable eager=True for inexpensive test streams by default
# TODO: change p_success and overhead if it's a function or test stream
self.eager = eager
self.eager_skeleton = eager_skeleton # TODO: apply in binding and adaptive
# TODO: automatically set tests and costs to be eager
self.defer_fn = defer_fn # Old syntax was defer=True
#self.complexity_fn = complexity_fn
##################################################
class Result(object):
def __init__(self, instance, opt_index, call_index, optimistic):
self.instance = instance
self.opt_index = opt_index
self.call_index = call_index
self.optimistic = optimistic
@property
def external(self):
return self.instance.external
@property
def info(self):
return self.external.info
@property
def name(self):
return self.external.name
@property
def input_objects(self):
return self.instance.input_objects
@property
def domain(self):
return self.instance.domain
def is_refined(self):
# TODO: result.opt_index is None
return self.opt_index == 0 # TODO: base on output objects instead
def is_deferrable(self, *args, **kwargs):
return self.info.defer_fn(self, *args, **kwargs)
def get_domain(self):
return self.instance.get_domain()
def get_certified(self):
raise NotImplementedError()
def get_components(self):
return [self]
def get_unsatisfiable(self):
return [self.get_components()]
def get_action(self):
raise NotImplementedError()
def remap_inputs(self, bindings):
raise NotImplementedError()
def is_successful(self):
raise NotImplementedError()
def compute_complexity(self, evaluations, **kwargs):
# Should be constant
return compute_complexity(evaluations, self.get_domain(), **kwargs) + \
self.external.get_complexity(self.call_index)
def get_effort(self, **kwargs):
if not self.optimistic:
return 0 # Unit efforts?
if self.external.is_negated:
return 0
# TODO: this should be the min of all instances
return self.instance.get_effort(**kwargs)
def success_heuristic(self): # High is likely to succeed
# self.external.is_function
num_free = sum(isinstance(obj, OptimisticObject) for obj in self.input_objects)
return Score(num_free, -len(self.external.inputs)) # TODO: treat objects in the same domain as a unit
def overhead_heuristic(self): # Low is cheap
return self.external.overhead_heuristic()
def stats_heuristic(self): # Low is cheap and unlikely to succeed
#return self.overhead_heuristic() + self.success_heuristic()
return Score(self.overhead_heuristic(), self.success_heuristic())
#return Stats(self.overhead_heuristic(), self.success_heuristic())
def effort_heuristic(self): # Low is cheap and likely to succeed
return Score(self.overhead_heuristic(), -self.success_heuristic())
##################################################
class Instance(object):
_Result = None
def __init__(self, external, input_objects):
self.external = external
self.input_objects = tuple(input_objects)
self.disabled = False # TODO: perform disabled using complexity
self.history = [] # TODO: facts history
self.results_history = []
self._mapping = None
self._domain = None
self.reset()
@property
def info(self):
return self.external.info
@property
def mapping(self):
if self._mapping is None:
self._mapping = get_mapping(self.external.inputs, self.input_objects)
#for constant in self.external.constants: # TODO: no longer needed
# self._mapping[constant] = Object.from_name(constant)
return self._mapping
@property
def domain(self):
if self._domain is None:
#self._domain = substitute_expression(self.external.domain, self.mapping)
self._domain = tuple(substitute_fact(atom, self.mapping)
for atom in self.external.domain)
return self._domain
def get_iteration(self):
return INF if self.enumerated else self.num_calls
def get_domain(self):
return self.domain
def get_all_input_objects(self):
return set(self.input_objects)
def get_input_values(self):
return values_from_objects(self.input_objects)
#def is_first_call(self): # TODO: use in streams
# return self.online_calls == 0
#def has_previous_success(self):
# return self.online_success != 0
def reset(self):
#self.enable(evaluations={}, domain=None)
self.disabled = False
self.opt_index = self.external.num_opt_fns
self.num_calls = 0
self.enumerated = False
self.successful = False
def is_refined(self):
return self.opt_index == 0
def refine(self):
# TODO: could instead create a new Instance per opt_index
if not self.is_refined():
self.opt_index -= 1
return self.opt_index
def next_results(self, verbose=False):
raise NotImplementedError()
def first_results(self, num=1, **kwargs):
results = []
index = 0
while len(results) < num:
while index >= len(self.results_history):
if self.enumerated:
return results
self.next_results(**kwargs)
results.extend(self.results_history[index])
index += 1
return results
def all_results(self, **kwargs):
return self.first_results(num=INF, **kwargs)
def get_results(self, start=0):
results = []
for index in range(start, self.num_calls):
results.extend(self.results_history[index])
return results
def compute_complexity(self, evaluations, **kwargs):
# Will change as self.num_calls increases
#num_calls = INF if self.enumerated else self.num_calls
return compute_complexity(evaluations, self.get_domain(), **kwargs) + \
self.external.get_complexity(self.num_calls)
def get_effort(self, search_overhead=DEFAULT_SEARCH_OVERHEAD):
# TODO: handle case where resampled several times before the next search (search every ith time)
replan_effort = self.opt_index * search_overhead # By linearity of expectation
effort_fn = self.external.info.effort
if callable(effort_fn):
return replan_effort + effort_fn(*self.get_input_values())
return replan_effort + self.external.get_effort(search_overhead=search_overhead)
def update_statistics(self, start_time, results):
overhead = elapsed_time(start_time)
successes = sum(r.is_successful() for r in results)
self.external.update_statistics(overhead, bool(successes))
self.results_history.append(results)
#self.successes += successes
def disable(self, evaluations, domain):
self.disabled = True
def enable(self, evaluations, domain):
self.disabled = False
##################################################
class External(Performance):
_Instance = None
def __init__(self, name, info, inputs, domain):
super(External, self).__init__(name, info)
self.inputs = tuple(inputs)
self.domain = tuple(map(convert_constants, domain))
for p, c in Counter(self.inputs).items():
if not is_parameter(p):
# AssertionError: Expected item to be a variable: q2 in (?q1 q2)
raise ValueError('Input [{}] for stream [{}] is not a parameter'.format(p, name))
if c != 1:
raise ValueError('Input [{}] for stream [{}] is not unique'.format(p, name))
parameters = {a for i in self.domain for a in get_args(i) if is_parameter(a)}
for p in (parameters - set(self.inputs)):
raise ValueError('Parameter [{}] for stream [{}] is not included within inputs'.format(p, name))
for p in (set(self.inputs) - parameters):
print('Warning! Input [{}] for stream [{}] is not covered by a domain condition'.format(p, name))
self.constants = {a for i in self.domain for a in get_args(i) if not is_parameter(a)}
self.instances = {}
def reset(self, *args, **kwargs):
for instance in self.instances.values():
instance.reset(*args, **kwargs)
# TODO: naming convention for statics and fluents
@property
def has_outputs(self):
raise NotImplementedError()
@property
def is_fluent(self):
raise NotImplementedError()
@property
def is_negated(self):
raise NotImplementedError()
@property
def is_special(self):
return self.is_fluent or self.is_negated
@property
def is_function(self):
raise NotImplementedError()
@property
def is_cost(self):
return False
@property
def zero_complexity(self):
return self.is_special or not self.has_outputs
def get_complexity(self, num_calls=0):
if self.zero_complexity:
return 0
return num_calls + 1
def get_instance(self, input_objects):
input_objects = tuple(input_objects)
assert len(input_objects) == len(self.inputs)
if input_objects not in self.instances:
self.instances[input_objects] = self._Instance(self, input_objects)
return self.instances[input_objects]
def overhead_heuristic(self): # Low is little overhead
# TODO: infer other properties from use in the context of a stream plan
# TODO: use num_certified (only those that are in another stream) instead of num_outputs?
#num_inputs = len(self.inputs)
#num_domain = len(self.domain)
return Score(self.is_fluent, not self.is_function, self.has_outputs, len(self.inputs)) # structural/relational overhead
#overhead = 1e0*num_inputs + 1e1*num_outputs + 1e2*bool(num_fluents)
#return overhead
##################################################
def get_procedure_fn(stream_map, name):
if not isinstance(stream_map, dict): # DEBUG_MODES
return stream_map
if name not in stream_map:
raise ValueError('Undefined external procedure: {}'.format(name))
return stream_map[name]
def is_attribute(attribute):
return isinstance(attribute, str) and attribute.startswith(':')
def parse_lisp_list(lisp_list):
attributes = [lisp_list[i] for i in range(0, len(lisp_list), 2)]
for attribute in attributes:
if not is_attribute(attribute):
raise ValueError('Expected an attribute but got: {}'.format(attribute))
values = [lisp_list[i] for i in range(1, len(lisp_list), 2)]
if len(lisp_list) % 2 != 0:
raise ValueError('No value specified for attribute [{}]'.format(lisp_list[-1]))
return get_mapping(attributes, values)
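# Illustrative example (values simplified; in practice they are parsed
# s-expression lists rather than raw strings):
#     parse_lisp_list([':inputs', ['?q1', '?q2'], ':domain', [['conf', '?q1']]])
#     # -> {':inputs': ['?q1', '?q2'], ':domain': [['conf', '?q1']]}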
| 13,621 |
Python
| 41.04321 | 127 | 0.639601 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/write_pddl.py
|
import re
import math
from hsr_tamp.pddlstream.language.constants import AND, OR, OBJECT, TOTAL_COST, TOTAL_TIME, is_cost, get_prefix, \
CONNECTIVES, QUANTIFIERS
from hsr_tamp.pddlstream.language.conversion import pddl_from_object, is_atom, is_negated_atom, objects_from_evaluations
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
DEFAULT_TYPE = OBJECT # number
def pddl_parameter(param):
return '{} - {}'.format(param, DEFAULT_TYPE)
#return param
def pddl_parameters(parameters):
return ' '.join(map(pddl_parameter, parameters))
def pddl_head(name, args):
return '({})'.format(' '.join([name] + list(map(pddl_from_object, args))))
def pddl_from_evaluation(evaluation):
#if evaluation.head.function == TOTAL_COST:
# return None
head = pddl_head(evaluation.head.function, evaluation.head.args)
if is_atom(evaluation):
return head
elif is_negated_atom(evaluation):
return '(not {})'.format(head)
#value = int(evaluation.value)
value = evaluation.value # floats are fine for temporal planners
#value = int(math.ceil(evaluation.value))
return '(= {} {})'.format(head, value)
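# Illustrative renderings (names are assumptions): an atom becomes '(atconf v0)',
# a negated atom '(not (atconf v0))', and a function value '(= (distance v0 v1) 2.5)'.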
def pddl_functions(predicates):
return '\n\t\t'.join(sorted(p.pddl() for p in predicates))
def pddl_connective(literals, connective):
if not literals:
return '()'
if len(literals) == 1:
return literals[0].pddl()
return '({} {})'.format(connective, ' '.join(l.pddl() for l in literals))
def pddl_conjunction(literals):
return pddl_connective(literals, AND)
def pddl_disjunction(literals):
return pddl_connective(literals, OR)
def pddl_from_expression(expression):
if isinstance(expression, Object) or isinstance(expression, OptimisticObject):
return pddl_from_object(expression)
if isinstance(expression, str):
return expression
return '({})'.format(' '.join(map(pddl_from_expression, expression)))
##################################################
def pddl_problem(problem, domain, evaluations, goal_expression, objective=None):
objects = objects_from_evaluations(evaluations)
s = '(define (problem {})\n' \
'\t(:domain {})\n' \
'\t(:objects {})\n' \
'\t(:init \n\t\t{})\n' \
'\t(:goal {})'.format(
problem, domain,
' '.join(sorted(map(pddl_from_object, objects))), # map(pddl_parameter,
'\n\t\t'.join(sorted(filter(lambda p: p is not None,
map(pddl_from_evaluation, evaluations)))),
pddl_from_expression(goal_expression))
if objective is not None:
s += '\n\t(:metric minimize ({}))'.format(objective)
return s + ')\n'
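# Hedged sketch of the emitted string (domain, objects and facts are illustrative):
#     (define (problem p0)
#         (:domain tamp)
#         (:objects v0 v1)
#         (:init
#             (atconf v0))
#         (:goal (atconf v1))
#         (:metric minimize (total-cost)))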
def get_problem_pddl(evaluations, goal_exp, domain_pddl, temporal=True):
[domain_name] = re.findall(r'\(domain ([^ ]+)\)', domain_pddl)
problem_name = domain_name
objective = TOTAL_TIME if temporal else TOTAL_COST
problem_pddl = pddl_problem(domain_name, problem_name, evaluations, goal_exp, objective=objective)
#write_pddl(domain_pddl, problem_pddl, TEMP_DIR)
return problem_pddl
| 3,115 |
Python
| 37 | 120 | 0.649117 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/generator.py
|
import time
from collections import namedtuple, deque
try:
    from collections.abc import Iterator  # moved in Python 3.3; removed from collections in 3.10
except ImportError:
    from collections import Iterator
from itertools import count
from hsr_tamp.pddlstream.utils import INF, elapsed_time
# TODO: indicate wild stream output just from the output form
# TODO: depth limited and cycle-free optimistic objects
class BoundedGenerator(Iterator):
"""
A generator with a fixed length.
    The generator tracks its number of calls, so callers can detect exhaustion
    (via the `enumerated` property) without spending an extra call just to raise StopIteration.
"""
def __init__(self, generator, max_calls=INF):
self.generator = generator
self.max_calls = max_calls
self.stopped = False
self.history = []
@property
def calls(self):
return len(self.history)
@property
def enumerated(self):
return self.stopped or (self.max_calls <= self.calls)
def next(self):
if self.enumerated:
raise StopIteration()
try:
self.history.append(next(self.generator))
except StopIteration:
self.stopped = True
raise StopIteration()
return self.history[-1]
__next__ = next
def get_next(generator, default=[]):
new_values = default
enumerated = False
try:
new_values = next(generator)
except StopIteration:
enumerated = True
if isinstance(generator, BoundedGenerator):
enumerated |= generator.enumerated
return new_values, enumerated
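# Minimal usage sketch (values are illustrative): BoundedGenerator lets callers
# detect exhaustion through `enumerated` instead of catching StopIteration.
#     gen = BoundedGenerator(iter([[1], [2], [3]]), max_calls=2)
#     values, done = get_next(gen)  # -> [1], False
#     values, done = get_next(gen)  # -> [2], True (max_calls reached)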
##################################################
# Methods that convert some procedure -> function to a generator of lists
def from_list_gen_fn(list_gen_fn):
# Purposefully redundant for now
return list_gen_fn
def from_gen_fn(gen_fn):
return from_list_gen_fn(lambda *args, **kwargs: ([] if ov is None else [ov]
for ov in gen_fn(*args, **kwargs)))
def from_sampler(sampler, max_attempts=INF):
def gen_fn(*input_values):
attempts = count()
while next(attempts) < max_attempts:
yield sampler(*input_values)
return from_gen_fn(gen_fn)
##################################################
# Methods that convert some procedure -> function to a BoundedGenerator
def from_list_fn(list_fn):
#return lambda *args, **kwargs: iter([list_fn(*args, **kwargs)])
return lambda *args, **kwargs: BoundedGenerator(iter([list_fn(*args, **kwargs)]), max_calls=1)
def from_fn(fn):
def list_fn(*args, **kwargs):
outputs = fn(*args, **kwargs)
return [] if outputs is None else [outputs]
return from_list_fn(list_fn)
def outputs_from_boolean(boolean):
return tuple() if boolean else None
def from_test(test):
return from_fn(lambda *args, **kwargs: outputs_from_boolean(test(*args, **kwargs)))
def from_constant(constant):
return from_fn(fn_from_constant(constant))
def negate_test(test):
return lambda *args, **kwargs: not test(*args, **kwargs)
def from_gen(gen):
return from_gen_fn(lambda *args, **kwargs: iter(gen))
def empty_gen():
return from_gen([])
##################################################
# Methods that convert some procedure -> function
def fn_from_constant(constant):
return lambda *args, **kwargs: constant
universe_test = fn_from_constant(True)
empty_test = fn_from_constant(False)
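# Hedged sketch of how these wrappers are typically combined into a stream map;
# sample_pose, compute_ik and collision_free are assumed user-defined procedures,
# not part of this module:
#     stream_map = {
#         'sample-pose': from_gen_fn(sample_pose),      # yields output tuples indefinitely
#         'inverse-kinematics': from_fn(compute_ik),    # returns one output tuple or None
#         'collision-free': from_test(collision_free),  # returns a boolean
#     }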
##################################################
def accelerate_list_gen_fn(list_gen_fn, num_elements=1, max_attempts=1, max_time=INF):
"""
Accelerates a list_gen_fn by eagerly generating num_elements at a time if possible
"""
def new_list_gen_fn(*inputs):
generator = list_gen_fn(*inputs)
terminated = False
while not terminated:
start_time = time.time()
elements = []
for i in range(max_attempts):
if terminated or (num_elements <= len(elements)) or (max_time <= elapsed_time(start_time)):
break
new_elements, terminated = get_next(generator)
elements.extend(new_elements)
yield elements
return new_list_gen_fn
##################################################
Composed = namedtuple('Composed', ['outputs', 'step', 'generator'])
def compose_gen_fns(*gen_fns):
assert gen_fns
# Assumes consistent ordering of inputs/outputs
# Samplers are a special case where only the first needs to be a generator
# TODO: specify info about what to compose
# TODO: alternatively, make a new stream that composes several
def gen_fn(*inputs):
queue = deque([Composed([], 0, gen_fns[0](*inputs))])
while queue:
composed = queue.popleft()
new_outputs_list, terminated = get_next(composed.generator)
for new_outputs in new_outputs_list:
outputs = composed.outputs + new_outputs
if composed.step == (len(gen_fns) - 1):
yield outputs
else:
next_step = composed.step + 1
                    # Composed has no 'output_values' field; pass the accumulated outputs forward
                    generator = gen_fns[next_step](*(list(inputs) + list(outputs)))
queue.append(Composed(outputs, next_step, generator))
if not new_outputs_list:
yield None
if not terminated:
queue.append(composed)
return gen_fn
def wild_gen_fn_from_gen_fn(gen_fn):
def wild_gen_fn(*args, **kwargs):
for output_list in gen_fn(*args, **kwargs):
fact_list = []
yield output_list, fact_list
return wild_gen_fn
def gen_fn_from_wild_gen_fn(wild_gen_fn):
def gen_fn(*args, **kwargs):
for output_list, _ in wild_gen_fn(*args, **kwargs):
yield output_list
    return gen_fn
| 5,672 |
Python
| 30.17033 | 107 | 0.592031 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/conversion.py
|
from __future__ import print_function
import collections
try:
    from collections.abc import Sequence  # moved in Python 3.3; removed from collections in 3.10
except ImportError:
    from collections import Sequence
from itertools import product
from hsr_tamp.pddlstream.language.constants import EQ, AND, OR, NOT, CONNECTIVES, QUANTIFIERS, OPERATORS, OBJECTIVES, \
Head, Evaluation, get_prefix, get_args, is_parameter, is_plan, Fact, Not, Equal, Action, StreamAction, \
FunctionAction, DurativeAction, Solution, Assignment, OptPlan, Certificate
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import str_from_object, apply_mapping
def replace_expression(parent, fn):
prefix = get_prefix(parent)
if prefix == EQ:
assert(len(parent) == 3)
value = parent[2]
        if isinstance(parent[2], Sequence):
value = replace_expression(value, fn)
return prefix, replace_expression(parent[1], fn), value
elif prefix in (CONNECTIVES + OBJECTIVES):
children = parent[1:]
return (prefix,) + tuple(replace_expression(child, fn) for child in children)
elif prefix in QUANTIFIERS:
assert(len(parent) == 3)
parameters = parent[1]
child = parent[2]
return prefix, parameters, replace_expression(child, fn)
name = get_prefix(parent).lower()
args = get_args(parent)
return Fact(name, map(fn, args))
def obj_from_value_expression(parent):
return replace_expression(parent, lambda o: o if is_parameter(o) else Object.from_value(o))
def value_from_obj_expression(parent):
return replace_expression(parent, lambda o: o.value)
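# Round-trip sketch (the expression below is illustrative): non-parameter values
# are interned as Objects on the way in and unwrapped again on the way out.
#     goal = ('and', ('atconf', (0.0, 1.0)), ('holding', 'block0'))
#     obj_goal = obj_from_value_expression(goal)          # wraps values via Object.from_value
#     values_back = value_from_obj_expression(obj_goal)   # recovers the raw values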
def value_from_evaluation(evaluation):
return value_from_obj_expression(fact_from_evaluation(evaluation))
##################################################
def get_formula_operators(formula):
if formula is None:
return set()
prefix = get_prefix(formula)
if prefix not in OPERATORS:
return set()
operators = {prefix}
for subformula in formula[1:]:
operators.update(get_formula_operators(subformula))
return operators
def dnf_from_positive_formula(parent):
if parent is None:
return []
prefix = get_prefix(parent)
assert(prefix not in (QUANTIFIERS + (NOT, EQ))) # also check if atom?
children = []
if prefix == AND:
for combo in product(*(dnf_from_positive_formula(child) for child in parent[1:])):
children.append([fact for clause in combo for fact in clause])
elif prefix == OR:
for child in parent[1:]:
children.extend(dnf_from_positive_formula(child))
else:
# TODO: IMPLY
children.append([tuple(parent)])
return children
def list_from_conjunction(parent):
if parent is None:
return []
clauses = dnf_from_positive_formula(parent)
if not clauses:
return clauses
if len(clauses) >= 2:
        raise ValueError('Formula {} has more than one conjunctive clause'.format(parent))
return clauses[0]
def substitute_expression(parent, mapping):
if any(isinstance(parent, Class) for Class in [str, Object, OptimisticObject]):
return mapping.get(parent, parent)
return tuple(substitute_expression(child, mapping) for child in parent)
def substitute_fact(fact, mapping):
return Fact(get_prefix(fact), apply_mapping(get_args(fact), mapping))
##################################################
def pddl_from_object(obj):
if isinstance(obj, str):
return obj
return obj.pddl
def pddl_list_from_expression(tree):
if isinstance(tree, Object) or isinstance(tree, OptimisticObject):
return pddl_from_object(tree)
if isinstance(tree, str):
return tree
return tuple(map(pddl_list_from_expression, tree))
##################################################
def is_atom(evaluation):
return evaluation.value is True
def is_negated_atom(evaluation):
return evaluation.value is False
def objects_from_evaluations(evaluations):
# TODO: assumes object predicates
objects = set()
for evaluation in evaluations:
objects.update(evaluation.head.args)
return objects
##################################################
def head_from_fact(fact):
return Head(get_prefix(fact), get_args(fact))
def evaluation_from_fact(fact):
prefix = get_prefix(fact)
if prefix == EQ:
head, value = fact[1:]
elif prefix == NOT:
head = fact[1]
value = False
else:
head = fact
value = True
return Evaluation(head_from_fact(head), value)
def fact_from_evaluation(evaluation):
fact = Fact(evaluation.head.function, evaluation.head.args)
if is_atom(evaluation):
return fact
elif is_negated_atom(evaluation):
return Not(fact)
return Equal(fact, evaluation.value)
# def state_from_evaluations(evaluations):
# # TODO: default value?
# # TODO: could also implement within predicates
# state = {}
# for evaluation in evaluations:
# if evaluation.head in state:
# assert(evaluation.value == state[evaluation.head])
# state[evaluation.head] = evaluation.value
# return state
##################################################
def obj_from_pddl(pddl):
if pddl in Object._obj_from_name:
return Object.from_name(pddl)
elif pddl in OptimisticObject._obj_from_name:
return OptimisticObject.from_name(pddl)
raise ValueError(pddl)
def values_from_objects(objects):
return tuple(obj.value for obj in objects)
#return tuple(map(value_from_object, objects))
def temporal_from_sequential(action):
# TODO: clean this up
assert isinstance(action, DurativeAction)
name, args, start, duration = action
if name[-2] != '-':
return action
new_name, index = name[:-2], int(name[-1])
if index != 0: # Only keeps the start action
return None
return DurativeAction(new_name, args, start, duration)
def transform_action_args(action, fn):
if isinstance(action, Action):
name, args = action
return Action(name, tuple(map(fn, args)))
elif isinstance(action, DurativeAction):
action = temporal_from_sequential(action)
if action is None:
return None
name, args, start, duration = action
return DurativeAction(name, tuple(map(fn, args)), start, duration)
elif isinstance(action, StreamAction):
name, inputs, outputs = action
return StreamAction(name, tuple(map(fn, inputs)), tuple(map(fn, outputs)))
elif isinstance(action, FunctionAction):
name, inputs = action
return FunctionAction(name, tuple(map(fn, inputs)))
elif isinstance(action, Assignment):
args, = action
return Assignment(tuple(map(fn, args)))
raise NotImplementedError(action)
def transform_plan_args(plan, fn):
if not is_plan(plan):
return plan
return list(filter(lambda a: a is not None, [transform_action_args(action, fn) for action in plan]))
# TODO: would be better just to rename everything at the start. Still need to handle constants
def obj_from_pddl_plan(pddl_plan):
return transform_plan_args(pddl_plan, obj_from_pddl)
def param_from_object(obj):
if isinstance(obj, OptimisticObject):
return repr(obj)
#return obj.pddl
if isinstance(obj, Object):
return obj.value
raise ValueError(obj)
def params_from_objects(objects):
return tuple(map(param_from_object, objects))
def objects_from_values(values):
return tuple(map(Object.from_value, values))
##################################################
#def expression_holds(expression, evaluations):
# pass
def revert_solution(plan, cost, evaluations):
all_facts = list(map(value_from_evaluation, evaluations))
if isinstance(plan, OptPlan):
action_plan = transform_plan_args(plan.action_plan, param_from_object)
preimage_facts = list(map(value_from_obj_expression, plan.preimage_facts))
else:
action_plan = transform_plan_args(plan, param_from_object)
preimage_facts = None
certificate = Certificate(all_facts, preimage_facts)
return Solution(action_plan, cost, certificate)
#def opt_obj_from_value(value):
# if Object.has_value(value):
# return Object.from_value(value)
# return OptimisticObject.from_opt(value)
# # TODO: better way of doing this?
# #return OptimisticObject._obj_from_inputs.get(value, Object.from_value(value))
def str_from_head(head):
return '{}{}'.format(get_prefix(head), str_from_object(get_args(head)))
def str_from_fact(fact):
prefix = get_prefix(fact)
if prefix == NOT:
return '~{}'.format(str_from_fact(fact[1]))
if prefix == EQ: # TODO: predicate = vs function =
_, head, value = fact
return '{}={}'.format(str_from_fact(head), value)
return str_from_head(fact)
| 8,812 |
Python
| 33.560784 | 119 | 0.650817 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/language/object.py
|
from collections import namedtuple, defaultdict
from itertools import count
from hsr_tamp.pddlstream.language.constants import get_parameter_name
#from hsr_tamp.pddlstream.language.conversion import values_from_objects
from hsr_tamp.pddlstream.utils import str_from_object, is_hashable
USE_HASH = True
USE_OBJ_STR = True
USE_OPT_STR = True
OPT_PREFIX = '#'
PREFIX_LEN = 1
class Object(object):
_prefix = 'v'
_obj_from_id = {}
_obj_from_value = {}
_obj_from_name = {}
def __init__(self, value, stream_instance=None, name=None):
self.value = value
self.index = len(Object._obj_from_name)
if name is None:
#name = str(value) # TODO: use str for the name when possible
name = '{}{}'.format(self._prefix, self.index)
self.pddl = name
self.stream_instance = stream_instance # TODO: store first created stream instance
Object._obj_from_id[id(self.value)] = self
Object._obj_from_name[self.pddl] = self
if is_hashable(value):
Object._obj_from_value[self.value] = self
def is_unique(self):
return True
def is_shared(self):
return False
@staticmethod
def from_id(value):
if id(value) not in Object._obj_from_id:
return Object(value)
return Object._obj_from_id[id(value)]
@staticmethod
def has_value(value):
if USE_HASH and not is_hashable(value):
return id(value) in Object._obj_from_id
return value in Object._obj_from_value
@staticmethod
def from_value(value):
if USE_HASH and not is_hashable(value):
return Object.from_id(value)
if value not in Object._obj_from_value:
return Object(value)
return Object._obj_from_value[value]
@staticmethod
def from_name(name):
return Object._obj_from_name[name]
@staticmethod
def reset():
Object._obj_from_id.clear()
Object._obj_from_value.clear()
Object._obj_from_name.clear()
def __lt__(self, other): # For heapq on python3
return self.index < other.index
def __repr__(self):
if USE_OBJ_STR:
return str_from_object(self.value) # str
return self.pddl
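# Interning sketch (values are illustrative): hashable values are cached by value,
# unhashable ones (e.g. numpy arrays) by id(), so repeated lookups return the same wrapper.
#     q1 = Object.from_value((0.0, 1.0))
#     q2 = Object.from_value((0.0, 1.0))
#     assert q1 is q2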
##################################################
class UniqueOptValue(namedtuple('UniqueOptTuple', ['instance', 'sequence_index', 'output'])):
@property
def parameter(self):
# return self.instance.external.outputs[self.output_index]
return self.output
class SharedOptValue(namedtuple('SharedOptTuple', ['stream', 'inputs', 'input_objects', 'output'])):
@property
def values(self):
return tuple(obj.value for obj in self.input_objects)
#return values_from_objects(self.input_objects)
##################################################
class DebugValue(object): # TODO: could just do an object
_output_counts = defaultdict(count)
_prefix = '@' # $ | @
def __init__(self, stream, input_values, output_parameter):
self.stream = stream
self.input_values = input_values
self.output_parameter = output_parameter
self.index = next(self._output_counts[output_parameter])
# def __iter__(self):
# return self.stream, self.input_values, self.output_parameter
# def __hash__(self):
# return hash(tuple(self)) # self.__class__
# def __eq__(self, other):
# return (self.__class__ == other.__class__) and (tuple(self) == tuple(other))
def __repr__(self):
# Can also just return first letter of the prefix
return '{}{}{}'.format(self._prefix, get_parameter_name(self.output_parameter), self.index)
class SharedDebugValue(namedtuple('SharedDebugValue', ['stream', 'output_parameter'])):
    # TODO: this alone doesn't refine at the shared object level
_prefix = '&' # $ | @ | &
def __repr__(self):
#index = hash(self.stream) % 1000
#index = self.stream.outputs.index(self.output_parameter) # TODO: self.stream is a str
#return '{}{}{}'.format(self._prefix, get_parameter_name(self.output_parameter), index)
#return '{}{}'.format(self._prefix, self.stream)
return '{}{}'.format(self._prefix, get_parameter_name(self.output_parameter))
##################################################
# TODO: just one object class or have Optimistic extend Object
# TODO: make a parameter class that has access to some underlying value
class OptimisticObject(object):
_prefix = '{}o'.format(OPT_PREFIX) # $ % #
_obj_from_inputs = {}
_obj_from_name = {}
_count_from_prefix = {}
def __init__(self, value, param):
# TODO: store first created instance
self.value = value
self.param = param
self.index = len(OptimisticObject._obj_from_inputs)
if USE_OPT_STR and isinstance(self.param, UniqueOptValue):
# TODO: instead just endow UniqueOptValue with a string function
#parameter = self.param.instance.external.outputs[self.param.output_index]
parameter = self.param.output
prefix = get_parameter_name(parameter)[:PREFIX_LEN]
var_index = next(self._count_from_prefix.setdefault(prefix, count()))
self.repr_name = '{}{}{}'.format(OPT_PREFIX, prefix, var_index) #self.index)
self.pddl = self.repr_name
else:
self.pddl = '{}{}'.format(self._prefix, self.index)
self.repr_name = self.pddl
OptimisticObject._obj_from_inputs[(value, param)] = self
OptimisticObject._obj_from_name[self.pddl] = self
def is_unique(self):
return isinstance(self.param, UniqueOptValue)
def is_shared(self):
#return isinstance(self.param, SharedOptValue)
return not isinstance(self.param, UniqueOptValue) # OptValue
@staticmethod
def from_opt(value, param):
# TODO: make param have a default value?
key = (value, param)
if key not in OptimisticObject._obj_from_inputs:
return OptimisticObject(value, param)
return OptimisticObject._obj_from_inputs[key]
@staticmethod
def from_name(name):
return OptimisticObject._obj_from_name[name]
@staticmethod
def reset():
OptimisticObject._obj_from_inputs.clear()
OptimisticObject._obj_from_name.clear()
OptimisticObject._count_from_prefix.clear()
def __lt__(self, other): # For heapq on python3
return self.index < other.index
def __repr__(self):
return self.repr_name
#return repr(self.repr_name) # Prints in quotations
| 6,602 |
Python
| 39.509202 | 100 | 0.617995 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/serialized.py
|
from __future__ import print_function
from hsr_tamp.pddlstream.algorithms.meta import solve_restart, solve
from hsr_tamp.pddlstream.language.temporal import parse_domain
from hsr_tamp.pddlstream.utils import INF, Verbose, str_from_object, SEPARATOR
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.focused import solve_focused
from hsr_tamp.pddlstream.language.conversion import Certificate, Object, \
transform_plan_args, value_from_evaluation
from hsr_tamp.pddlstream.language.constants import PDDLProblem, get_function, get_prefix, print_solution, AND, get_args, And, \
Solution, Or, is_plan
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, \
get_action_instances, apply_action, evaluation_from_fd, get_fluents
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init
def serialize_goal(goal):
if get_prefix(goal) == AND:
return get_args(goal)
return [goal]
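# Illustrative example (g1, g2 are hypothetical goal literals): a conjunctive goal
# is split into its parts, anything else is treated as a single serialized goal.
#     serialize_goal(('and', g1, g2))  # -> [g1, g2]
#     serialize_goal(g1)               # -> [g1]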
def partition_facts(domain, facts):
fluents = get_fluents(domain)
static_facts = []
fluent_facts = []
for fact in facts:
if get_prefix(get_function(fact)).lower() in fluents:
fluent_facts.append(fact)
else:
static_facts.append(fact)
return static_facts, fluent_facts
def apply_actions(domain, state, plan, unit_costs=False):
import pddl
# Goal serialization just assumes the tail of the plan includes an abstract action to achieve each condition
static_state, _ = partition_facts(domain, state)
print('Static:', static_state)
# TODO: might need properties that involve an object that aren't useful yet
evaluations = evaluations_from_init(state)
#goal_exp = obj_from_value_expression(goal)
goal_exp = None
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
task.init = set(task.init)
for instance in get_action_instances(task, transform_plan_args(plan, Object.from_value)):
apply_action(task.init, instance)
fluents = get_fluents(domain)
fluent_state = [value_from_evaluation(evaluation_from_fd(atom))
for atom in task.init if isinstance(atom, pddl.Atom) and (atom.predicate in fluents)]
print('Fluent:', fluent_state)
state = static_state + fluent_state
return state
##################################################
def solve_serialized(initial_problem, stream_info={}, unit_costs=False, unit_efforts=False, verbose=True,
retain_facts=True, **kwargs):
# TODO: be careful of CanMove deadends
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = initial_problem
_, _, domain, streams = parse_problem(
initial_problem, stream_info, constraints=None, unit_costs=unit_costs, unit_efforts=unit_efforts)
    static_init, _ = partition_facts(domain, init)  # might not be able to reprove static_init
#global_all, global_preimage = [], []
global_plan = []
global_cost = 0
state = list(init)
goals = serialize_goal(goal)
# TODO: instead just track how the true init updates
for i in range(len(goals)):
# TODO: option in algorithms to pass in existing facts
for stream in streams:
stream.reset()
goal = And(*goals[:i+1])
print('Goal:', str_from_object(goal))
        # No strict need to reuse streams because they are generator functions
#local_problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, state, goal)
local_problem = PDDLProblem(domain_pddl, constant_map, streams, None, state, goal)
with Verbose(verbose):
solution = solve_focused(local_problem, stream_info=stream_info, unit_costs=unit_costs,
unit_efforts=unit_efforts, verbose=True, **kwargs)
print_solution(solution)
local_plan, local_cost, local_certificate = solution
if local_plan is None:
# TODO: replan upon failure
global_certificate = Certificate(all_facts={}, preimage_facts=None)
return Solution(None, INF, global_certificate)
if retain_facts:
state = local_certificate.all_facts
else:
_, fluent_facts = partition_facts(domain, state)
state = static_init + fluent_facts + local_certificate.preimage_facts # TODO: include functions
#print('State:', state)
# TODO: indicate when each fact is used
# TODO: record failed facts
global_plan.extend(local_plan) # TODO: compute preimage of the executed plan
global_cost += local_cost
static_state, _ = partition_facts(domain, state)
#global_all.extend(partition_facts(domain, local_certificate.all_facts)[0])
#global_preimage.extend(static_state)
print('Static:', static_state)
state = apply_actions(domain, state, local_plan, unit_costs=unit_costs)
print(SEPARATOR)
#user_input('Continue?')
# TODO: could also just test the goal here
# TODO: constrain future plan skeletons
global_certificate = Certificate(all_facts={}, preimage_facts=None)
return global_plan, global_cost, global_certificate
##################################################
def solve_deferred(initial_problem, stream_info={}, unit_costs=False, unit_efforts=False, verbose=True,
retain_facts=True, **kwargs):
# TODO: serialize solving deferred problems
# TODO: can impose plan skeleton constraints as well
# TODO: investigate case where the first plan skeleton isn't feasible (e.g. due to blockage)
raise NotImplementedError()
#######################################################
def create_simplified_problem(problem, use_actions=False, use_streams=False, new_goal=None):
# TODO: check whether goal is a conjunction
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = problem
if not use_streams:
stream_pddl = None
if new_goal is None:
new_goal = goal_parts
domain = parse_domain(domain_pddl) # TODO: Constant map value @base not mentioned in domain :constants
if not use_actions:
domain.actions[:] = [] # No actions
return PDDLProblem(domain, constant_map, stream_pddl, stream_map, init, new_goal)
def test_init_goal(problem, **kwargs):
problem = create_simplified_problem(problem, use_actions=False, use_streams=False, new_goal=None)
plan, cost, certificate = solve(problem, **kwargs)
assert not plan
is_goal = is_plan(plan)
return is_goal, certificate
#######################################################
def solve_all_goals(initial_problem, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
# TODO(caelan): cleaner specification of goal ordering
goal_formula = And(*goal_parts)
print(solve_all_goals.__name__, goal_formula)
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
return solve_restart(problem, **kwargs)
def solve_first_goal(initial_problem, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
achieved_parts = []
unachieved_parts = []
for task_part in goal_parts:
# TODO: store any stream evaluations (tests) and limit complexity
problem = create_simplified_problem(initial_problem, new_goal=task_part)
solution = solve_restart(problem, **kwargs)
plan, _, _ = solution
if plan is None:
unachieved_parts.append(task_part)
elif len(plan) == 0:
achieved_parts.append(task_part)
else:
raise RuntimeError(task_part)
# print(achieved_parts)
# print(unachieved_parts)
# TODO: reset to initial state if not achieved
goal_formula = And(*achieved_parts)
if unachieved_parts:
goal_formula = And(Or(*unachieved_parts), goal_formula)
    print(solve_first_goal.__name__, goal_formula)
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
return solve_restart(problem, **kwargs)
def solve_next_goal(initial_problem, serialize=True, **kwargs):
domain_pddl, constant_map, stream_pddl, stream_map, init, goal_parts = initial_problem
# TODO: store serialization state to ensure progress is made
# goal_formula = And(Or(*task_parts), *reset_parts) # TODO: still possibly having the disjunctive goal issue
indices = list(range(0, len(goal_parts), 1)) if serialize else [len(goal_parts)]
for i in indices:
        current_parts = goal_parts[:i+1]  # Keep goal_parts intact for later iterations
        goal_formula = And(*current_parts)
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal_formula)
print('Goal {}: {}'.format(i, goal_formula))
# TODO: reset streams?
solution = solve_restart(problem, **kwargs)
# TODO: detect which goals were achieved
plan, _, _ = solution
if plan is None:
return solution
if (i == len(indices) - 1) or (len(plan) >= 1):
return solution
return Solution(plan=[], cost=0, certificate=[])
| 9,246 |
Python
| 44.777228 | 127 | 0.660934 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/common.py
|
import time
from collections import namedtuple, OrderedDict
from hsr_tamp.pddlstream.language.constants import is_plan, get_length, FAILED #, INFEASIBLE, SUCCEEDED
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, obj_from_value_expression, revert_solution
from hsr_tamp.pddlstream.utils import INF, elapsed_time, check_memory
# Complexity is a way to characterize the number of external evaluations required for a solution
# Most algorithms regularize to prefer lower complexity solutions
# Also consider depth, level, priority, layer
# Effort incorporates success rate while complexity doesn't
# Complexity could incorporate how likely something is to help with a task in general though
# Effort relates to future expected time while complexity refers to past time
COMPLEXITY_OP = max # max | sum
INIT_EVALUATION = None
INTERNAL_EVALUATION = False
UNKNOWN_EVALUATION = 'unknown'
EvaluationNode = namedtuple('EvaluationNode', ['complexity', 'result'])
Solution = namedtuple('Solution', ['plan', 'cost', 'time'])
SOLUTIONS = [] # TODO: remove global variable
class SolutionStore(object):
def __init__(self, evaluations, max_time, success_cost, verbose, max_memory=INF):
# TODO: store a map from head to value?
# TODO: include other problem information here?
# TODO: determine when the plan converges
self.evaluations = evaluations
#self.initial_evaluations = copy.copy(evaluations)
self.start_time = time.time()
self.max_time = max_time
self.max_memory = max_memory
self.success_cost = success_cost # Inclusive
self.verbose = verbose
#self.best_cost = self.cost_fn(self.best_plan)
self.solutions = []
self.sample_time = 0.
@property
def search_time(self):
return self.elapsed_time() - self.sample_time
@property
def best_plan(self):
# TODO: return INFEASIBLE if can prove no solution
return self.solutions[-1].plan if self.solutions else FAILED
@property
def best_cost(self):
return self.solutions[-1].cost if self.solutions else INF
def add_plan(self, plan, cost):
# TODO: double-check that plan is a solution
if is_plan(plan) and (cost < self.best_cost):
self.solutions.append(Solution(plan, cost, elapsed_time(self.start_time)))
def has_solution(self):
return is_plan(self.best_plan)
def is_solved(self):
return self.has_solution() and (self.best_cost <= self.success_cost)
def elapsed_time(self):
return elapsed_time(self.start_time)
def is_timeout(self):
return (self.max_time <= self.elapsed_time()) or not check_memory(self.max_memory)
def is_terminated(self):
return self.is_solved() or self.is_timeout()
#def __repr__(self):
# raise NotImplementedError()
def extract_solution(self):
SOLUTIONS[:] = self.solutions
return revert_solution(self.best_plan, self.best_cost, self.evaluations)
def export_summary(self): # TODO: log, etc...
# TODO: SOLUTIONS
#status = SUCCEEDED if self.is_solved() else FAILED # TODO: INFEASIBLE, OPTIMAL
return {
'solved': self.is_solved(),
#'solved': self.has_solution(),
'solutions': len(self.solutions),
'cost': self.best_cost,
'length': get_length(self.best_plan),
'evaluations': len(self.evaluations),
'search_time': self.search_time,
'sample_time': self.sample_time,
'run_time': self.elapsed_time(),
'timeout': self.is_timeout(),
#'status': status,
}
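# Usage sketch (arguments are illustrative): algorithms poll the store to decide
# when to stop interleaving search and sampling.
#     store = SolutionStore(evaluations, max_time=60, success_cost=INF, verbose=False)
#     while not store.is_terminated():
#         ...  # search / sample; call store.add_plan(plan, cost) on success
#     plan, cost, certificate = store.extract_solution()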
##################################################
def add_fact(evaluations, fact, result=INIT_EVALUATION, complexity=0):
evaluation = evaluation_from_fact(fact)
if (evaluation not in evaluations) or (complexity < evaluations[evaluation].complexity):
evaluations[evaluation] = EvaluationNode(complexity, result)
return True
return False
def add_facts(evaluations, facts, **kwargs):
new_evaluations = []
for fact in facts:
if add_fact(evaluations, fact, **kwargs):
new_evaluations.append(evaluation_from_fact(fact))
return new_evaluations
def add_certified(evaluations, result, **kwargs):
complexity = result.compute_complexity(evaluations, **kwargs)
return add_facts(evaluations, result.get_certified(), result=result, complexity=complexity)
def evaluations_from_init(init):
evaluations = OrderedDict()
for raw_fact in init:
fact = obj_from_value_expression(raw_fact)
add_fact(evaluations, fact, result=INIT_EVALUATION, complexity=0)
return evaluations
def combine_complexities(complexities, complexity_op=COMPLEXITY_OP):
return complexity_op([0] + list(complexities))
def compute_complexity(evaluations, facts, complexity_op=COMPLEXITY_OP):
if not facts:
return 0
return complexity_op(evaluations[evaluation_from_fact(fact)].complexity for fact in facts)
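# Illustrative relationship (f1, f2 are hypothetical facts): with COMPLEXITY_OP = max,
# the complexity of a set of facts is the largest stored complexity among them, and 0 when empty.
#     compute_complexity(evaluations, [])        # -> 0
#     compute_complexity(evaluations, [f1, f2])  # -> max of the two stored complexities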
##################################################
def optimistic_complexity(evaluations, optimistic_facts, fact):
if fact in optimistic_facts: # Matters due to reachieving
return optimistic_facts[fact]
evaluation = evaluation_from_fact(fact)
#if evaluation in evaluations:
return evaluations[evaluation].complexity
#return optimistic_facts[fact]
def stream_plan_preimage(stream_plan):
# Easy because monotonic
preimage = set()
achieved = set()
for stream in stream_plan:
preimage.update(set(stream.get_domain()) - achieved)
achieved.update(stream.get_certified())
return preimage
def stream_plan_complexity(evaluations, stream_plan, stream_calls, complexity_op=COMPLEXITY_OP):
if not is_plan(stream_plan):
return INF
# TODO: difference between a result having a particular complexity and the next result having something
#optimistic_facts = {}
optimistic_facts = {fact: evaluations[evaluation_from_fact(fact)].complexity
for fact in stream_plan_preimage(stream_plan)}
result_complexities = []
#complexity = 0
for i, result in enumerate(stream_plan):
# if result.external.get_complexity(num_calls=INF) == 0: # TODO: skip if true
result_complexity = complexity_op([0] + [optimistic_facts[fact]
#optimistic_complexity(evaluations, optimistic_facts, fact)
for fact in result.get_domain()])
# if stream_calls is None:
# num_calls = result.instance.num_calls
# else:
num_calls = stream_calls[i]
result_complexity += result.external.get_complexity(num_calls)
result_complexities.append(result_complexity)
#complexity = complexity_op(complexity, result_complexity)
for fact in result.get_certified():
if fact not in optimistic_facts:
optimistic_facts[fact] = result_complexity
complexity = complexity_op([0] + result_complexities)
return complexity
def is_instance_ready(evaluations, instance):
return all(evaluation_from_fact(f) in evaluations
for f in instance.get_domain())
| 7,300 |
Python
| 40.016854 | 116 | 0.661918 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/refinement.py
|
from __future__ import print_function
import time
from itertools import product
from copy import deepcopy, copy
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.scheduling.plan_streams import plan_streams, OptSolution
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.algorithms.constraints import add_plan_constraints, PlanConstraints, WILD
from hsr_tamp.pddlstream.language.constants import FAILED, INFEASIBLE, is_plan
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact, substitute_expression
from hsr_tamp.pddlstream.language.function import FunctionResult, Function
from hsr_tamp.pddlstream.language.stream import StreamResult, Result
from hsr_tamp.pddlstream.language.statistics import check_effort, compute_plan_effort
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import INF, safe_zip, get_mapping, implies, elapsed_time
CONSTRAIN_STREAMS = False
CONSTRAIN_PLANS = False
MAX_DEPTH = INF # 1 | INF
def is_refined(stream_plan):
# TODO: lazily expand the shared objects in some cases to prevent increase in size
if not is_plan(stream_plan):
return True
# TODO: some of these opt_index equal None
return all((result.opt_index is None) or result.is_refined()
for result in stream_plan)
##################################################
def optimistic_process_instance(instantiator, instance, verbose=False):
for result in instance.next_optimistic():
if verbose:
            print(result) # TODO: make a debug tool that reports the optimistic streams
new_facts = False
complexity = instantiator.compute_complexity(instance)
for fact in result.get_certified():
new_facts |= instantiator.add_atom(evaluation_from_fact(fact), complexity)
if isinstance(result, FunctionResult) or new_facts:
yield result
def prune_high_effort_streams(streams, max_effort=INF, **effort_args):
# TODO: convert streams to test streams with extremely high effort
low_effort_streams = []
for stream in streams:
effort = stream.get_effort(**effort_args)
if isinstance(stream, Function) or check_effort(effort, max_effort):
low_effort_streams.append(stream)
return low_effort_streams
def optimistic_process_streams(evaluations, streams, complexity_limit=INF, **effort_args):
optimistic_streams = prune_high_effort_streams(streams, **effort_args)
instantiator = Instantiator(optimistic_streams)
for evaluation, node in evaluations.items():
if node.complexity <= complexity_limit:
instantiator.add_atom(evaluation, node.complexity)
results = []
while instantiator and (instantiator.min_complexity() <= complexity_limit):
results.extend(optimistic_process_instance(instantiator, instantiator.pop_stream()))
# TODO: instantiate and solve to avoid repeated work
exhausted = not instantiator
return results, exhausted
##################################################
def optimistic_stream_instantiation(instance, bindings, opt_evaluations, only_immediate=False):
# TODO: combination for domain predicates
new_instances = []
input_candidates = [bindings.get(i, [i]) for i in instance.input_objects]
if only_immediate and not all(len(candidates) == 1 for candidates in input_candidates):
return new_instances
for input_combo in product(*input_candidates):
mapping = get_mapping(instance.input_objects, input_combo)
domain_evaluations = set(map(evaluation_from_fact, substitute_expression(
instance.get_domain(), mapping))) # TODO: could just instantiate first
if domain_evaluations <= opt_evaluations:
new_instance = instance.external.get_instance(input_combo)
# TODO: method for eagerly evaluating some of these?
if not new_instance.is_refined():
new_instance.refine()
new_instances.append(new_instance)
return new_instances
def optimistic_stream_evaluation(evaluations, stream_plan, use_bindings=True):
# TODO: can also use the instantiator and operate directly on the outputs
# TODO: could bind by just using new_evaluations
evaluations = set(evaluations) # Converts to a set for subset testing
opt_evaluations = set(evaluations)
new_results = []
bindings = {} # TODO: report the depth considered
for opt_result in stream_plan: # TODO: just refine the first step of the plan
for new_instance in optimistic_stream_instantiation(
opt_result.instance, (bindings if use_bindings else {}), opt_evaluations):
for new_result in new_instance.next_optimistic():
opt_evaluations.update(map(evaluation_from_fact, new_result.get_certified()))
new_results.append(new_result)
if isinstance(new_result, StreamResult): # Could not add if same value
for opt, obj in safe_zip(opt_result.output_objects, new_result.output_objects):
bindings.setdefault(opt, []).append(obj)
return new_results, bindings
##################################################
# def compute_stream_results(evaluations, opt_results, externals, complexity_limit, **effort_args):
# # TODO: revisit considering double bound streams
# functions = list(filter(lambda s: type(s) is Function, externals))
# opt_evaluations = evaluations_from_stream_plan(evaluations, opt_results)
# new_results, _ = optimistic_process_streams(opt_evaluations, functions, complexity_limit, **effort_args)
# return opt_results + new_results
def compute_skeleton_constraints(opt_plan, bindings):
skeleton = []
groups = {arg: values for arg, values in bindings.items() if len(values) != 1}
action_plan, preimage_facts = opt_plan
for name, args in action_plan:
new_args = []
for arg in args:
if isinstance(arg, Object):
new_args.append(arg)
elif isinstance(arg, OptimisticObject):
new_args.append(WILD)
# TODO: might cause some strange effects on continuous_tamp -p blocked
#assert bindings.get(arg, [])
#if len(bindings[arg]) == 1:
# new_args.append(bindings[arg][0])
#else:
# #new_args.append(WILD)
# new_args.append(arg)
else:
raise ValueError(arg)
skeleton.append((name, new_args))
# exact=False because we might need new actions
return PlanConstraints(skeletons=[skeleton], groups=groups, exact=False, max_cost=INF)
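# Hedged sketch of the produced constraint (action and object names are illustrative):
# bound Object arguments are kept, OptimisticObject arguments become WILD, and only
# multi-valued bindings are retained as groups.
#     ('move', (q0, #o1)) with bindings {#o1: [q2, q3]}
#     -> skeletons [[('move', (q0, WILD))]], groups {#o1: [q2, q3]}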
def get_optimistic_solve_fn(goal_exp, domain, negative, max_cost=INF, **kwargs):
# TODO: apply to hierarchical actions representations (will need to instantiate more actions)
def fn(evaluations, results, constraints):
if constraints is None:
return plan_streams(evaluations, goal_exp, domain, results, negative,
max_cost=max_cost, **kwargs)
#print(*relaxed_stream_plan(evaluations, goal_exp, domain, results, negative,
# max_cost=max_cost, **kwargs))
#constraints.dump()
domain2 = deepcopy(domain)
evaluations2 = copy(evaluations)
goal_exp2 = add_plan_constraints(constraints, domain2, evaluations2, goal_exp, internal=True)
max_cost2 = max_cost if (constraints is None) else min(max_cost, constraints.max_cost)
return plan_streams(evaluations2, goal_exp2, domain2, results, negative,
max_cost=max_cost2, **kwargs)
return fn
##################################################
def hierarchical_plan_streams(evaluations, externals, results, optimistic_solve_fn, complexity_limit,
depth, constraints, **effort_args):
if MAX_DEPTH <= depth:
return OptSolution(None, None, INF), depth
stream_plan, opt_plan, cost = optimistic_solve_fn(evaluations, results, constraints)
if not is_plan(opt_plan) or is_refined(stream_plan):
return OptSolution(stream_plan, opt_plan, cost), depth
#action_plan, preimage_facts = opt_plan
#dump_plans(stream_plan, action_plan, cost)
#create_visualizations(evaluations, stream_plan, depth)
#print(depth, get_length(stream_plan))
#print('Stream plan ({}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
# get_length(stream_plan), compute_plan_effort(stream_plan), stream_plan,
# get_length(action_plan), cost, str_from_plan(action_plan)))
#if is_plan(stream_plan):
# for result in stream_plan:
# effort = compute_result_effort(result, unit_efforts=True)
# if effort != 0:
# print(result, effort)
#print()
# TODO: identify control parameters that can be separated across actions
new_depth = depth + 1
new_results, bindings = optimistic_stream_evaluation(evaluations, stream_plan)
if not (CONSTRAIN_STREAMS or CONSTRAIN_PLANS):
return OptSolution(FAILED, FAILED, INF), new_depth
#if CONSTRAIN_STREAMS:
# next_results = compute_stream_results(evaluations, new_results, externals, complexity_limit, **effort_args)
#else:
next_results, _ = optimistic_process_streams(evaluations, externals, complexity_limit, **effort_args)
next_constraints = None
if CONSTRAIN_PLANS:
next_constraints = compute_skeleton_constraints(opt_plan, bindings)
return hierarchical_plan_streams(evaluations, externals, next_results, optimistic_solve_fn, complexity_limit,
new_depth, next_constraints, **effort_args)
def iterative_plan_streams(all_evaluations, externals, optimistic_solve_fn, complexity_limit, **effort_args):
# Previously didn't have unique optimistic objects that could be constructed at arbitrary depths
start_time = time.time()
complexity_evals = {e: n for e, n in all_evaluations.items() if n.complexity <= complexity_limit}
num_iterations = 0
while True:
num_iterations += 1
results, exhausted = optimistic_process_streams(complexity_evals, externals, complexity_limit, **effort_args)
opt_solution, final_depth = hierarchical_plan_streams(
complexity_evals, externals, results, optimistic_solve_fn, complexity_limit,
depth=0, constraints=None, **effort_args)
stream_plan, action_plan, cost = opt_solution
print('Attempt: {} | Results: {} | Depth: {} | Success: {} | Time: {:.3f}'.format(
num_iterations, len(results), final_depth, is_plan(action_plan), elapsed_time(start_time)))
if is_plan(action_plan):
return OptSolution(stream_plan, action_plan, cost)
if final_depth == 0:
status = INFEASIBLE if exhausted else FAILED
return OptSolution(status, status, cost)
# TODO: should streams along the sampled path automatically have no optimistic value
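# Minimal usage sketch (assumed setup; these names come from algorithms/focused.py, not
# from this module): the focused algorithm drives the refinement loop roughly as follows.
#
#   optimistic_solve_fn = get_optimistic_solve_fn(goal_exp, domain, negative, max_cost=INF)
#   stream_plan, action_plan, cost = iterative_plan_streams(
#       evaluations, positive_externals, optimistic_solve_fn,
#       complexity_limit=initial_complexity, max_effort=INF)
#
# evaluations, goal_exp, domain, negative, and positive_externals are assumed to be
# produced by algorithms.algorithm.parse_problem and partition_externals.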
| 11,177 |
Python
| 51.478873 | 117 | 0.668426 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/instantiate_task.py
|
from __future__ import print_function
import os
from collections import namedtuple, defaultdict, deque, Counter
from time import time
from hsr_tamp.pddlstream.algorithms.downward import get_literals, get_precondition, get_fluents, get_function_assignments, \
TRANSLATE_OUTPUT, parse_sequential_domain, parse_problem, task_from_domain_problem, GOAL_NAME, literal_holds, \
get_conjunctive_parts, get_conditional_effects
from hsr_tamp.pddlstream.algorithms.relation import Relation, compute_order, solve_satisfaction
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import flatten, apply_mapping, MockSet, elapsed_time, Verbose, safe_remove, ensure_dir, \
str_from_object, user_input, Profiler
import pddl
import instantiate
import translate
import normalize
FD_INSTANTIATE = True
InstantiatedTask = namedtuple('InstantiatedTask', ['task', 'atoms', 'actions', 'axioms',
'reachable_action_params', 'goal_list'])
def instantiate_goal(goal):
goal_list = get_conjunctive_parts(goal)
assert all(isinstance(item, pddl.Literal) for item in goal_list)
return goal_list
def get_goal_instance(goal):
return pddl.PropositionalAction(GOAL_NAME, instantiate_goal(goal), [], None)
##################################################
def get_constants(atom):
return tuple((i, a) for i, a in enumerate(atom.args) if not is_parameter(a))
def instantiate_condition(action, is_static, args_from_predicate):
parameters = {p.name for p in action.parameters}
#if not parameters:
# yield {}
# return
static_conditions = list(filter(is_static, get_literals(get_precondition(action))))
static_parameters = set(filter(is_parameter, flatten(atom.args for atom in static_conditions)))
if not (parameters <= static_parameters):
raise NotImplementedError('Could not instantiate action {} due to parameters: {}'.format(
action.name, str_from_object(parameters - static_parameters)))
atoms_from_cond = {condition: args_from_predicate[condition.predicate, get_constants(condition)]
for condition in static_conditions}
conditions, atoms = zip(*atoms_from_cond.items())
relations = [Relation(conditions[index].args, atoms[index])
for index in compute_order(conditions, atoms)]
solution = solve_satisfaction(relations)
for element in solution.body:
yield solution.get_mapping(element)
def get_reachable_action_params(instantiated_actions):
# TODO: use pddl_from_instance
reachable_action_params = defaultdict(list)
for inst_action in instantiated_actions:
action = inst_action.action
parameters = [p.name for p in action.parameters]
args = apply_mapping(parameters, inst_action.var_mapping)
reachable_action_params[action].append(args) # TODO: does this actually do anything
return reachable_action_params
##################################################
def filter_negated(conditions, negated_from_name):
return list(filter(lambda a: a.predicate not in negated_from_name, conditions))
def get_achieving_axioms(state, operators, negated_from_name={}):
# TODO: order by stream effort
# marking algorithm for propositional Horn logic
unprocessed_from_literal = defaultdict(list)
operator_from_literal = {}
remaining_from_stream = {}
reachable_operators = set() # TODO: only keep facts
queue = deque()
def process_axiom(op, effect):
reachable_operators.add(id(op))
if effect not in operator_from_literal:
operator_from_literal[effect] = op
queue.append(effect)
# TODO: could produce a list of all derived conditions
for op in operators:
preconditions = get_precondition(op)
for cond, effect in get_conditional_effects(op):
conditions = cond + preconditions
remaining_from_stream[id(op), effect] = 0
for literal in filter_negated(conditions, negated_from_name):
if literal_holds(state, literal):
operator_from_literal[literal] = None
else:
remaining_from_stream[id(op), effect] += 1
unprocessed_from_literal[literal].append((op, effect))
if remaining_from_stream[id(op), effect] == 0:
process_axiom(op, effect)
while queue:
literal = queue.popleft()
for op, effect in unprocessed_from_literal[literal]:
remaining_from_stream[id(op), effect] -= 1
if remaining_from_stream[id(op), effect] == 0:
process_axiom(op, effect)
return operator_from_literal, [op for op in operators if id(op) in reachable_operators]
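# Small worked example (hypothetical Horn axioms): with state = {A} and operators
#   ax1: A -> B        ax2: B, C -> D
# the marking pass above first processes ax1 (its single condition A holds), enqueues B,
# and then decrements ax2's counter to 1; since C is never achieved, D stays unreached,
# so the returned reachable operators contain ax1 only.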
##################################################
def instantiate_domain(task, prune_static=True):
fluent_predicates = get_fluents(task)
is_static = lambda a: isinstance(a, pddl.Atom) and (a.predicate not in fluent_predicates)
fluent_facts = MockSet(lambda a: not prune_static or not is_static(a))
init_facts = set(task.init)
function_assignments = get_function_assignments(task)
type_to_objects = instantiate.get_objects_by_type(task.objects, task.types)
constants_from_predicate = defaultdict(set)
for action in task.actions + task.axioms:
for atom in filter(is_static, get_literals(get_precondition(action))):
constants = tuple((i, a) for i, a in enumerate(atom.args) if not is_parameter(a))
constants_from_predicate[atom.predicate].add(constants)
predicate_to_atoms = defaultdict(set)
args_from_predicate = defaultdict(set)
for atom in filter(is_static, task.init): # TODO: compute which predicates might involve constants
predicate_to_atoms[atom.predicate].add(atom)
args_from_predicate[atom.predicate].add(atom.args)
for constants in constants_from_predicate[atom.predicate]:
if all(atom.args[i] == o for i, o in constants):
args_from_predicate[atom.predicate, constants].add(atom.args)
instantiated_actions = []
for action in task.actions:
for variable_mapping in instantiate_condition(action, is_static, args_from_predicate):
inst_action = action.instantiate(variable_mapping, init_facts, fluent_facts, type_to_objects,
task.use_min_cost_metric, function_assignments, predicate_to_atoms)
if inst_action:
instantiated_actions.append(inst_action)
instantiated_axioms = []
for axiom in task.axioms:
for variable_mapping in instantiate_condition(axiom, is_static, args_from_predicate):
inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
reachable_facts, reachable_operators = get_achieving_axioms(init_facts, instantiated_actions + instantiated_axioms)
atoms = {atom.positive() for atom in (init_facts | set(reachable_facts)) if isinstance(atom, pddl.Literal)}
relaxed_reachable = all(literal_holds(init_facts, goal) or goal in reachable_facts
for goal in instantiate_goal(task.goal))
reachable_actions = [action for action in reachable_operators
if isinstance(action, pddl.PropositionalAction)]
reachable_axioms = [axiom for axiom in reachable_operators
if isinstance(axiom, pddl.PropositionalAxiom)]
return relaxed_reachable, atoms, reachable_actions, reachable_axioms
##################################################
def dump_instantiated(instantiated):
print('Instantiated frequencies:\n'
'Atoms: {}\n'
'Actions: {}\n'
'Axioms: {}'.format(
str_from_object(Counter(atom.predicate for atom in instantiated.atoms)),
str_from_object(Counter(action.action.name for action in instantiated.actions)),
str_from_object(Counter(axiom.axiom.name for axiom in instantiated.axioms))))
def instantiate_task(task, check_infeasible=True, use_fd=FD_INSTANTIATE, **kwargs):
start_time = time()
print()
normalize.normalize(task)
#with Profiler(field='tottime', num=25):
if use_fd:
# TODO: recover relaxed reachability (from model)
relaxed_reachable, atoms, actions, axioms, reachable_action_params = instantiate.explore(task)
else:
relaxed_reachable, atoms, actions, axioms = instantiate_domain(task, **kwargs)
reachable_action_params = get_reachable_action_params(actions)
#for atom in sorted(filter(lambda a: isinstance(a, pddl.Literal), set(task.init) | set(atoms)),
# key=lambda a: a.predicate):
# print(fact_from_fd(atom))
#print(axioms)
#for i, action in enumerate(sorted(actions, key=lambda a: a.name)):
# print(i, transform_action_args(pddl_from_instance(action), obj_from_pddl))
print('Infeasible:', not relaxed_reachable)
print('Instantiation time: {:.3f}s'.format(elapsed_time(start_time)))
if check_infeasible and not relaxed_reachable:
return None
goal_list = instantiate_goal(task.goal)
instantiated = InstantiatedTask(task, atoms, actions, axioms, reachable_action_params, goal_list)
dump_instantiated(instantiated)
return instantiated
##################################################
def sas_from_instantiated(instantiated_task):
import timers
import fact_groups
import options
import simplify
import variable_order
from translate import translate_task, unsolvable_sas_task, strips_to_sas_dictionary, \
build_implied_facts, build_mutex_key, solvable_sas_task
start_time = time()
print()
if not instantiated_task:
return unsolvable_sas_task("No relaxed solution")
task, atoms, actions, axioms, reachable_action_params, goal_list = instantiated_task
# TODO: option to skip and just use binary variables
with timers.timing("Computing fact groups", block=True):
groups, mutex_groups, translation_key = fact_groups.compute_groups(
task, atoms, reachable_action_params)
with timers.timing("Building STRIPS to SAS dictionary"):
ranges, strips_to_sas = strips_to_sas_dictionary(
groups, assert_partial=options.use_partial_encoding)
with timers.timing("Building dictionary for full mutex groups"):
mutex_ranges, mutex_dict = strips_to_sas_dictionary(
mutex_groups, assert_partial=False)
if options.add_implied_preconditions:
with timers.timing("Building implied facts dictionary..."):
implied_facts = build_implied_facts(strips_to_sas, groups,
mutex_groups)
else:
implied_facts = {}
with timers.timing("Building mutex information", block=True):
mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
with timers.timing("Translating task", block=True):
sas_task = translate_task(
strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
task.init, goal_list, actions, axioms, task.use_min_cost_metric,
implied_facts)
if options.filter_unreachable_facts:
with timers.timing("Detecting unreachable propositions", block=True):
try:
simplify.filter_unreachable_propositions(sas_task)
except simplify.Impossible:
return unsolvable_sas_task("Simplified to trivially false goal")
except simplify.TriviallySolvable:
return solvable_sas_task("Simplified to empty goal")
if options.reorder_variables or options.filter_unimportant_vars:
with timers.timing("Reordering and filtering variables", block=True):
variable_order.find_and_apply_variable_order(
sas_task, options.reorder_variables,
options.filter_unimportant_vars)
translate.dump_statistics(sas_task)
print('Translation time: {:.3f}s'.format(elapsed_time(start_time)))
return sas_task
##################################################
def write_sas_task(sas_task, temp_dir):
translate_path = os.path.join(temp_dir, TRANSLATE_OUTPUT)
#clear_dir(temp_dir)
safe_remove(translate_path)
ensure_dir(translate_path)
with open(os.path.join(temp_dir, TRANSLATE_OUTPUT), "w") as output_file:
sas_task.output(output_file)
return translate_path
def sas_from_pddl(task, debug=False):
#normalize.normalize(task)
#sas_task = translate.pddl_to_sas(task)
with Verbose(debug):
instantiated = instantiate_task(task)
#instantiated = convert_instantiated(instantiated)
sas_task = sas_from_instantiated(instantiated)
sas_task.metric = task.use_min_cost_metric # TODO: are these sometimes not equal?
return sas_task
def translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, verbose):
domain = parse_sequential_domain(domain_pddl)
problem = parse_problem(domain, problem_pddl)
task = task_from_domain_problem(domain, problem, add_identical=False)
sas_task = sas_from_pddl(task)
write_sas_task(sas_task, temp_dir)
return task
def convert_instantiated(instantiated_task, verbose=False):
task, atoms, actions, axioms, reachable_action_params, goal_list = instantiated_task
normalize.normalize(task)
import axiom_rules
#axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goal_list)
#init = task.init + axiom_init
import options
with Verbose(verbose):
axioms, axiom_layers = axiom_rules.handle_axioms(actions, axioms, goal_list, options.layer_strategy)
init = task.init
# axioms.sort(key=lambda axiom: axiom.name)
# for axiom in axioms:
# axiom.dump()
#return InstantiatedTask(task, atoms, actions, axioms, reachable_action_params, goal_list)
return InstantiatedTask(task, init, actions, axioms, reachable_action_params, goal_list) # init instead of atoms
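# Usage sketch (file paths are placeholders): translate a PDDL pair into the SAS+ encoding
# consumed by FastDownward's search component.
#
#   domain_pddl = read('domain.pddl')     # read() lives in hsr_tamp.pddlstream.utils
#   problem_pddl = read('problem.pddl')
#   task = translate_and_write_pddl(domain_pddl, problem_pddl, 'temp/', verbose=False)
#
# Internally this runs parse_sequential_domain -> parse_problem -> task_from_domain_problem
# -> sas_from_pddl -> write_sas_task, so the individual steps can also be invoked directly.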
| 14,150 |
Python
| 44.210862 | 124 | 0.665654 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/focused.py
|
from __future__ import print_function
import time
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.advanced import enforce_simultaneous, automatically_negate_externals
from hsr_tamp.pddlstream.algorithms.common import SolutionStore
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.disabled import push_disabled, reenable_disabled, process_stream_plan
from hsr_tamp.pddlstream.algorithms.disable_skeleton import create_disabled_axioms
#from hsr_tamp.pddlstream.algorithms.downward import has_costs
from hsr_tamp.pddlstream.algorithms.incremental import process_stream_queue
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.refinement import iterative_plan_streams, get_optimistic_solve_fn
from hsr_tamp.pddlstream.algorithms.scheduling.plan_streams import OptSolution
from hsr_tamp.pddlstream.algorithms.reorder import reorder_stream_plan
from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.algorithms.visualization import reset_visualizations, create_visualizations, \
has_pygraphviz, log_plans
from hsr_tamp.pddlstream.language.constants import is_plan, get_length, str_from_plan, INFEASIBLE
from hsr_tamp.pddlstream.language.fluent import compile_fluent_streams
from hsr_tamp.pddlstream.language.function import Function, Predicate
from hsr_tamp.pddlstream.language.optimizer import ComponentStream
from hsr_tamp.pddlstream.algorithms.recover_optimizers import combine_optimizers
from hsr_tamp.pddlstream.language.statistics import load_stream_statistics, \
write_stream_statistics, compute_plan_effort
from hsr_tamp.pddlstream.language.stream import Stream, StreamResult
from hsr_tamp.pddlstream.utils import INF, implies, str_from_object, safe_zip
def get_negative_externals(externals):
negative_predicates = list(filter(lambda s: type(s) is Predicate, externals)) # and s.is_negative()
negated_streams = list(filter(lambda s: isinstance(s, Stream) and s.is_negated, externals))
return negative_predicates + negated_streams
def partition_externals(externals, verbose=False):
functions = list(filter(lambda s: type(s) is Function, externals))
negative = get_negative_externals(externals)
optimizers = list(filter(lambda s: isinstance(s, ComponentStream) and (s not in negative), externals))
streams = list(filter(lambda s: s not in (functions + negative + optimizers), externals))
if verbose:
print('Streams: {}\nFunctions: {}\nNegated: {}\nOptimizers: {}'.format(
streams, functions, negative, optimizers))
return streams, functions, negative, optimizers
##################################################
def recover_optimistic_outputs(stream_plan):
if not is_plan(stream_plan):
return stream_plan
new_mapping = {}
new_stream_plan = []
for result in stream_plan:
new_result = result.remap_inputs(new_mapping)
new_stream_plan.append(new_result)
if isinstance(new_result, StreamResult):
opt_result = new_result.instance.opt_results[0] # TODO: empty if disabled
new_mapping.update(safe_zip(new_result.output_objects, opt_result.output_objects))
return new_stream_plan
def check_dominated(skeleton_queue, stream_plan):
if not is_plan(stream_plan):
return True
for skeleton in skeleton_queue.skeletons:
# TODO: has stream_plans and account for different output object values
if frozenset(stream_plan) <= frozenset(skeleton.stream_plan):
print(stream_plan)
print(skeleton.stream_plan)
raise NotImplementedError()
##################################################
def solve_abstract(problem, constraints=PlanConstraints(), stream_info={}, replan_actions=set(),
unit_costs=False, success_cost=INF,
max_time=INF, max_iterations=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
max_skeletons=INF, search_sample_ratio=0, bind=True, max_failures=0,
unit_efforts=False, max_effort=INF, effort_weight=None, reorder=True,
visualize=False, verbose=True, **search_kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param constraints: PlanConstraints on the set of legal solutions
:param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
:param replan_actions: the actions declared to induce replanning for the purpose of deferred stream evaluation
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param max_skeletons: the maximum number of plan skeletons (max_skeletons=None indicates not adaptive)
:param search_sample_ratio: the desired ratio of sample time / search time when max_skeletons!=None
:param bind: if True, propagates parameter bindings when max_skeletons=None
:param max_failures: the maximum number of stream failures before switching phases when max_skeletons=None
:param unit_efforts: use unit stream efforts rather than estimated numeric efforts
:param max_effort: the maximum amount of stream effort
:param effort_weight: a multiplier for stream effort compared to action costs
:param reorder: if True, reorder stream plans to minimize the expected sampling overhead
:param visualize: if True, draw the constraint network and stream plan as a graphviz file
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# TODO: select whether to search or sample based on expected success rates
# TODO: no optimizers during search with relaxed_stream_plan
# TODO: locally optimize only after a solution is identified
# TODO: replan with a better search algorithm after feasible
# TODO: change the search algorithm and unit costs based on the best cost
use_skeletons = (max_skeletons is not None)
#assert implies(use_skeletons, search_sample_ratio > 0)
eager_disabled = (effort_weight is None) # No point if no stream effort biasing
num_iterations = eager_calls = 0
complexity_limit = initial_complexity
evaluations, goal_exp, domain, externals = parse_problem(
problem, stream_info=stream_info, constraints=constraints,
unit_costs=unit_costs, unit_efforts=unit_efforts)
automatically_negate_externals(domain, externals)
enforce_simultaneous(domain, externals)
compile_fluent_streams(domain, externals)
# TODO: make effort_weight be a function of the current cost
# if (effort_weight is None) and not has_costs(domain):
# effort_weight = 1
load_stream_statistics(externals)
if visualize and not has_pygraphviz():
visualize = False
print('Warning, visualize=True requires pygraphviz. Setting visualize=False')
if visualize:
reset_visualizations()
streams, functions, negative, optimizers = partition_externals(externals, verbose=verbose)
eager_externals = list(filter(lambda e: e.info.eager, externals))
positive_externals = streams + functions + optimizers
has_optimizers = bool(optimizers) # TODO: deprecate
assert implies(has_optimizers, use_skeletons)
################
store = SolutionStore(evaluations, max_time, success_cost, verbose, max_memory=max_memory)
skeleton_queue = SkeletonQueue(store, domain, disable=not has_optimizers)
disabled = set() # Max skeletons after a solution
while (not store.is_terminated()) and (num_iterations < max_iterations) and (complexity_limit <= max_complexity):
num_iterations += 1
eager_instantiator = Instantiator(eager_externals, evaluations) # Only update after an increase?
if eager_disabled:
push_disabled(eager_instantiator, disabled)
if eager_externals:
eager_calls += process_stream_queue(eager_instantiator, store,
complexity_limit=complexity_limit, verbose=verbose)
################
print('\nIteration: {} | Complexity: {} | Skeletons: {} | Skeleton Queue: {} | Disabled: {} | Evaluations: {} | '
'Eager Calls: {} | Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
num_iterations, complexity_limit, len(skeleton_queue.skeletons), len(skeleton_queue), len(disabled),
len(evaluations), eager_calls, store.best_cost, store.search_time, store.sample_time, store.elapsed_time()))
optimistic_solve_fn = get_optimistic_solve_fn(goal_exp, domain, negative,
replan_actions=replan_actions, reachieve=use_skeletons,
max_cost=min(store.best_cost, constraints.max_cost),
max_effort=max_effort, effort_weight=effort_weight, **search_kwargs)
# TODO: just set unit effort for each stream beforehand
if (max_skeletons is None) or (len(skeleton_queue.skeletons) < max_skeletons):
disabled_axioms = create_disabled_axioms(skeleton_queue) if has_optimizers else []
if disabled_axioms:
domain.axioms.extend(disabled_axioms)
stream_plan, opt_plan, cost = iterative_plan_streams(evaluations, positive_externals,
optimistic_solve_fn, complexity_limit, max_effort=max_effort)
for axiom in disabled_axioms:
domain.axioms.remove(axiom)
else:
stream_plan, opt_plan, cost = OptSolution(INFEASIBLE, INFEASIBLE, INF) # TODO: apply elsewhere
################
#stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals) or stream_plan
stream_plan = combine_optimizers(evaluations, stream_plan)
#stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
# [s for s in synthesizers if not s.post_only])
#stream_plan = recover_optimistic_outputs(stream_plan)
if reorder:
# TODO: this blows up memory wise for long stream plans
stream_plan = reorder_stream_plan(store, stream_plan)
num_optimistic = sum(r.optimistic for r in stream_plan) if stream_plan else 0
action_plan = opt_plan.action_plan if is_plan(opt_plan) else opt_plan
print('Stream plan ({}, {}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
get_length(stream_plan), num_optimistic, compute_plan_effort(stream_plan), stream_plan,
get_length(action_plan), cost, str_from_plan(action_plan)))
if is_plan(stream_plan) and visualize:
log_plans(stream_plan, action_plan, num_iterations)
create_visualizations(evaluations, stream_plan, num_iterations)
################
if (stream_plan is INFEASIBLE) and (not eager_instantiator) and (not skeleton_queue) and (not disabled):
break
if not is_plan(stream_plan):
print('No plan: increasing complexity from {} to {}'.format(complexity_limit, complexity_limit+complexity_step))
complexity_limit += complexity_step
if not eager_disabled:
reenable_disabled(evaluations, domain, disabled)
#print(stream_plan_complexity(evaluations, stream_plan))
if not use_skeletons:
process_stream_plan(store, domain, disabled, stream_plan, opt_plan, cost, bind=bind, max_failures=max_failures)
continue
################
#optimizer_plan = replan_with_optimizers(evaluations, stream_plan, domain, optimizers)
optimizer_plan = None
if optimizer_plan is not None:
# TODO: post process a bound plan
print('Optimizer plan ({}, {:.3f}): {}'.format(
get_length(optimizer_plan), compute_plan_effort(optimizer_plan), optimizer_plan))
skeleton_queue.new_skeleton(optimizer_plan, opt_plan, cost)
allocated_sample_time = (search_sample_ratio * store.search_time) - store.sample_time \
if len(skeleton_queue.skeletons) <= max_skeletons else INF
if skeleton_queue.process(stream_plan, opt_plan, cost, complexity_limit, allocated_sample_time) is INFEASIBLE:
break
################
summary = store.export_summary()
summary.update({
'iterations': num_iterations,
'complexity': complexity_limit,
'skeletons': len(skeleton_queue.skeletons),
})
print('Summary: {}'.format(str_from_object(summary, ndigits=3))) # TODO: return the summary
write_stream_statistics(externals, verbose)
return store.extract_solution()
solve_focused = solve_abstract # TODO: deprecate solve_focused
##################################################
def solve_focused_original(problem, fail_fast=False, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param fail_fast: whether to switch phases as soon as a stream fails
:param kwargs: keyword args for solve_focused
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_failures = 0 if fail_fast else INF
return solve_abstract(problem, max_skeletons=None, search_sample_ratio=None,
bind=False, max_failures=max_failures, **kwargs)
def solve_binding(problem, fail_fast=False, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param fail_fast: whether to switch phases as soon as a stream fails
:param kwargs: keyword args for solve_focused
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_failures = 0 if fail_fast else INF
return solve_abstract(problem, max_skeletons=None, search_sample_ratio=None,
bind=True, max_failures=max_failures, **kwargs)
def solve_adaptive(problem, max_skeletons=INF, search_sample_ratio=1, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param max_skeletons: the maximum number of plan skeletons to consider
    :param search_sample_ratio: the desired ratio of sample time / search time
:param kwargs: keyword args for solve_focused
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
max_skeletons = INF if max_skeletons is None else max_skeletons
#search_sample_ratio = clip(search_sample_ratio, lower=0) # + EPSILON
#assert search_sample_ratio > 0
return solve_abstract(problem, max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
bind=None, max_failures=None, **kwargs)
def solve_hierarchical(problem, **kwargs):
"""
Solves a PDDLStream problem by first planning with optimistic stream outputs and then querying streams
:param problem: a PDDLStream problem
:param kwargs: keyword args for solve_focused
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
return solve_adaptive(problem, max_skeletons=1, search_sample_ratio=INF, # TODO: rename to sample_search_ratio
bind=None, max_failures=None, **kwargs)
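# Usage sketch (hypothetical problem data): the solvers above share a common signature, so
# a typical invocation looks like the following.
#
#   from hsr_tamp.pddlstream.language.constants import PDDLProblem
#   problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
#   plan, cost, evaluations = solve_adaptive(problem, max_time=60, verbose=False)
#
# domain_pddl/stream_pddl are PDDL(Stream) strings, stream_map binds stream names to Python
# generators, and the returned triple matches store.extract_solution() in solve_abstract.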
| 17,028 |
Python
| 54.288961 | 124 | 0.689453 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/search.py
|
from __future__ import print_function
from copy import deepcopy
from time import time
from hsr_tamp.pddlstream.algorithms.downward import run_search, TEMP_DIR, write_pddl
from hsr_tamp.pddlstream.algorithms.instantiate_task import write_sas_task, translate_and_write_pddl
from hsr_tamp.pddlstream.utils import INF, Verbose, safe_rm_dir, elapsed_time
# TODO: manual_patterns
# Specify patterns on the discrete variables that are updated via conditional effects
# http://www.fast-downward.org/Doc/PatternCollectionGenerator
# TODO: receding horizon planning
# TODO: allow switch to higher-level in heuristic
# TODO: recursive application of these
# TODO: write the domain and problem PDDL files that are used for debugging purposes
def solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **search_args):
# TODO: can solve using another planner and then still translate using FastDownward
# Can apply plan constraints (skeleton constraints) here as well
start_time = time()
with Verbose(debug):
print('\n' + 50*'-' + '\n')
write_sas_task(sas_task, temp_dir)
solution = run_search(temp_dir, debug=True, **search_args)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
#for axiom in sas_task.axioms:
# # TODO: return the set of axioms here as well
# var, value = axiom.effect
# print(sas_task.variables.value_names[var])
# axiom.dump()
return solution
def solve_from_pddl(domain_pddl, problem_pddl, temp_dir=TEMP_DIR, clean=False, debug=False, **search_kwargs):
# TODO: combine with solve_from_task
#return solve_tfd(domain_pddl, problem_pddl)
start_time = time()
with Verbose(debug):
write_pddl(domain_pddl, problem_pddl, temp_dir)
#run_translate(temp_dir, verbose)
translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, debug)
solution = run_search(temp_dir, debug=debug, **search_kwargs)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return solution
##################################################
def apply_sas_operator(init, op):
for var, pre, post, cond in op.pre_post:
assert (pre == -1) or (init.values[var] == pre)
assert not cond
init.values[var] = post
def name_from_action(action, args):
return '({})'.format(' '.join((action,) + args))
def parse_sas_plan(sas_task, plan):
op_from_name = {op.name: op for op in sas_task.operators} # No need to keep repeats
sas_plan = []
for action, args in plan:
name = name_from_action(action, args)
sas_plan.append(op_from_name[name])
return sas_plan
##################################################
SERIALIZE = 'serialize'
def plan_subgoals(sas_task, subgoal_plan, temp_dir, **kwargs):
full_plan = []
full_cost = 0
for subgoal in subgoal_plan:
sas_task.goal.pairs = subgoal
write_sas_task(sas_task, temp_dir)
plan, cost = run_search(temp_dir, debug=True, **kwargs)
if plan is None:
return None, INF
full_plan.extend(plan)
full_cost += cost
for sas_action in parse_sas_plan(sas_task, plan):
apply_sas_operator(sas_task.init, sas_action)
return full_plan, full_cost
def serialized_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
# TODO: specify goal grouping / group by predicate & objects
    # TODO: version that solves for all disjunctive subgoals at once
start_time = time()
with Verbose(debug):
print('\n' + 50*'-' + '\n')
subgoal_plan = [sas_task.goal.pairs[:i+1] for i in range(len(sas_task.goal.pairs))]
plan, cost = plan_subgoals(sas_task, subgoal_plan, temp_dir, **kwargs)
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return plan, cost
##################################################
class ABSTRIPSLayer(object):
def __init__(self, pos_pre=[], neg_pre=[], pos_eff=[], neg_eff=[], horizon=INF):
self.pos_pre = pos_pre
self.neg_pre = neg_pre
self.pos_eff = pos_eff
self.neg_eff = neg_eff
self.horizon = horizon # TODO: cost units instead?
assert 1 <= self.horizon
if self.pos_eff:
raise NotImplementedError()
if self.neg_eff:
raise NotImplementedError()
##################################################
def prune_hierarchy_pre_eff(sas_task, layers):
positive_template = 'Atom {}('
negated_template = 'NegatedAtom {}('
pruned_pre = set() # TODO: effects
for layer in layers:
pruned_pre.update(positive_template.format(p.lower()) for p in layer.pos_pre)
pruned_pre.update(negated_template.format(p.lower()) for p in layer.neg_pre)
pruned = set()
for var, names in enumerate(sas_task.variables.value_names):
for val, name in enumerate(names):
if any(name.startswith(p) for p in pruned_pre):
pruned.add((var, val))
    for op in sas_task.operators:
        for k, pair in reversed(list(enumerate(op.prevail))):
            if pair in pruned:
                op.prevail.pop(k) # pop the matched index; reversed iteration keeps earlier indices valid
    for k, pair in reversed(list(enumerate(sas_task.goal.pairs))):
        if pair in pruned:
            sas_task.goal.pairs.pop(k) # likewise drop the pruned goal pair rather than the first one
return pruned
def add_subgoals(sas_task, subgoal_plan):
if not subgoal_plan:
return None
subgoal_var = len(sas_task.variables.ranges)
subgoal_range = len(subgoal_plan) + 1
sas_task.variables.ranges.append(subgoal_range)
sas_task.variables.axiom_layers.append(-1)
sas_task.variables.value_names.append(
['subgoal{}'.format(i) for i in range(subgoal_range)])
sas_task.init.values.append(0)
sas_task.goal.pairs.append((subgoal_var, subgoal_range - 1))
# TODO: make this a subroutine that depends on the length
for i, op in enumerate(sas_task.operators):
if op.name not in subgoal_plan:
continue
subgoal = subgoal_plan.index(op.name) + 1
pre_post = (subgoal_var, subgoal - 1, subgoal, [])
op.pre_post.append(pre_post)
# TODO: maybe this should be the resultant state instead?
# TODO: prevail should just be the last prevail
# name = '(subgoal{}_{})'.format(subgoal, i)
# subgoal_cost = 1 # Can strengthen for stronger heuristics
# local_sas_task.operators.append(sas_tasks.SASOperator(
# name, op.prevail, [pre_post], subgoal_cost))
return subgoal_var
def abstrips_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
# Like partial order planning in terms of precondition order
# TODO: add achieve subgoal actions
# TODO: most generic would be a heuristic on each state
if hierarchy == SERIALIZE:
return serialized_solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
if not hierarchy:
return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
start_time = time()
plan, cost = None, INF
with Verbose(debug):
print('\n' + 50*'-' + '\n')
last_plan = []
for level in range(len(hierarchy)+1):
local_sas_task = deepcopy(sas_task)
prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:]) # TODO: break if no pruned
add_subgoals(local_sas_task, last_plan)
write_sas_task(local_sas_task, temp_dir)
plan, cost = run_search(temp_dir, debug=True, **kwargs)
if (level == len(hierarchy)) or (plan is None):
# TODO: fall back on standard search
break
last_plan = [name_from_action(action, args) for action, args in plan]
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
return plan, cost
##################################################
# TODO: can structure these subproblems prioritizing depth rather than width
# TODO: reconcile shared objects on each level
# Each operator in the hierarchy is a legal "operator" that may need to be refined
def abstrips_solve_from_task_sequential(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False,
hierarchy=[], subgoal_horizon=1, **kwargs):
# TODO: version that plans for each goal individually
# TODO: can reduce to goal serialization if binary flag for each subgoal
if not hierarchy:
return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
start_time = time()
plan, cost = None, INF
with Verbose(debug):
last_plan = None
for level in range(len(hierarchy) + 1):
local_sas_task = deepcopy(sas_task)
prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:]) # TODO: break if no pruned
# The goal itself is effectively a subgoal
# Handle this subgoal horizon
subgoal_plan = [local_sas_task.goal.pairs[:]]
# TODO: do I want to consider the "subgoal action" as a real action?
if last_plan is not None:
subgoal_var = add_subgoals(local_sas_task, last_plan)
subgoal_plan = [[(subgoal_var, val)] for val in range(1,
local_sas_task.variables.ranges[subgoal_var], subgoal_horizon)] + subgoal_plan
hierarchy_horizon = min(hierarchy[level-1].horizon, len(subgoal_plan))
subgoal_plan = subgoal_plan[:hierarchy_horizon]
plan, cost = plan_subgoals(local_sas_task, subgoal_plan, temp_dir, **kwargs)
if (level == len(hierarchy)) or (plan is None):
# TODO: fall back on normal
# TODO: search in space of subgoals
break
last_plan = [name_from_action(action, args) for action, args in plan]
if clean:
safe_rm_dir(temp_dir)
print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
# TODO: record which level of abstraction each operator is at when returning
# TODO: return instantiated actions here rather than names (including pruned pre/eff)
return plan, cost
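# Usage sketch (predicate names are hypothetical): an abstraction hierarchy is a list of
# ABSTRIPSLayer objects, one per level, naming the preconditions to ignore at that level.
#
#   hierarchy = [ABSTRIPSLayer(pos_pre=['handempty'], neg_pre=['obstructed'])]
#   plan, cost = abstrips_solve_from_task(sas_task, hierarchy=hierarchy, debug=True)
#
# Passing hierarchy=SERIALIZE instead solves the top-level goal atoms one at a time via
# serialized_solve_from_task.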
| 10,419 |
Python
| 42.416666 | 110 | 0.621173 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/advanced.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, get_conjunctive_parts, get_disjunctive_parts
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import substitute_expression
from hsr_tamp.pddlstream.language.fluent import get_predicate_map
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE, ConstraintStream
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import find_unique, get_mapping
UNIVERSAL_TO_CONDITIONAL = False
AUTOMATICALLY_NEGATE = True # TODO: fix Yang's bug
# TODO: AUTOMATICALLY_NEGATE = False can omit collisions
def get_predicates(expression):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return set()
if isinstance(expression, pddl.conditions.JunctorCondition) or \
isinstance(expression, pddl.conditions.QuantifiedCondition):
predicates = set()
for part in expression.parts:
predicates.update(get_predicates(part))
return predicates
if isinstance(expression, pddl.conditions.Literal):
return {expression.predicate}
raise ValueError(expression)
def universal_to_conditional(action):
import pddl
new_parts = []
unsatisfiable = fd_from_fact((UNSATISFIABLE,))
for quant in get_conjunctive_parts(action.precondition):
if isinstance(quant, pddl.UniversalCondition):
condition = quant.parts[0]
# TODO: normalize first?
if isinstance(condition, pddl.Disjunction) or isinstance(condition, pddl.Literal):
action.effects.append(pddl.Effect(quant.parameters, condition.negate(), unsatisfiable))
continue
new_parts.append(quant)
action.precondition = pddl.Conjunction(new_parts)
def process_conditional_effect(effect, negative_from_predicate):
import pddl
new_parts = []
stream_facts = []
for disjunctive in get_conjunctive_parts(effect.condition):
for literal in get_disjunctive_parts(disjunctive):
# TODO: assert only one disjunctive part
if isinstance(literal, pddl.Literal) and (literal.predicate in negative_from_predicate):
stream = negative_from_predicate[literal.predicate]
if not isinstance(stream, ConstraintStream):
new_parts.append(literal)
continue
certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
mapping = get_mapping(get_args(certified), literal.args)
stream_facts.append(fd_from_fact(substitute_expression(stream.stream_fact, mapping)))
# TODO: add the negated literal as precondition here?
else:
new_parts.append(literal)
return new_parts, stream_facts
def optimizer_conditional_effects(domain, externals):
import pddl
#from hsr_tamp.pddlstream.algorithms.scheduling.negative import get_negative_predicates
# TODO: extend this to predicates
if UNIVERSAL_TO_CONDITIONAL:
negative_streams = list(filter(lambda e: e.is_negated, externals))
else:
negative_streams = list(filter(lambda e: isinstance(e, ConstraintStream) and e.is_negated, externals))
negative_from_predicate = get_predicate_map(negative_streams)
if not negative_from_predicate:
return
for action in domain.actions:
universal_to_conditional(action)
new_effects = []
for effect in action.effects:
if effect.literal.predicate != UNSATISFIABLE:
new_effects.append(effect)
continue
new_parts, stream_facts = process_conditional_effect(effect, negative_from_predicate)
if not stream_facts:
new_effects.append(effect)
for stream_fact in stream_facts:
new_effects.append(pddl.Effect(effect.parameters, pddl.Conjunction(new_parts), stream_fact))
action.effects = new_effects
def enforce_simultaneous(domain, externals):
optimizer_conditional_effects(domain, externals)
axiom_predicates = set()
for axiom in domain.axioms:
axiom_predicates.update(get_predicates(axiom.condition))
for external in externals:
if isinstance(external, ConstraintStream) and not external.info.simultaneous:
#isinstance(external, ComponentStream) and not external.outputs
            # Only needed for ConstraintStream because VariableStream outputs appear directly in action args
# TODO: apply recursively to domain conditions?
predicates = {get_prefix(fact) for fact in external.certified}
if predicates & axiom_predicates:
external.info.simultaneous = True
##################################################
def get_domain_predicates(external):
return set(map(get_prefix, external.domain))
def get_certified_predicates(external):
if isinstance(external, Stream):
return set(map(get_prefix, external.certified))
if isinstance(external, Function):
return {get_prefix(external.head)}
raise ValueError(external)
def get_interacting_externals(externals):
external_pairs = set()
for external1 in externals:
for external2 in externals:
# TODO: handle case where no domain conditions
if get_certified_predicates(external1) & get_domain_predicates(external2):
# TODO: count intersection when arity of zero
external_pairs.add((external1, external2))
if external1.is_negated:
raise ValueError('Stream [{}] can certify [{}] and thus cannot be negated'.format(
external1.name, external2.name))
return external_pairs
def get_certifiers(externals):
certifiers = defaultdict(set)
for external in externals:
for predicate in get_certified_predicates(external):
certifiers[predicate].add(external)
return certifiers
def get_negated_predicates(domain):
# TODO: generalize to more complicated formulas and recursive axioms
import pddl
negated_action_preconditions = set()
for action in domain.actions:
for part in get_conjunctive_parts(action.precondition):
# TODO: at least check more complicated parts for usage
if isinstance(part, pddl.NegatedAtom):
negated_action_preconditions.add(part.predicate)
negated_predicates = set()
for axiom in domain.axioms:
if axiom.name not in negated_action_preconditions:
continue
for part in get_conjunctive_parts(axiom.condition):
if isinstance(part, pddl.NegatedAtom):
negated_predicates.add(part.predicate)
return negated_predicates
def automatically_negate_externals(domain, externals):
negated_predicates = get_negated_predicates(domain)
certifiers = get_certifiers(externals)
producers = {e1 for e1, _ in get_interacting_externals(externals)}
non_producers = set(externals) - producers
for external in non_producers:
#if external.is_fluent:
#external.num_opt_fns = 0 # Streams that can be evaluated at the end as tests
if isinstance(external, Stream) and not external.is_negated \
and external.is_test and not external.is_fluent and external.could_succeed() \
and all((predicate in negated_predicates) and (len(certifiers[predicate]) == 1)
for predicate in get_certified_predicates(external)):
# TODO: could instead only negate if in a negative axiom
external.info.negate = True
print('Setting negate={} for stream [{}]'.format(external.is_negated, external.name))
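# Hedged example (hypothetical predicate names): suppose an action requires
# (not (unsafe ?conf)), the axiom deriving 'unsafe' contains (not (collision ?conf ?obj)),
# and 'collision' is certified only by an output-free test stream whose facts no other
# stream consumes. The loop above then sets negate=True for that stream, so the planner may
# assume collision facts are false until the test proves otherwise.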
| 7,925 |
Python
| 43.52809 | 110 | 0.679117 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/instantiation.py
|
from collections import defaultdict, namedtuple, Sized
from heapq import heappush, heappop
from itertools import product
from hsr_tamp.pddlstream.algorithms.common import COMPLEXITY_OP
from hsr_tamp.pddlstream.algorithms.relation import compute_order, Relation, solve_satisfaction
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.language.conversion import is_atom, head_from_fact
from hsr_tamp.pddlstream.utils import safe_zip, HeapElement, safe_apply_mapping
USE_RELATION = True
# TODO: maybe store unit complexity here as well as a tiebreaker
Priority = namedtuple('Priority', ['complexity', 'num']) # num ensures FIFO
def is_instance(atom, schema):
return (atom.function == schema.function) and \
all(is_parameter(b) or (a == b)
for a, b in safe_zip(atom.args, schema.args))
def test_mapping(atoms1, atoms2):
mapping = {}
for a1, a2 in safe_zip(atoms1, atoms2):
assert a1.function == a2.function
for arg1, arg2 in safe_zip(a1.args, a2.args):
if mapping.get(arg1, arg2) == arg2:
mapping[arg1] = arg2
else:
return None
return mapping
##################################################
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.43.7049&rep=rep1&type=pdf
class Instantiator(Sized): # Dynamic Instantiator
def __init__(self, streams, evaluations={}, verbose=False):
# TODO: lazily instantiate upon demand
self.streams = streams
self.verbose = verbose
#self.streams_from_atom = defaultdict(list)
self.queue = []
self.num_pushes = 0 # shared between the queues
# TODO: rename atom to head in most places
self.complexity_from_atom = {}
self.atoms_from_domain = defaultdict(list)
for stream in self.streams:
if not stream.domain:
assert not stream.inputs
self.push_instance(stream.get_instance([]))
for atom, node in evaluations.items():
self.add_atom(atom, node.complexity)
# TODO: revisit deque and add functions to front
# TODO: record the stream instances or results?
#########################
def __len__(self):
return len(self.queue)
def compute_complexity(self, instance):
domain_complexity = COMPLEXITY_OP([self.complexity_from_atom[head_from_fact(f)]
for f in instance.get_domain()] + [0])
return domain_complexity + instance.external.get_complexity(instance.num_calls)
def push_instance(self, instance):
# TODO: flush stale priorities?
complexity = self.compute_complexity(instance)
priority = Priority(complexity, self.num_pushes)
heappush(self.queue, HeapElement(priority, instance))
self.num_pushes += 1
if self.verbose:
print(self.num_pushes, instance)
def pop_stream(self):
priority, instance = heappop(self.queue)
return instance
def min_complexity(self):
priority, _ = self.queue[0]
return priority.complexity
#########################
def _add_combinations(self, stream, atoms):
if not all(atoms):
return
domain = list(map(head_from_fact, stream.domain))
# Most constrained variable/atom to least constrained
for combo in product(*atoms):
mapping = test_mapping(domain, combo)
if mapping is not None:
input_objects = safe_apply_mapping(stream.inputs, mapping)
self.push_instance(stream.get_instance(input_objects))
def _add_combinations_relation(self, stream, atoms):
if not all(atoms):
return
# TODO: might be a bug here?
domain = list(map(head_from_fact, stream.domain))
# TODO: compute this first?
relations = [Relation(filter(is_parameter, domain[index].args),
[tuple(a for a, b in safe_zip(atom.args, domain[index].args)
if is_parameter(b)) for atom in atoms[index]])
for index in compute_order(domain, atoms)]
solution = solve_satisfaction(relations)
for element in solution.body:
mapping = solution.get_mapping(element)
input_objects = safe_apply_mapping(stream.inputs, mapping)
self.push_instance(stream.get_instance(input_objects))
def _add_new_instances(self, new_atom):
for s_idx, stream in enumerate(self.streams):
for d_idx, domain_fact in enumerate(stream.domain):
domain_atom = head_from_fact(domain_fact)
if is_instance(new_atom, domain_atom):
# TODO: handle domain constants more intelligently
self.atoms_from_domain[s_idx, d_idx].append(new_atom)
atoms = [self.atoms_from_domain[s_idx, d2_idx] if d_idx != d2_idx else [new_atom]
for d2_idx in range(len(stream.domain))]
if USE_RELATION:
self._add_combinations_relation(stream, atoms)
else:
self._add_combinations(stream, atoms)
def add_atom(self, atom, complexity):
if not is_atom(atom):
return False
head = atom.head
if head in self.complexity_from_atom:
assert self.complexity_from_atom[head] <= complexity
return False
self.complexity_from_atom[head] = complexity
self._add_new_instances(head)
return True
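# Usage sketch (assumed inputs): the instantiator is seeded with the externals and the
# current evaluation map, then drained in order of increasing complexity, mirroring
# process_stream_queue in algorithms/incremental.py.
#
#   instantiator = Instantiator(externals, evaluations)
#   while instantiator and (instantiator.min_complexity() <= complexity_limit):
#       instance = instantiator.pop_stream()
#       # ... evaluate the instance, then call add_atom(...) for each newly certified fact
#
# externals, evaluations, and complexity_limit are assumed to come from the calling algorithm.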
| 5,655 |
Python
| 40.588235 | 101 | 0.603537 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/disable_skeleton.py
|
from hsr_tamp.pddlstream.algorithms.downward import make_axiom
from hsr_tamp.pddlstream.algorithms.disabled import get_free_objects
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders, get_stream_plan_components
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
from hsr_tamp.pddlstream.language.conversion import get_args, substitute_expression
from hsr_tamp.pddlstream.language.object import OptimisticObject, UniqueOptValue
from hsr_tamp.pddlstream.utils import grow_component, adjacent_from_edges, incoming_from_edges, get_mapping, user_input, flatten
from collections import Counter
def increase_free_variables(stream_plan):
# TODO: could decrease the number of variables if a cluster is removed
free_objects = Counter(flatten(result.instance.input_objects for result in stream_plan))
for obj, num in free_objects.items():
# TODO: wait until the full plan has failed (accomplished through levels)
if isinstance(obj, OptimisticObject):
assert isinstance(obj.param, UniqueOptValue)
instance = obj.param.instance
instance.num_optimistic = max(instance.num_optimistic, num + 1)
def create_disable_axiom(external_plan, use_parameters=True):
# TODO: express constraint mutexes upfront
# TODO: investigate why use_parameters=True hurts satisfaction
# TODO: better mix optimization and sampling by determining a splitting point
# TODO: be careful about the shared objects as parameters
# TODO: need to block functions & predicates
stream_plan, _ = partition_external_plan(external_plan)
assert stream_plan
#component_plan = stream_plan
[unsatisfiable] = stream_plan[-1].get_unsatisfiable()
component_plan = list(flatten(r.get_components() for r in stream_plan[:-1])) + list(unsatisfiable)
increase_free_variables(component_plan)
#output_objects = get_free_objects(component_plan) if use_parameters else set()
constraints = [result.stream_fact for result in component_plan]
optimistic_objects = {o for f in constraints for o in get_args(f)
if isinstance(o, OptimisticObject)} # TODO: consider case when variables are free
#assert optimistic_objects <= output_objects
#free_objects = list(optimistic_objects & output_objects) # TODO: need to return all variables
free_objects = optimistic_objects
parameters = ['?p{}'.format(i) for i in range(len(free_objects))]
param_from_obj = get_mapping(free_objects, parameters)
preconditions = substitute_expression(constraints, param_from_obj)
effect = (UNSATISFIABLE,)
axiom = make_axiom(parameters, preconditions, effect)
#axiom.dump()
return axiom
def compute_failed_indices(skeleton):
failed_indices = set()
for binding in skeleton.root.post_order():
result = binding.result
if (result is not None) and result.instance.num_calls and (not result.instance.successful):
failed_indices.add(binding.index)
#assert not binding.children
return sorted(failed_indices)
def current_failed_cluster(binding):
assert 1 <= binding.visits
failed_result = binding.skeleton.stream_plan[binding.index]
successful_results = [result for i, result in enumerate(binding.skeleton.stream_plan)
if i not in binding.stream_indices]
stream_plan = successful_results + [failed_result]
partial_orders = get_partial_orders(stream_plan)
# All connected components
#return get_connected_components(stream_plan, partial_orders)
# Only the failed connected component
return [grow_component([failed_result], adjacent_from_edges(partial_orders))]
def current_failure_contributors(binding):
# Alternatively, find unsuccessful streams in cluster and add ancestors
assert (1 <= binding.visits) or binding.is_dominated()
failed_result = binding.skeleton.stream_plan[binding.index]
failed_indices = compute_failed_indices(binding.skeleton) # Use last index?
partial_orders = get_partial_orders(binding.skeleton.stream_plan)
incoming = incoming_from_edges(partial_orders)
failed_ancestors = grow_component([failed_result], incoming)
for index in reversed(failed_indices):
if index == binding.index:
continue
result = binding.skeleton.stream_plan[index]
ancestors = grow_component([result], incoming)
if ancestors & failed_ancestors:
failed_ancestors.update(ancestors)
return [failed_ancestors]
def extract_disabled_clusters(queue, full_cluster=False):
# TODO: include costs within clustering?
# What is goal is to be below a cost threshold?
# In satisfaction, no need because costs are fixed
# Make stream_facts for externals to prevent use of the same ones
    # This ordering is why it's better to put likely-to-fail streams first
# Branch on the different possible binding outcomes
# TODO: consider a nonlinear version of this that evaluates out of order
# Need extra sampling effort to identify infeasible subsets
# Treat unevaluated optimistically, as in always satisfiable
# Need to keep streams with outputs to connect if downstream is infeasible
# TODO: prune streams that always have at least one success
# TODO: CSP identification of irreducible unsatisfiable subsets
# TODO: take into consideration if a stream is enumerated to mark as a hard failure
#clusters = set()
ordered_clusters = []
for skeleton in queue.skeletons:
# TODO: consider all up to the most progress
#cluster_plans = [skeleton.stream_plan]
cluster_plans = get_stream_plan_components(skeleton.stream_plan)
binding = skeleton.best_binding
if not binding.is_fully_bound:
# TODO: block if cost sensitive to possibly get cheaper solutions
cluster_plans = current_failed_cluster(binding) if full_cluster else current_failure_contributors(binding)
for cluster_plan in cluster_plans:
ordered_clusters.append(cluster_plan)
#clusters.add(frozenset(cluster_plan))
# TODO: could instead prune at this stage
return ordered_clusters
def create_disabled_axioms(queue, last_clusters=None, **kwargs):
clusters = extract_disabled_clusters(queue)
return [create_disable_axiom(cluster, **kwargs) for cluster in clusters]
| 6,468 |
Python
| 50.341269 | 128 | 0.726654 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/relation.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import INF, get_mapping
def compute_order(domain, atoms):
# Most constrained variable/atom to least constrained
# TODO: dynamically select the atom with the fewest options (minimize new additions)
    # Operating on the dual (select constraints rather than vars) because of their lower arity
order = []
parameters = set() # Include constants
for _ in range(len(domain)):
min_new = INF
min_index = None
for index in set(range(len(domain))) - set(order):
if set(filter(is_parameter, domain[index].args)) <= parameters:
min_new = 0
min_index = index
if len(atoms[index]) < min_new:
min_new = len(atoms[index])
min_index = index
order.append(min_index)
parameters.update(filter(is_parameter, domain[min_index].args))
return order
##################################################
# TODO: all solutions constraint satisfaction point of view: constraint propagation
# https://en.wikipedia.org/wiki/Local_consistency
# Cluster into components and then order?
class Relation(object):
def __init__(self, heading, body):
self.heading = tuple(heading)
self.body = list(body)
def get_mapping(self, element):
return get_mapping(self.heading, element)
def project_element(self, attributes, element):
value_from_attribute = self.get_mapping(element)
assert all(attr in value_from_attribute for attr in attributes)
return tuple(value_from_attribute[attr] for attr in attributes)
def get_conditional(self, inputs):
outputs = [attribute for attribute in self.heading if attribute not in inputs]
two_from_overlap = defaultdict(set)
for element in self.body:
key = self.project_element(inputs, element)
value = self.project_element(outputs, element)
two_from_overlap[key].add(value) # TODO: preserve ordering
# TODO: return a relation object?
return two_from_overlap
def subtract_attributes(self, attributes):
return tuple(attribute for attribute in self.heading if attribute not in attributes)
def dump(self):
print(self.heading)
for element in self.body:
print(element)
def __repr__(self):
return '|{}| x {}'.format(', '.join(map(str, self.heading)), len(self.body))
def overlapping_attributes(relation1, relation2):
return tuple(attribute for attribute in relation2.heading if attribute in relation1.heading)
def join(relation1, relation2):
# Alternatively, Cartesian product then filter
overlap = overlapping_attributes(relation1, relation2)
new_heading = relation1.heading + relation2.subtract_attributes(overlap)
new_body = []
two_from_overlap = relation2.get_conditional(overlap)
for element in relation1.body:
key = relation1.project_element(overlap, element)
for value in two_from_overlap[key]:
new_body.append(element + value)
return Relation(new_heading, new_body)
def solve_satisfaction(relations):
solution = Relation([], [tuple()])
for relation in relations:
solution = join(solution, relation)
return solution
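# Illustrative sketch (added for exposition, not part of the original module):
# joining two toy relations on their shared attribute '?y' and solving the
# resulting satisfaction problem with the helpers defined above.
def _example_solve_satisfaction():
    r1 = Relation(['?x', '?y'], [(1, 2), (1, 3), (2, 3)])
    r2 = Relation(['?y', '?z'], [(2, 5), (3, 6)])
    solution = solve_satisfaction([r1, r2])
    assert solution.heading == ('?x', '?y', '?z')
    assert set(solution.body) == {(1, 2, 5), (1, 3, 6), (2, 3, 6)}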
| 3,364 |
Python
| 39.542168 | 96 | 0.662604 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/downward.py
|
from __future__ import print_function
import os
import re
import sys
import subprocess
from collections import namedtuple, defaultdict
from time import time
from hsr_tamp.pddlstream.language.constants import EQ, NOT, Head, Evaluation, get_prefix, get_args, OBJECT, TOTAL_COST, Action, Not
from hsr_tamp.pddlstream.language.conversion import is_atom, is_negated_atom, objects_from_evaluations, pddl_from_object, \
pddl_list_from_expression, obj_from_pddl
from hsr_tamp.pddlstream.utils import read, write, INF, clear_dir, get_file_path, MockSet, find_unique, int_ceil, \
safe_remove, safe_zip, elapsed_time
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
USE_CERBERUS = False
#CERBERUS_PATH = '/home/caelan/Programs/cerberus' # Check if this path exists
CERBERUS_PATH = '/home/caelan/Programs/fd-redblack-ipc2018' # Check if this path exists
# Does not support derived predicates
USE_FORBID = False
FORBID_PATH = '/Users/caelan/Programs/external/ForbidIterative'
# --planner topk,topq,topkq,diverse
FORBID_TEMPLATE = 'plan.py --planner topk --number-of-plans {num} --domain {domain} --problem {problem}'
FORBID_COMMAND = os.path.join(FORBID_PATH, FORBID_TEMPLATE)
assert not USE_CERBERUS or not USE_FORBID
# Does not support derived predicates
##################################################
filepath = os.path.abspath(__file__)
if ' ' in filepath:
raise RuntimeError('The path to pddlstream cannot include spaces')
def find_build(fd_path):
for release in ['release', 'release64', 'release32']: # TODO: list the directory
path = os.path.join(fd_path, 'builds/{}/'.format(release))
if os.path.exists(path):
return path
# TODO: could also just automatically compile
raise RuntimeError('Please compile FastDownward first [.../pddlstream$ ./downward/build.py]')
# TODO: check at runtime so users can use utils without FD
FD_PATH = get_file_path(__file__, '../../downward/')
#FD_PATH = get_file_path(__file__, '../../FastDownward/')
TRANSLATE_PATH = os.path.join(find_build(FD_PATH), 'bin/translate')
FD_BIN = os.path.join(find_build(CERBERUS_PATH if USE_CERBERUS else FD_PATH), 'bin')
DOMAIN_INPUT = 'domain.pddl'
PROBLEM_INPUT = 'problem.pddl'
TRANSLATE_FLAGS = [] #if USE_CERBERUS else ['--negative-axioms']
original_argv = sys.argv[:]
sys.argv = sys.argv[:1] + TRANSLATE_FLAGS + [DOMAIN_INPUT, PROBLEM_INPUT]
sys.path.append(TRANSLATE_PATH)
# TODO: max translate time
import pddl.f_expression
import pddl
import instantiate
import pddl_parser.lisp_parser
import normalize
import pddl_parser
from pddl_parser.parsing_functions import parse_domain_pddl, parse_task_pddl, \
parse_condition, check_for_duplicates
sys.argv = original_argv
TEMP_DIR = 'temp/'
TRANSLATE_OUTPUT = 'output.sas'
SEARCH_OUTPUT = 'sas_plan'
SEARCH_COMMAND = 'downward --internal-plan-file {} {} < {}'
INFINITY = 'infinity'
GOAL_NAME = '@goal' # @goal-reachable
INTERNAL_AXIOM = 'new-axiom' # @0
IDENTICAL = "identical" # lowercase is critical (!= instead?)
INTERNAL_PREDICATES = [EQ, IDENTICAL, INTERNAL_AXIOM]
##################################################
# TODO: cost_type=PLUSONE can lead to suboptimality but often doesn't in practice due to COST_SCALE
# TODO: modify parsing_functions to support multiple costs
# bound (int): exclusive depth bound on g-values. Cutoffs are always performed according to the real cost.
# (i.e. solutions must be strictly better than the bound)
HEURISTICS = ['add', 'blind', 'cea', 'ff', 'goalcount', 'hmax', 'lmcut'] # hm
TRANSFORMS = ['NORMAL', 'ONE', 'PLUSONE']
# TODO: move toward using this style
def Heuristic(heuristic='ff', cost_type='PLUSONE'):
return '--heuristic "h={heuristic}(transform=adapt_costs(cost_type={cost_type}))"'.format(
heuristic=heuristic, cost_type=cost_type)
def EagerWeightedAStar(heuristic='ff', weight=1, cost_type='PLUSONE'):
return '--search eager_wastar(evals=[h()], preferred=[], reopen_closed=true, boost=0, w={weight}, pruning=null(), ' \
'cost_type={cost_type}, bound=infinity, max_time=infinity, verbosity=normal)'.format(
weight=weight, cost_type=cost_type)
SEARCH_OPTIONS = {
# See FastDownward's documentation for more configurations
# http://www.fast-downward.org/Doc/Evaluator
# http://www.fast-downward.org/Doc/SearchEngine
# Optimal (when cost_type=NORMAL)
'dijkstra': '--heuristic "h=blind(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
#'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=PLUSONE))"'
# ' --search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"', # cost_type=NORMAL
'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=PLUSONE))"'
' --search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"', # cost_type=PLUSONE
'lmcut-astar': '--heuristic "h=lmcut(transform=adapt_costs(cost_type=PLUSONE))"'
' --search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
# Suboptimal
'ff-astar': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "astar(h,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'ff-eager': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([h],max_time=%s,bound=%s)"',
'ff-eager-pref': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([h],preferred=[h],max_time=%s,bound=%s)"',
'ff-lazy': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_greedy([h],preferred=[h],max_time=%s,bound=%s)"',
'goal-lazy': '--heuristic "h=goalcount(transform=no_transform())" '
'--search "lazy_greedy([h],randomize_successors=True,max_time=%s,bound=%s)"',
'add-random-lazy': '--heuristic "h=add(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_greedy([h],randomize_successors=True,max_time=%s,bound=%s)"',
'ff-eager-tiebreak': '--heuristic "h=ff(transform=no_transform())" '
'--search "eager(tiebreaking([h, g()]),reopen_closed=false,'
'cost_type=PLUSONE,max_time=%s,bound=%s, f_eval=sum([g(), h]))"', # preferred=[h],
'ff-lazy-tiebreak': '--heuristic "h=ff(transform=no_transform())" '
'--search "lazy(tiebreaking([h, g()]),reopen_closed=false,'
'randomize_successors=True,cost_type=PLUSONE,max_time=%s,bound=%s)"', # preferred=[h],
# TODO: eagerly evaluate goal count but lazily compute relaxed plan
'ff-ehc': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "ehc(h,preferred=[h],preferred_usage=RANK_PREFERRED_FIRST,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"',
# The key difference is that ehc resets the open list upon finding an improvement
# TODO: iterated search
}
# TODO: do I want to sort operators in FD hill-climbing search?
# TODO: greedily prioritize operators with less cost. Useful when prioritizing actions that have no stream cost
for w in range(1, 1+5):
SEARCH_OPTIONS.update({
# TODO: specify whether lazy or eager
'ff-wastar{w}'.format(w=w): '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=true,boost=100,w={w},'
'randomize_successors=false,preferred_successors_first=true,random_seed=-1,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
'cea-wastar{w}'.format(w=w): '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w={w},'
'randomize_successors=false,preferred_successors_first=true,random_seed=-1,'
'cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
# TODO: eager_wastar
# http://www.fast-downward.org/Doc/SearchEngine#Eager_weighted_A.2A_search
'ff-astar{w}'.format(w=w): '--evaluator "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager(alt([single(sum([g(), weight(h,{w})])),'
'single(sum([g(),weight(h,{w})]),pref_only=true)]),'
'preferred=[h],cost_type=PLUSONE,max_time=%s,bound=%s)"'.format(w=w),
})
if USE_CERBERUS:
# --internal-previous-portfolio-plans
#import imp
#plan_path = os.path.join(CERBERUS_PATH, 'plan.py')
#plan = imp.load_source('plan', plan_path)
sys.path.append(CERBERUS_PATH)
import importlib
mod = importlib.import_module("plan-agl") # plan | plan-agl | plan-cbo | plan-sat
#SEARCH_OPTIONS['cerberus'] = ' '.join(p.strip() for s in mod.config_string() for p in s.split('\n')) # .replace('\n', ' ')
SEARCH_OPTIONS['cerberus'] = ' '.join(s if s.startswith('--') else '"{}"'.format(s)
for s in mod.config_string())
# TODO: throw a warning if max_planner_time is met
DEFAULT_MAX_TIME = 30 # INF
DEFAULT_CONSERVATIVE_PLANNER = 'ff-astar'
DEFAULT_GREEDY_PLANNER = 'ff-astar2'
DEFAULT_PLANNER = DEFAULT_GREEDY_PLANNER
def print_search_options():
for i, (name, command) in enumerate(sorted(SEARCH_OPTIONS.items())):
print('\n{}) {}: {}'.format(i, name, command))
##################################################
# WARNING: overflow on h^add! Costs clamped to 100000000
MAX_FD_COST = 1e8
def round_cost(cost):
cost_scale = get_cost_scale()
return int(cost_scale * cost) / cost_scale
def get_cost_scale():
return pddl.f_expression.COST_SCALE
def set_cost_scale(cost_scale):
pddl.f_expression.COST_SCALE = cost_scale
def convert_value(value):
if value == INF:
return INFINITY
return int_ceil(value)
def scale_cost(cost):
if cost == INF:
return INF
return int_ceil(get_cost_scale() * float(cost))
def get_min_unit():
return 1. / get_cost_scale()
set_cost_scale(cost_scale=1e3) # TODO: make unit costs be equivalent to cost scale = 0
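# Illustrative sketch (added for exposition, not part of the original module):
# with the cost_scale of 1e3 set above, fractional costs are scaled up to the
# integer costs FastDownward expects and rounded back down when reporting.
def _example_cost_scaling():
    assert scale_cost(0.0015) == 2       # int_ceil(1e3 * 0.0015)
    assert scale_cost(INF) == INF        # infinite costs pass through unscaled
    assert round_cost(0.00151) == 0.001  # truncated to the cost resolution
    assert get_min_unit() == 0.001       # smallest representable cost increment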
##################################################
def parse_lisp(lisp):
return pddl_parser.lisp_parser.parse_nested_list(lisp.splitlines())
# TODO: dynamically generate type_dict and predicate_dict
Domain = namedtuple('Domain', ['name', 'requirements', 'types', 'type_dict', 'constants',
'predicates', 'predicate_dict', 'functions', 'actions', 'axioms', 'pddl'])
def parse_sequential_domain(domain_pddl):
if isinstance(domain_pddl, Domain):
return domain_pddl
args = list(parse_domain_pddl(parse_lisp(domain_pddl))) + [domain_pddl]
domain = Domain(*args)
# for action in domain.actions:
# if (action.cost is not None) and isinstance(action.cost, pddl.Increase) and isinstance(action.cost.expression, pddl.NumericConstant):
# action.cost.expression.value = scale_cost(action.cost.expression.value)
return domain
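# Illustrative sketch (added for exposition, not part of the original module):
# parsing a minimal hand-written STRIPS domain with the helper above. The
# domain and action names are hypothetical.
def _example_parse_sequential_domain():
    domain_pddl = """(define (domain demo)
      (:requirements :strips)
      (:predicates (on-table ?b) (holding ?b))
      (:action pick
        :parameters (?b)
        :precondition (and (on-table ?b))
        :effect (and (holding ?b) (not (on-table ?b)))))"""
    domain = parse_sequential_domain(domain_pddl)
    assert domain.name == 'demo'
    assert [action.name for action in domain.actions] == ['pick']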
Problem = namedtuple('Problem', ['task_name', 'task_domain_name', 'task_requirements',
'objects', 'init', 'goal', 'use_metric', 'pddl'])
def parse_problem(domain, problem_pddl):
if isinstance(problem_pddl, Problem):
return problem_pddl
args = list(parse_task_pddl(parse_lisp(problem_pddl), domain.type_dict, domain.predicate_dict)) + [problem_pddl]
return Problem(*args)
#def parse_action(lisp_list):
# action = [':action', 'test'
# ':parameters', [],
# ':precondition', [],
# ':effect', []]
# parse_action(action)
# pddl_parser.parsing_functions.parse_action(lisp_list, [], {})
# return pddl.Action
##################################################
# fact -> evaluation -> fd
def fd_from_fact(fact):
# TODO: convert to evaluation?
prefix = get_prefix(fact)
if prefix == NOT:
return fd_from_fact(fact[1]).negate()
#if prefix == EQ:
# _, head, value = fact
# predicate = get_prefix(head)
# args = list(map(pddl_from_object, get_args(head)))
# fluent = pddl.f_expression.PrimitiveNumericExpression(symbol=predicate, args=args)
# expression = pddl.f_expression.NumericConstant(value)
# return pddl.f_expression.Assign(fluent, expression)
args = list(map(pddl_from_object, get_args(fact)))
return pddl.Atom(prefix, args)
def fact_from_fd(fd):
assert(isinstance(fd, pddl.Literal))
atom = (fd.predicate,) + tuple(map(obj_from_pddl, fd.args))
return Not(atom) if fd.negated else atom
def evaluation_from_fd(fd):
if isinstance(fd, pddl.Literal):
head = Head(fd.predicate, tuple(map(obj_from_pddl, fd.args)))
return Evaluation(head, not fd.negated)
if isinstance(fd, pddl.f_expression.Assign):
head = Head(fd.fluent.symbol, tuple(map(obj_from_pddl, fd.fluent.args)))
return Evaluation(head, float(fd.expression.value) / get_cost_scale()) # Need to be careful due to rounding
raise ValueError(fd)
def fd_from_evaluation(evaluation):
name = evaluation.head.function
args = tuple(map(pddl_from_object, evaluation.head.args))
if is_atom(evaluation):
return pddl.Atom(name, args)
elif is_negated_atom(evaluation):
return pddl.NegatedAtom(name, args)
fluent = pddl.f_expression.PrimitiveNumericExpression(symbol=name, args=args)
expression = pddl.f_expression.NumericConstant(evaluation.value)
return pddl.f_expression.Assign(fluent, expression)
def fd_from_evaluations(evaluations):
return [fd_from_evaluation(e) for e in evaluations if not is_negated_atom(e)]
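# Illustrative sketch (added for exposition, not part of the original module):
# converting a positive evaluation into a FastDownward atom. It assumes that
# pddl_from_object passes plain strings through unchanged.
def _example_fd_from_evaluation():
    evaluation = Evaluation(Head('on-table', ('b1',)), True)
    atom = fd_from_evaluation(evaluation)
    assert isinstance(atom, pddl.Atom)
    assert (atom.predicate, atom.args) == ('on-table', ('b1',))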
##################################################
def parse_goal(goal_exp, domain):
#try:
# pass
#except SystemExit as e:
# return False
return parse_condition(pddl_list_from_expression(goal_exp),
domain.type_dict, domain.predicate_dict).simplified()
def get_problem(evaluations, goal_exp, domain, unit_costs=False):
objects = objects_from_evaluations(evaluations)
typed_objects = list({make_object(pddl_from_object(obj)) for obj in objects} - set(domain.constants))
# TODO: this doesn't include =
init = fd_from_evaluations(evaluations)
goal = pddl.Truth() if goal_exp is None else parse_goal(goal_exp, domain)
#print('{} objects and {} atoms'.format(len(objects), len(init)))
problem_pddl = None
if USE_FORBID:
problem_pddl = get_problem_pddl(evaluations, goal_exp, domain.pddl, temporal=False)
write_pddl(domain.pddl, problem_pddl, temp_dir=TEMP_DIR)
return Problem(task_name=domain.name, task_domain_name=domain.name,
objects=sorted(typed_objects, key=lambda o: o.name),
task_requirements=pddl.tasks.Requirements([]), init=init, goal=goal,
use_metric=not unit_costs, pddl=problem_pddl)
def get_identical_atoms(objects):
# TODO: optimistically evaluate (not (= ?o1 ?o2))
init = []
for fd_obj in objects:
obj = obj_from_pddl(fd_obj.name)
if obj.is_unique():
init.append(pddl.Atom(IDENTICAL, (fd_obj.name, fd_obj.name)))
else:
assert obj.is_shared()
return init
def task_from_domain_problem(domain, problem, add_identical=True):
    # TODO: prune evaluations that aren't needed in actions
#domain_name, domain_requirements, types, type_dict, constants, \
# predicates, predicate_dict, functions, actions, axioms = domain
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric, problem_pddl = problem
assert domain.name == task_domain_name
requirements = pddl.Requirements(sorted(set(domain.requirements.requirements +
task_requirements.requirements)))
objects = domain.constants + objects
check_for_duplicates([o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
init.extend(pddl.Atom(EQ, (obj.name, obj.name)) for obj in objects)
if add_identical:
init.extend(get_identical_atoms(objects))
#print('{} objects and {} atoms'.format(len(objects), len(init)))
task = pddl.Task(domain.name, task_name, requirements, domain.types, objects,
domain.predicates, domain.functions, init, goal,
domain.actions, domain.axioms, use_metric)
normalize.normalize(task)
# task.add_axiom
return task
##################################################
def get_derived_predicates(axioms):
axioms_from_name = defaultdict(list)
for axiom in axioms:
axioms_from_name[axiom.name].append(axiom)
return axioms_from_name
def get_fluents(domain):
fluent_predicates = set(get_derived_predicates(domain.axioms))
for action in domain.actions:
for effect in action.effects:
fluent_predicates.add(effect.literal.predicate)
return fluent_predicates
def is_literal(condition):
return isinstance(condition, pddl.Literal)
def get_literals(condition):
if is_literal(condition):
return [condition]
if isinstance(condition, pddl.Truth):
return []
if isinstance(condition, pddl.Conjunction):
literals = []
for c in condition.parts:
literals.extend(get_literals(c))
return literals
raise ValueError(condition)
def get_conjunctive_parts(condition):
# TODO: apply recursively
return condition.parts if isinstance(condition, pddl.Conjunction) else [condition]
def get_disjunctive_parts(condition):
return condition.parts if isinstance(condition, pddl.Disjunction) else [condition]
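# Illustrative sketch (added for exposition, not part of the original module):
# flattening a nested conjunction into its literals with the helpers above.
def _example_get_literals():
    a = pddl.Atom('p', ('x',))
    b = pddl.NegatedAtom('q', ('y',))
    condition = pddl.Conjunction([a, pddl.Conjunction([b])])
    assert get_literals(condition) == [a, b]
    assert get_conjunctive_parts(a) == [a]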
##################################################
def normalize_domain_goal(domain, goal_exp):
evaluations = []
problem = get_problem(evaluations, goal_exp, domain, unit_costs=False)
task = task_from_domain_problem(domain, problem)
normalize.normalize(task)
return task
def run_search(temp_dir, planner=DEFAULT_PLANNER, max_planner_time=DEFAULT_MAX_TIME,
max_cost=INF, debug=False):
"""
Runs FastDownward's search phase on translated SAS+ problem TRANSLATE_OUTPUT
:param temp_dir: the directory for temporary FastDownward input and output files
:param planner: a keyword for the FastDownward search configuration in SEARCH_OPTIONS
:param max_planner_time: the maximum runtime of FastDownward
:param max_cost: the maximum FastDownward plan cost
:param debug: If True, print the FastDownward search output
:return: a tuple (plan, cost) where plan is a sequence of PDDL actions
(or None) and cost is the cost of the plan (INF if no plan)
"""
max_time = convert_value(max_planner_time)
max_cost = convert_value(scale_cost(max_cost))
start_time = time()
search = os.path.abspath(os.path.join(FD_BIN, SEARCH_COMMAND))
if planner == 'cerberus':
planner_config = SEARCH_OPTIONS[planner] # Check if max_time, max_cost exist
else:
planner_config = SEARCH_OPTIONS[planner] % (max_time, max_cost)
temp_dir = os.path.abspath(temp_dir)
command = search.format(os.path.join(temp_dir, SEARCH_OUTPUT), planner_config,
os.path.join(temp_dir, TRANSLATE_OUTPUT))
domain_path = os.path.abspath(os.path.join(temp_dir, DOMAIN_INPUT))
problem_path = os.path.abspath(os.path.join(temp_dir, PROBLEM_INPUT))
if USE_FORBID:
command = FORBID_COMMAND.format(num=2, domain=domain_path, problem=problem_path)
if debug:
print('Search command:', command)
# os.popen is deprecated
# run, call, check_call, check_output
#with subprocess.Popen(command.split(), stdout=subprocess.PIPE, shell=True, cwd=None) as proc:
# output = proc.stdout.read()
# CalledProcessError
#try:
# output = subprocess.check_output(command, shell=True, cwd=None) #, timeout=None)
#except subprocess.CalledProcessError as e:
# print(e)
#temp_path = temp_dir
temp_path = os.path.join(os.getcwd(), TEMP_DIR) # TODO: temp dir?
for filename in os.listdir(temp_path):
if filename.startswith(SEARCH_OUTPUT):
safe_remove(os.path.join(temp_path, filename))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, cwd=None, close_fds=True)
output, error = proc.communicate()
#if proc.returncode not in [0, 12]: # Good: [0, 12] | Bad: [127]
# raise RuntimeError(proc.returncode)
if USE_FORBID:
for filename in os.listdir(FORBID_PATH):
if filename.startswith(SEARCH_OUTPUT):
os.rename(os.path.join(FORBID_PATH, filename), os.path.join(temp_path, filename))
if debug:
print(output.decode(encoding='UTF-8')[:-1])
print('Search runtime: {:.3f}'.format(elapsed_time(start_time)))
plan_files = sorted(f for f in os.listdir(temp_path) if f.startswith(SEARCH_OUTPUT))
print('Plans:', plan_files)
return parse_solutions(temp_path, plan_files)
##################################################
def parse_action(line):
entries = line.strip('( )').split(' ')
name = entries[0]
args = tuple(entries[1:])
return Action(name, args)
def parse_solution(solution):
#action_regex = r'\((\w+(\s+\w+)\)' # TODO: regex
cost = INF
if solution is None:
return None, cost
cost_regex = r'cost\s*=\s*(\d+)'
matches = re.findall(cost_regex, solution)
if matches:
cost = float(matches[0]) / get_cost_scale()
# TODO: recover the actual cost of the plan from the evaluations
lines = solution.split('\n')[:-2] # Last line is newline, second to last is cost
plan = list(map(parse_action, lines))
return plan, cost
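# Illustrative sketch (added for exposition, not part of the original module):
# parsing a minimal FastDownward plan file; the action names are hypothetical.
def _example_parse_solution():
    solution = '(pick block1)\n(place block1 table)\n; cost = 2000 (general cost)\n'
    plan, cost = parse_solution(solution)
    assert [action.name for action in plan] == ['pick', 'place']
    assert cost == 2000 / get_cost_scale()  # 2.0 under the default 1e3 scale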
def parse_solutions(temp_path, plan_files):
# TODO: return multiple solutions for focused
best_plan, best_cost = None, INF
for plan_file in plan_files:
solution = read(os.path.join(temp_path, plan_file))
plan, cost = parse_solution(solution)
if cost < best_cost:
best_plan, best_cost = plan, cost
return best_plan, best_cost
def write_pddl(domain_pddl=None, problem_pddl=None, temp_dir=TEMP_DIR):
clear_dir(temp_dir)
domain_path = os.path.join(temp_dir, DOMAIN_INPUT)
if domain_pddl is not None:
write(domain_path, domain_pddl)
problem_path = os.path.join(temp_dir, PROBLEM_INPUT)
if problem_pddl is not None:
write(problem_path, problem_pddl)
return domain_path, problem_path
##################################################
def literal_holds(state, literal):
#return (literal in state) != literal.negated
return (literal.positive() in state) != literal.negated
def conditions_hold(state, conditions):
return all(literal_holds(state, cond) for cond in conditions)
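# Illustrative sketch (added for exposition, not part of the original module):
# evaluating positive and negated literals against a set of ground atoms.
def _example_literal_holds():
    state = {pddl.Atom('on-table', ('b1',))}
    assert literal_holds(state, pddl.Atom('on-table', ('b1',)))
    assert not literal_holds(state, pddl.Atom('clear', ('b1',)))
    assert literal_holds(state, pddl.NegatedAtom('clear', ('b1',)))
    assert conditions_hold(state, [pddl.Atom('on-table', ('b1',)),
                                   pddl.NegatedAtom('clear', ('b1',))])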
def get_precondition(operator):
if isinstance(operator, pddl.Action) or isinstance(operator, pddl.PropositionalAction):
return operator.precondition
elif isinstance(operator, pddl.Axiom) or isinstance(operator, pddl.PropositionalAxiom):
return operator.condition
raise ValueError(operator)
def get_conditional_effects(operator):
if isinstance(operator, pddl.PropositionalAction):
return [(cond, effect.negate()) for cond, effect in operator.del_effects] + \
[(cond, effect) for cond, effect in operator.add_effects]
elif isinstance(operator, pddl.PropositionalAxiom):
return [([], operator.effect)]
raise ValueError(operator)
def get_effects(operator):
# TODO: conditional effects
return [effect for _, effect in get_conditional_effects(operator)]
def is_applicable(state, action):
return conditions_hold(state, get_precondition(action))
def apply_action(state, action):
assert(isinstance(action, pddl.PropositionalAction))
# TODO: signed literals
# TODO: relaxed_apply_action
for conditions, effect in action.del_effects:
if conditions_hold(state, conditions):
state.discard(effect)
for conditions, effect in action.add_effects:
if conditions_hold(state, conditions):
state.add(effect)
def apply_axiom(state, axiom):
    assert(isinstance(axiom, pddl.PropositionalAxiom))
state.add(axiom.effect)
def is_valid_plan(initial_state, plan): #, goal):
state = set(initial_state)
for action in plan:
if not is_applicable(state, action):
return False
apply_action(state, action)
return True
#def apply_lifted_action(state, action):
# assert(isinstance(state, pddl.Action))
# assert(not action.parameters)
# for effect in state.effects:
# assert(not effect.parameters)
def plan_cost(plan):
cost = 0
for action in plan:
cost += action.cost
return cost
def substitute_derived(axiom_plan, action_instance):
# TODO: what if the propositional axiom has conditional derived
axiom_pre = {p for ax in axiom_plan for p in ax.condition}
axiom_eff = {ax.effect for ax in axiom_plan}
action_instance.precondition = list((set(action_instance.precondition) | axiom_pre) - axiom_eff)
##################################################
def get_function_assignments(task):
return {f.fluent: f.expression for f in task.init
if isinstance(f, pddl.f_expression.FunctionAssignment)}
def get_action_instances(task, action_plan):
type_to_objects = instantiate.get_objects_by_type(task.objects, task.types)
function_assignments = get_function_assignments(task)
predicate_to_atoms = instantiate.get_atoms_by_predicate(task.init)
fluent_facts = MockSet()
init_facts = set()
action_instances = []
for name, objects in action_plan:
# TODO: what if more than one action of the same name due to normalization?
# Normalized actions have same effects, so I just have to pick one
# TODO: conditional effects and internal parameters
action = find_unique(lambda a: a.name == name, task.actions)
args = list(map(pddl_from_object, objects))
variable_mapping = {p.name: a for p, a in safe_zip(action.parameters, args)}
instance = action.instantiate(variable_mapping, init_facts, fluent_facts, type_to_objects,
task.use_min_cost_metric, function_assignments, predicate_to_atoms)
assert (instance is not None)
action_instances.append(instance)
return action_instances
##################################################
def add_preimage_condition(condition, preimage, i):
for literal in condition:
#preimage[literal] = preimage.get(literal, set()) | {i}
preimage.setdefault(literal, set()).add(i)
#preimage.update(condition)
def add_preimage_effect(effect, preimage):
preimage.pop(effect, None)
#if effect in preimage:
# # Fluent effects kept, static dropped
# preimage.remove(effect)
def has_conditional_effects(action_instance):
for conditions, effect in (action_instance.add_effects + action_instance.del_effects):
if conditions:
return True
return False
def action_preimage(action, preimage, i):
for conditions, effect in (action.add_effects + action.del_effects):
assert(not conditions)
# TODO: can later select which conditional effects are used
        # TODO: might need to truly decide whether one should hold or not for a preimage
# Maybe I should do that here
add_preimage_effect(effect, preimage)
add_preimage_condition(action.precondition, preimage, i)
def axiom_preimage(axiom, preimage, i):
add_preimage_effect(axiom.effect, preimage)
add_preimage_condition(axiom.condition, preimage, i)
def plan_preimage(combined_plan, goal=[]):
#preimage = set(goal)
action_plan = [action for action in combined_plan if isinstance(action, pddl.PropositionalAction)]
step = len(action_plan)
preimage = {condition: {step} for condition in goal}
for operator in reversed(combined_plan):
if isinstance(operator, pddl.PropositionalAction):
step -= 1
action_preimage(operator, preimage, step)
elif isinstance(operator, pddl.PropositionalAxiom):
axiom_preimage(operator, preimage, step)
else:
raise ValueError(operator)
return preimage
##################################################
def add_predicate(domain, predicate):
if predicate.name in domain.predicate_dict:
return False
domain.predicates.append(predicate)
domain.predicate_dict[predicate.name] = predicate
return True
def make_object(obj, type=OBJECT):
return pddl.TypedObject(obj, type)
def make_parameters(parameters, **kwargs):
return tuple(make_object(p, **kwargs) for p in parameters)
def make_predicate(name, parameters):
return pddl.Predicate(name, make_parameters(parameters))
def make_preconditions(preconditions):
return pddl.Conjunction(list(map(fd_from_fact, preconditions)))
def make_effects(effects):
return [pddl.Effect(parameters=[], condition=pddl.Truth(),
literal=fd_from_fact(fact)) for fact in effects]
def make_cost(cost):
if cost is None:
return cost
fluent = pddl.PrimitiveNumericExpression(symbol=TOTAL_COST, args=[])
try:
expression = pddl.NumericConstant(cost)
except TypeError:
expression = pddl.PrimitiveNumericExpression(
symbol=get_prefix(cost), args=list(map(pddl_from_object, get_args(cost))))
return pddl.Increase(fluent=fluent, expression=expression)
def has_costs(domain):
for action in domain.actions:
if (action.cost is not None) or (action.cost == 0):
return True
return False
def set_unit_costs(domain):
# Cost of None becomes zero if metric = True
#set_cost_scale(1)
for action in domain.actions:
action.cost = make_cost(1)
def make_action(name, parameters, preconditions, effects, cost=None):
# Usually all parameters are external
return pddl.Action(name=name,
parameters=make_parameters(parameters),
num_external_parameters=len(parameters),
precondition=make_preconditions(preconditions),
effects=make_effects(effects),
cost=make_cost(cost))
def make_axiom(parameters, preconditions, derived):
predicate = get_prefix(derived)
external_parameters = list(get_args(derived))
internal_parameters = [p for p in parameters if p not in external_parameters]
parameters = external_parameters + internal_parameters
return pddl.Axiom(name=predicate,
parameters=make_parameters(parameters),
num_external_parameters=len(external_parameters),
condition=make_preconditions(preconditions))
def make_domain(constants=[], predicates=[], functions=[], actions=[], axioms=[]):
types = [pddl.Type(OBJECT)]
pddl_parser.parsing_functions.set_supertypes(types)
return Domain(name='', requirements=pddl.Requirements([]),
types=types, type_dict={ty.name: ty for ty in types}, constants=constants,
predicates=predicates, predicate_dict={p.name: p for p in predicates},
functions=functions, actions=actions, axioms=axioms, pddl=None)
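# Illustrative sketch (added for exposition, not part of the original module):
# assembling a tiny single-action domain from the constructors above. All
# predicate/action names are hypothetical.
def _example_make_domain():
    predicates = [make_predicate('on-table', ['?b']), make_predicate('clear', ['?b'])]
    pick = make_action(name='pick', parameters=['?b'],
                       preconditions=[('on-table', '?b'), ('clear', '?b')],
                       effects=[('not', ('on-table', '?b'))], cost=1)
    domain = make_domain(predicates=predicates, actions=[pick])
    assert 'on-table' in domain.predicate_dict
    return domain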
def pddl_from_instance(instance):
action = instance.action
args = [instance.var_mapping[p.name]
for p in action.parameters[:action.num_external_parameters]]
return Action(action.name, args)
| 31,750 |
Python
| 40.887863 | 142 | 0.652409 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/recover_optimizers.py
|
import copy
from hsr_tamp.pddlstream.algorithms.common import INIT_EVALUATION
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders, get_stream_plan_components
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, is_plan, get_args
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.optimizer import ComponentStream, OptimizerStream
from hsr_tamp.pddlstream.utils import neighbors_from_orders, get_mapping, safe_apply_mapping
CLUSTER = True
def get_optimizer(result):
return result.external.optimizer if isinstance(result.external, ComponentStream) else None
##################################################
def combine_optimizer_plan(stream_plan, functions):
if not stream_plan:
return stream_plan
optimizer = get_optimizer(stream_plan[-1])
if optimizer is None:
return stream_plan
function_plan = list(filter(lambda r: get_prefix(r.instance.external.head)
in optimizer.objectives, functions))
external_plan = stream_plan + function_plan
cluster_plans = get_stream_plan_components(external_plan) if CLUSTER else [external_plan]
optimizer_plan = []
for cluster_plan in cluster_plans:
if all(isinstance(r, FunctionResult) for r in cluster_plan):
continue
#if len(cluster_plan) == 1:
# optimizer_plan.append(cluster_plan[0])
# continue
stream = OptimizerStream(optimizer, cluster_plan)
instance = stream.get_instance(stream.input_objects, fluent_facts=stream.fluent_facts)
result = instance.get_result(stream.output_objects)
optimizer_plan.append(result)
return optimizer_plan
def combine_optimizers(evaluations, external_plan):
if not is_plan(external_plan):
return external_plan
stream_plan, function_plan = partition_external_plan(external_plan)
optimizers = {get_optimizer(r) for r in stream_plan} # None is like a unique optimizer
if len(optimizers - {None}) == 0:
return external_plan
print('Constraint plan: {}'.format(external_plan))
combined_results = []
for optimizer in optimizers:
relevant_results = [r for r in stream_plan if get_optimizer(r) == optimizer]
combined_results.extend(combine_optimizer_plan(relevant_results, function_plan))
combined_results.extend(function_plan)
current_facts = set()
for result in combined_results:
current_facts.update(filter(lambda f: evaluation_from_fact(f) in evaluations, result.get_domain()))
combined_plan = []
while combined_results:
for result in combined_results:
if set(result.get_domain()) <= current_facts:
combined_plan.append(result)
current_facts.update(result.get_certified())
combined_results.remove(result)
break
else: # TODO: can also just try one cluster and return
raise RuntimeError()
#return None
return combined_plan
##################################################
def retrace_instantiation(fact, streams, evaluations, free_parameters, visited_facts, planned_results):
# Makes two assumptions:
# 1) Each stream achieves a "primary" fact that uses all of its inputs + outputs
# 2) Outputs are only free parameters (no constants)
if (evaluation_from_fact(fact) in evaluations) or (fact in visited_facts):
return
visited_facts.add(fact)
for stream in streams:
for cert in stream.certified:
if get_prefix(fact) == get_prefix(cert):
mapping = get_mapping(get_args(cert), get_args(fact)) # Should be same anyways
if not all(p in mapping for p in (stream.inputs + stream.outputs)):
# TODO: assumes another effect is sufficient for binding
# Create arbitrary objects for inputs/outputs that aren't mentioned
# Can lead to incorrect ordering
continue
input_objects = safe_apply_mapping(stream.inputs, mapping)
output_objects = safe_apply_mapping(stream.outputs, mapping)
if not all(out in free_parameters for out in output_objects):
# Can only bind if free
continue
instance = stream.get_instance(input_objects)
for new_fact in instance.get_domain():
retrace_instantiation(new_fact, streams, evaluations, free_parameters,
visited_facts, planned_results)
planned_results.append(instance.get_result(output_objects))
def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
# TODO: return multiple plans?
# TODO: can instead have multiple goal binding combinations
# TODO: can replan using samplers as well
if not is_plan(external_plan):
return None
optimizers = list(filter(lambda s: isinstance(s, ComponentStream), optimizers))
if not optimizers:
return None
stream_plan, function_plan = partition_external_plan(external_plan)
free_parameters = {o for r in stream_plan for o in r.output_objects}
#free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
initial_evaluations = {e: n for e, n in evaluations.items() if n.result == INIT_EVALUATION}
#initial_evaluations = evaluations
goal_facts = set()
for result in stream_plan:
goal_facts.update(filter(lambda f: evaluation_from_fact(f) not in
initial_evaluations, result.get_certified()))
visited_facts = set()
new_results = []
for fact in goal_facts:
retrace_instantiation(fact, optimizers, initial_evaluations, free_parameters, visited_facts, new_results)
# TODO: ensure correct ordering
new_results = list(filter(lambda r: isinstance(r, ComponentStream), new_results))
#from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
#node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
#extract_stream_plan(node_from_atom, target_facts, stream_plan)
optimizer_results = []
for optimizer in {get_optimizer(r) for r in new_results}: # None is like a unique optimizer:
relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
#print(str_from_object(set(map(fact_from_evaluation, evaluations))))
#print(str_from_object(set(goal_facts)))
# TODO: can do the flexibly sized optimizers search
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
optimizer_plan = reschedule_stream_plan(initial_evaluations, goal_facts, copy.copy(domain),
(stream_plan + optimizer_results), unique_binding=True)
if not is_plan(optimizer_plan):
return None
return optimizer_plan + function_plan
##################################################
def combine_optimizers_greedy(evaluations, external_plan):
if not is_plan(external_plan):
return external_plan
    # The key thing is that a variable must be grounded before it can be used in a non-stream thing
# TODO: construct variables in order
# TODO: graph cut algorithm to minimize the number of constraints that are excluded
# TODO: reorder to ensure that constraints are done first since they are likely to fail as tests
incoming_edges, outgoing_edges = neighbors_from_orders(get_partial_orders(external_plan))
queue = []
functions = []
for v in external_plan:
if not incoming_edges[v]:
(functions if isinstance(v, FunctionResult) else queue).append(v)
current = []
ordering = []
while queue:
optimizer = get_optimizer(current[-1]) if current else None
for v in queue:
if optimizer == get_optimizer(v):
current.append(v)
break
else:
ordering.extend(combine_optimizer_plan(current, functions))
current = [queue[0]]
v1 = current[-1]
queue.remove(v1)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1)
if not incoming_edges[v2]:
(functions if isinstance(v2, FunctionResult) else queue).append(v2)
ordering.extend(combine_optimizer_plan(current, functions))
return ordering + functions
| 8,831 |
Python
| 47.262295 | 117 | 0.660288 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/incremental.py
|
from collections import Counter
import time
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.common import add_facts, add_certified, SolutionStore, UNKNOWN_EVALUATION
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem
from hsr_tamp.pddlstream.algorithms.instantiate_task import sas_from_pddl, instantiate_task
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.search import abstrips_solve_from_task
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl_plan
from hsr_tamp.pddlstream.language.attachments import has_attachments, compile_fluents_as_attachments, solve_pyplanners
from hsr_tamp.pddlstream.language.statistics import load_stream_statistics, write_stream_statistics
from hsr_tamp.pddlstream.language.temporal import solve_tfd, SimplifiedDomain
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
from hsr_tamp.pddlstream.utils import INF, Verbose, str_from_object, elapsed_time
UPDATE_STATISTICS = False
def solve_temporal(evaluations, goal_exp, domain, debug=False, **kwargs):
assert isinstance(domain, SimplifiedDomain)
problem = get_problem_pddl(evaluations, goal_exp, domain.pddl)
return solve_tfd(domain.pddl, problem, debug=debug)
def solve_sequential(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
if has_attachments(domain):
with Verbose(debug):
instantiated = instantiate_task(task)
return solve_pyplanners(instantiated, **search_args)
sas_task = sas_from_pddl(task, debug=debug)
return abstrips_solve_from_task(sas_task, debug=debug, **search_args)
def solve_finite(evaluations, goal_exp, domain, **kwargs):
if isinstance(domain, SimplifiedDomain):
pddl_plan, cost = solve_temporal(evaluations, goal_exp, domain, **kwargs)
else:
pddl_plan, cost = solve_sequential(evaluations, goal_exp, domain, **kwargs)
plan = obj_from_pddl_plan(pddl_plan)
return plan, cost
##################################################
def process_instance(instantiator, store, instance, verbose=False): #, **complexity_args):
if instance.enumerated:
return []
start_time = time.time()
new_results, new_facts = instance.next_results(verbose=verbose)
store.sample_time += elapsed_time(start_time)
evaluations = store.evaluations
#remove_blocked(evaluations, instance, new_results)
for result in new_results:
complexity = result.compute_complexity(evaluations)
#complexity = instantiator.compute_complexity(instance)
for evaluation in add_certified(evaluations, result):
instantiator.add_atom(evaluation, complexity)
fact_complexity = 0 # TODO: record the instance or treat as initial?
for evaluation in add_facts(evaluations, new_facts, result=UNKNOWN_EVALUATION, complexity=fact_complexity):
instantiator.add_atom(evaluation, fact_complexity)
if not instance.enumerated:
instantiator.push_instance(instance)
return new_results
def process_stream_queue(instantiator, store, complexity_limit=INF, verbose=False):
instances = []
results = []
num_successes = 0
while not store.is_terminated() and instantiator and (instantiator.min_complexity() <= complexity_limit):
instance = instantiator.pop_stream()
if instance.enumerated:
continue
instances.append(instance)
new_results = process_instance(instantiator, store, instance, verbose=verbose)
results.extend(new_results)
num_successes += bool(new_results) # TODO: max_results?
if verbose:
print('Eager Calls: {} | Successes: {} | Results: {} | Counts: {}'.format(
len(instances), num_successes, len(results),
str_from_object(Counter(instance.external.name for instance in instances))))
return len(instances)
# def retrace_stream_plan(store, domain, goal_expression):
# # TODO: retrace the stream plan that supports the plan to find the certificate
# if store.best_plan is None:
# return None
# assert not domain.axioms
# from hsr_tamp.pddlstream.algorithms.downward import plan_preimage
# print(goal_expression)
# plan_preimage(store.best_plan, goal_expression)
# raise NotImplementedError()
##################################################
def solve_incremental(problem, constraints=PlanConstraints(),
unit_costs=False, success_cost=INF,
max_iterations=INF, max_time=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
verbose=False, **search_kwargs):
"""
Solves a PDDLStream problem by alternating between applying all possible streams and searching
:param problem: a PDDLStream problem
:param constraints: PlanConstraints on the set of legal solutions
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# max_complexity = 0 => current
# complexity_step = INF => exhaustive
# success_cost = terminate_cost = decision_cost
# TODO: warning if optimizers are present
evaluations, goal_expression, domain, externals = parse_problem(
problem, constraints=constraints, unit_costs=unit_costs)
store = SolutionStore(evaluations, max_time, success_cost, verbose, max_memory=max_memory) # TODO: include other info here?
if UPDATE_STATISTICS:
load_stream_statistics(externals)
static_externals = compile_fluents_as_attachments(domain, externals)
num_iterations = num_calls = 0
complexity_limit = initial_complexity
instantiator = Instantiator(static_externals, evaluations)
num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
while not store.is_terminated() and (num_iterations < max_iterations) and (complexity_limit <= max_complexity):
num_iterations += 1
print('Iteration: {} | Complexity: {} | Calls: {} | Evaluations: {} | Solved: {} | Cost: {:.3f} | '
'Search Time: {:.3f} | Sample Time: {:.3f} | Time: {:.3f}'.format(
num_iterations, complexity_limit, num_calls, len(evaluations),
store.has_solution(), store.best_cost, store.search_time, store.sample_time, store.elapsed_time()))
plan, cost = solve_finite(evaluations, goal_expression, domain,
max_cost=min(store.best_cost, constraints.max_cost), **search_kwargs)
if is_plan(plan):
store.add_plan(plan, cost)
if not instantiator:
break
if complexity_step is None:
# TODO: option to select the next k-smallest complexities
complexity_limit = instantiator.min_complexity()
else:
complexity_limit += complexity_step
num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
#retrace_stream_plan(store, domain, goal_expression)
#print('Final queue size: {}'.format(len(instantiator)))
summary = store.export_summary()
summary.update({
'iterations': num_iterations,
'complexity': complexity_limit,
})
print('Summary: {}'.format(str_from_object(summary, ndigits=3))) # TODO: return the summary
if UPDATE_STATISTICS:
write_stream_statistics(externals, verbose)
return store.extract_solution()
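# Illustrative usage sketch (added for exposition, not part of the original
# module): a hypothetical single-action problem with no streams. PDDLProblem is
# the problem tuple from language.constants; running this end-to-end requires a
# compiled FastDownward.
def _example_solve_incremental():
    from hsr_tamp.pddlstream.language.constants import PDDLProblem
    domain_pddl = """(define (domain demo)
      (:predicates (on-table ?b) (holding ?b))
      (:action pick
        :parameters (?b)
        :precondition (on-table ?b)
        :effect (and (holding ?b) (not (on-table ?b)))))"""
    stream_pddl = '(define (stream demo))'
    init = [('on-table', 'b1')]
    goal = ('holding', 'b1')
    problem = PDDLProblem(domain_pddl, {}, stream_pddl, {}, init, goal)
    plan, cost, certificate = solve_incremental(problem, unit_costs=True)
    return plan  # expected: a single 'pick' action applied to 'b1'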
##################################################
def solve_immediate(problem, **kwargs):
"""
Solves a PDDLStream problem by searching only
    INCOMPLETENESS WARNING: only use if no stream evaluations are necessary (otherwise terminates early)
:param problem: a PDDLStream problem
:param kwargs: keyword args for solve_incremental
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
    return solve_incremental(problem, initial_complexity=0, complexity_step=0, max_complexity=0, **kwargs)
def solve_exhaustive(problem, **kwargs):
"""
Solves a PDDLStream problem by applying all possible streams and searching once
    INCOMPLETENESS WARNING: only use if there is a finite set of instantiable stream instances (otherwise infinite loop)
:param problem: a PDDLStream problem
:param kwargs: keyword args for solve_incremental
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
    return solve_incremental(problem, initial_complexity=INF, complexity_step=INF, max_complexity=INF, **kwargs)
| 9,913 |
Python
| 49.324873 | 127 | 0.7012 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/skeleton.py
|
from __future__ import print_function
import time
from collections import namedtuple, Sized
from itertools import count
from heapq import heappush, heappop
from hsr_tamp.pddlstream.algorithms.common import is_instance_ready, compute_complexity, stream_plan_complexity, add_certified, \
stream_plan_preimage, COMPLEXITY_OP
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.algorithms.disabled import process_instance, update_bindings, update_cost, bind_action_plan
from hsr_tamp.pddlstream.algorithms.reorder import get_output_objects, get_object_orders, get_partial_orders, get_initial_orders
from hsr_tamp.pddlstream.language.constants import is_plan, INFEASIBLE, FAILED, SUCCEEDED
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.algorithms.visualization import visualize_stream_orders
from hsr_tamp.pddlstream.utils import elapsed_time, HeapElement, apply_mapping, INF, get_mapping, adjacent_from_edges, \
incoming_from_edges, outgoing_from_edges
# TODO: the bias away from solved things is actually due to USE_PRIORITIES+timed_process not REQUIRE_DOWNSTREAM
USE_PRIORITIES = True
GREEDY_VISITS = 0
GREEDY_BEST = True
REQUIRE_DOWNSTREAM = True
Priority = namedtuple('Priority', ['not_greedy', 'complexity', 'visits', 'remaining', 'cost']) # TODO: FIFO
Affected = namedtuple('Affected', ['indices', 'has_cost'])
def compute_affected_downstream(stream_plan, index):
# TODO: if the cost is pruned, then add everything that contributes, not just the last function
affected_indices = [index]
result = stream_plan[index]
has_cost = (type(result) is FunctionResult)
output_objects = set(get_output_objects(result))
if not output_objects: # TODO: should I do conditions instead?
return Affected(affected_indices, has_cost)
for index2 in range(index + 1, len(stream_plan)):
result2 = stream_plan[index2]
if output_objects & result2.instance.get_all_input_objects(): # TODO: get_object_orders
output_objects.update(get_output_objects(result2)) # TODO: just include directly affected?
affected_indices.append(index2)
has_cost |= (type(result2) is FunctionResult)
return Affected(affected_indices, has_cost)
def compute_affected_component(stream_plan, index):
# TODO: affected upstream
raise NotImplementedError()
##################################################
class Skeleton(object):
def __init__(self, queue, stream_plan, action_plan, cost):
# TODO: estimate statistics per stream_instance online and use to reorder the skeleton
self.queue = queue
self.index = len(self.queue.skeletons)
self.stream_plan = stream_plan
self.action_plan = action_plan
self.cost = cost
self.best_binding = None
self.improved = False
self.root = Binding(self, self.cost, history=[], mapping={}, index=0, parent=None, parent_result=None)
self.affected_indices = [compute_affected_downstream(self.stream_plan, index)
for index in range(len(self.stream_plan))]
stream_orders = get_partial_orders(self.stream_plan) # init_facts=self.queue.evaluations)
index_from_result = get_mapping(stream_plan, range(len(stream_plan)))
index_orders = {(index_from_result[r1], index_from_result[r2]) for r1, r2 in stream_orders}
preimage = stream_plan_preimage(stream_plan)
self.preimage_complexities = [[queue.evaluations[evaluation_from_fact(fact)].complexity
for fact in stream.get_domain() if fact in preimage] for stream in stream_plan]
self.incoming_indices = incoming_from_edges(index_orders)
self.outgoing_indices = outgoing_from_edges(index_orders)
#min_complexity = stream_plan_complexity(self.queue.evaluations, self.stream_plan, [0]*len(stream_plan))
# TODO: compute this all at once via hashing
def compute_complexity(self, stream_calls, complexities=[]):
# TODO: use the previous value when possible
assert len(stream_calls) == len(self.stream_plan)
start_index = len(complexities)
complexities = complexities + [0]*(len(stream_calls) - start_index)
for index in range(start_index, len(self.stream_plan)):
complexities[index] = self.compute_index_complexity(index, stream_calls[index], complexities)
return complexities
def compute_index_complexity(self, index, num_calls, complexities):
# TODO: automatically set the opt level to be zero for any streams that are bound (assuming not reachieve)
domain_complexity = COMPLEXITY_OP([0] + self.preimage_complexities[index] +
[complexities[index2] for index2 in self.incoming_indices[index]])
return domain_complexity + self.stream_plan[index].external.get_complexity(num_calls=num_calls)
def update_best(self, binding):
if (self.best_binding is None) or (self.best_binding.index < binding.index) or \
((self.best_binding.index == binding.index) and (binding.cost < self.best_binding.cost)):
self.best_binding = binding
#print('Skeleton {} | Progress: {} | New best: {}'.format(
# self.index, self.best_binding.index, self.best_binding))
self.improved = True
return True
return False
def bind_stream_result(self, index, mapping):
return self.stream_plan[index].remap_inputs(mapping) # Has optimistic output objects
def bind_action_plan(self, mapping):
return bind_action_plan(self.action_plan, mapping)
def visualize_bindings(self):
# TODO: remap outputs
orders = {(binding1.parent_result, binding2.parent_result)
for binding1, binding2 in self.root.get_connections()}
return visualize_stream_orders(orders)
##################################################
class Binding(object):
counter = count()
def __init__(self, skeleton, cost, history, mapping, index, parent, parent_result):
#def __init__(self, skeleton, cost=0., history=[], mapping={}, index=0, parent=None):
self.skeleton = skeleton
self.cost = cost
self.history = history
self.mapping = mapping
self.index = index
self.parent = parent
if self.parent is not None:
self.parent.children.append(self)
self.parent_result = parent_result
self.children = []
self._result = False
self.visits = 0 # The number of times _process_binding has been called
self.calls = 0 # The index for result_history
self.complexity = None
self.complexities = None
self.max_history = max(self.history) if self.history else 0
self.skeleton.update_best(self)
self.num = next(self.counter) # TODO: FIFO
@property
def is_fully_bound(self):
return self.index == len(self.skeleton.stream_plan)
@property
def result(self):
if self._result is False:
self._result = None
if not self.is_fully_bound:
self._result = self.skeleton.bind_stream_result(self.index, self.mapping)
return self._result
def is_best(self):
return self.skeleton.best_binding is self
def is_dominated(self):
return self.skeleton.queue.store.best_cost <= self.cost
def is_enumerated(self):
return self.is_fully_bound or self.result.enumerated
def is_unsatisfied(self):
return not self.children
def is_greedy(self):
return (self.visits <= GREEDY_VISITS) and (not GREEDY_BEST or self.is_best())
def up_to_date(self):
if self.is_fully_bound:
return True
#if REQUIRE_DOWNSTREAM:
# return self.result.instance.num_calls <= self.visits
#else:
return self.calls == self.result.instance.num_calls
def compute_complexity(self):
if self.is_fully_bound:
return 0
# TODO: use last if self.result.external.get_complexity(num_calls=INF) == 0
# TODO: intelligently compute/cache this - store parent stream_plan_complexity or compute formula per skeleton
if self.complexity is None:
full_history = self.history + [self.calls] # TODO: relevant history, full history, or future
future = full_history + [0]*(len(self.skeleton.stream_plan) - len(full_history))
parent_complexities = [0]*len(self.skeleton.stream_plan) if self.index == 0 else self.parent.complexities
if self.skeleton.outgoing_indices[self.index]:
self.complexities = self.skeleton.compute_complexity(future, complexities=parent_complexities[:self.index])
else:
self.complexities = list(parent_complexities)
self.complexities[self.index] = self.skeleton.compute_index_complexity(self.index, self.calls, self.complexities)
self.complexity = COMPLEXITY_OP(self.complexities)
#self.complexity = stream_plan_complexity(self.skeleton.queue.evaluations, self.skeleton.stream_plan, future)
return self.complexity
#return compute_complexity(self.skeleton.queue.evaluations, self.result.get_domain()) + \
# self.result.external.get_complexity(self.visits) # visits, calls
def check_complexity(self, complexity_limit=INF):
if complexity_limit == INF:
return True
if any(calls > complexity_limit for calls in [self.max_history, self.calls]): # + self.history
# Check lower bounds for efficiency purposes
return False
return self.compute_complexity() <= complexity_limit
def check_downstream_helper(self, affected):
if self.is_dominated():
# Keep exploring down branches that contain a cost term
return affected.has_cost
if self.is_unsatisfied(): # or type(self.result) == FunctionResult): # not self.visits
return self.index in affected.indices
        # TODO: only prune functions here if the rest of the plan is feasible
#if not affected.indices or (max(affected.indices) < self.index):
# # Cut branch for efficiency purposes
# return False
# TODO: discard bindings that have been pruned by their cost per affected component
# TODO: both any and all weakly prune
return any(binding.check_downstream_helper(affected) for binding in self.children)
def check_downstream(self):
return self.check_downstream_helper(self.skeleton.affected_indices[self.index])
def get_priority(self):
if not USE_PRIORITIES:
return Priority(not_greedy=True, complexity=0, visits=self.visits, remaining=0, cost=0.)
# TODO: use effort instead
# TODO: instead of remaining, use the index in the queue to reprocess earlier ones
#priority = self.visits
#priority = self.compute_complexity()
priority = self.compute_complexity() + (self.visits - self.calls) # TODO: check this
# TODO: call_index
remaining = len(self.skeleton.stream_plan) - self.index
return Priority(not self.is_greedy(), priority, self.visits, remaining, self.cost)
def post_order(self):
for child in self.children:
for binding in child.post_order():
yield binding
yield self
def get_ancestors(self):
if self.parent is not None:
for ancestor in self.parent.get_ancestors():
yield ancestor
yield self
def get_connections(self):
# TODO: easier to just iterate over all bindings and extract the parent
connections = []
for child in self.children:
connections.append((self, child))
connections.extend(child.get_connections())
return connections
def recover_bound_results(self):
return [binding.parent_result for binding in list(self.get_ancestors())[1:]]
def update_bindings(self):
new_bindings = []
instance = self.result.instance
for call_idx in range(self.calls, instance.num_calls):
for new_result in instance.results_history[call_idx]: # TODO: don't readd if successful already
if new_result.is_successful():
new_bindings.append(Binding(
skeleton=self.skeleton,
cost=update_cost(self.cost, self.result, new_result),
history=self.history + [call_idx],
mapping=update_bindings(self.mapping, self.result, new_result),
index=self.index + 1, # TODO: history instead of results_history
parent=self,
parent_result=new_result))
self.calls = instance.num_calls
self.visits = max(self.visits, self.calls)
self.complexity = None # Forces re-computation
#self.skeleton.visualize_bindings()
return new_bindings
def __repr__(self):
return '{}(skeleton={}, {})'.format(self.__class__.__name__, self.skeleton.index, self.result)
##################################################
STANDBY = None
class SkeletonQueue(Sized):
def __init__(self, store, domain, disable=True):
# TODO: multi-threaded
self.store = store
self.domain = domain
self.skeletons = []
self.queue = [] # TODO: deque version
self.disable = disable
self.standby = []
@property
def evaluations(self):
return self.store.evaluations
def __len__(self):
return len(self.queue)
def is_active(self):
return self.queue and (not self.store.is_terminated())
def push_binding(self, binding):
# TODO: add to standby if not active
priority = binding.get_priority()
element = HeapElement(priority, binding)
heappush(self.queue, element)
def pop_binding(self):
priority, binding = heappop(self.queue)
#return binding
return priority, binding
def peak_binding(self):
if not self.queue:
return None
priority, binding = self.queue[0]
return priority, binding
def new_skeleton(self, stream_plan, action_plan, cost):
skeleton = Skeleton(self, stream_plan, action_plan, cost)
self.skeletons.append(skeleton)
self.push_binding(skeleton.root)
#self.greedily_process()
return skeleton
def readd_standby(self):
for binding in self.standby:
self.push_binding(binding)
self.standby = []
#########################
def _process_binding(self, binding):
assert binding.calls <= binding.visits # TODO: global DEBUG mode
readd = is_new = False
if binding.is_dominated():
return readd, is_new
if binding.is_fully_bound:
action_plan = binding.skeleton.bind_action_plan(binding.mapping)
self.store.add_plan(action_plan, binding.cost)
is_new = True
return readd, is_new
binding.visits += 1
instance = binding.result.instance
if (REQUIRE_DOWNSTREAM and not binding.check_downstream()): # TODO: move check_complexity here
# TODO: causes redundant plan skeletons to be identified (along with complexity using visits instead of calls)
# Do I need to re-enable this stream in case another skeleton needs it?
# TODO: should I perform this when deciding to sample something new instead?
return STANDBY, is_new
#if not is_instance_ready(self.evaluations, instance):
# raise RuntimeError(instance)
if binding.up_to_date():
new_results, _ = process_instance(self.store, self.domain, instance, disable=self.disable)
is_new = bool(new_results)
for new_binding in binding.update_bindings():
self.push_binding(new_binding)
readd = not instance.enumerated
return readd, is_new
#########################
def process_root(self):
_, binding = self.pop_binding()
readd, is_new = self._process_binding(binding)
if readd is not False:
self.push_binding(binding)
# TODO: if readd == STANDBY
return is_new
def greedily_process(self):
num_new = 0
while self.is_active():
priority, binding = self.peak_binding()
if not binding.is_greedy(): #priority.not_greedy:
break
num_new += self.process_root()
return num_new
def process_until_new(self, print_frequency=1.):
# TODO: process the entire queue for one pass instead
num_new = 0
if not self.is_active():
return num_new
print('Sampling until new output values')
iterations = 0
last_time = time.time()
while self.is_active() and (not num_new):
iterations += 1
_, binding = self.pop_binding()
readd, is_new = self._process_binding(binding)
if readd is True:
self.push_binding(binding)
elif readd is STANDBY:
self.standby.append(binding) # TODO: test for deciding whether to standby
num_new += is_new
if print_frequency <= elapsed_time(last_time):
print('Queue: {} | Iterations: {} | Time: {:.3f}'.format(
len(self.queue), iterations, elapsed_time(last_time)))
last_time = time.time()
self.readd_standby()
return num_new + self.greedily_process()
def process_complexity(self, complexity_limit):
# TODO: could copy the queue and filter instances that exceed complexity_limit
num_new = 0
if not self.is_active():
return num_new
print('Sampling while complexity <= {}'.format(complexity_limit))
while self.is_active():
_, binding = self.pop_binding()
if binding.check_complexity(complexity_limit): # not binding.up_to_date() or
readd, is_new = self._process_binding(binding)
num_new += is_new
if readd is not STANDBY:
if readd is True:
self.push_binding(binding)
continue
self.standby.append(binding)
self.readd_standby()
return num_new + self.greedily_process()
# TODO: increment the complexity level even more if nothing below in the queue
def timed_process(self, max_time=INF, max_iterations=INF):
# TODO: combine process methods into process_until
iterations = num_new = 0
if not self.is_active():
return num_new
print('Sampling for up to {:.3f} seconds'.format(max_time)) #, max_iterations))
start_time = time.time() # TODO: instead use sample_time
while self.is_active() and (elapsed_time(start_time) < max_time) and (iterations < max_iterations):
iterations += 1
num_new += self.process_root()
#print('Iterations: {} | New: {} | Time: {:.3f}'.format(iterations, num_new, elapsed_time(start_time)))
return num_new + self.greedily_process()
#########################
def accelerate_best_bindings(self, **kwargs):
# TODO: more generally reason about streams on several skeletons
# TODO: reset the complexity values for old streams
for skeleton in self.skeletons:
if not skeleton.improved:
continue
skeleton.improved = False
for result in skeleton.best_binding.recover_bound_results():
# TODO: just accelerate the facts within the plan preimage
#print(result, result.compute_complexity(self.evaluations, **kwargs))
result.call_index = 0 # Pretends the fact was first
#print(result.compute_complexity(self.evaluations, **kwargs))
add_certified(self.evaluations, result, **kwargs) # TODO: should special have a complexity of INF?
# TODO: AssertionError: Could not find instantiation for numeric expression: dist
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0, accelerate=False):
start_time = time.time()
if is_plan(stream_plan):
self.new_skeleton(stream_plan, action_plan, cost)
self.greedily_process()
elif (stream_plan is INFEASIBLE) and not self.process_until_new():
# Move this after process_complexity
return INFEASIBLE
if not self.queue:
return FAILED
# TODO: add and process
self.timed_process(max_time=(max_time - elapsed_time(start_time)))
self.process_complexity(complexity_limit)
if accelerate:
self.accelerate_best_bindings()
return FAILED
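# The sketch below illustrates how SkeletonQueue.process() is typically driven by a caller; it is a
# hedged example, not part of the algorithm: `store`, `domain`, and the optimistic
# (stream_plan, action_plan, cost) triple are assumed to come from a focused-style planner, and the
# max_time value is arbitrary.
def _example_skeleton_queue_usage(store, domain, stream_plan, action_plan, cost):
    queue = SkeletonQueue(store, domain, disable=True)
    # complexity_limit=INF disables complexity pruning; max_time bounds the sampling effort
    return queue.process(stream_plan, action_plan, cost, complexity_limit=INF, max_time=10)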
| 21,064 |
Python
| 46.337079 | 129 | 0.629747 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/satisfaction.py
|
from __future__ import print_function
import time
from collections import Counter, namedtuple
from hsr_tamp.pddlstream.algorithms.algorithm import parse_stream_pddl, evaluations_from_init
from hsr_tamp.pddlstream.algorithms.common import SolutionStore
from hsr_tamp.pddlstream.algorithms.disable_skeleton import create_disabled_axioms, extract_disabled_clusters
from hsr_tamp.pddlstream.algorithms.downward import make_domain, make_predicate, add_predicate
from hsr_tamp.pddlstream.algorithms.recover_optimizers import retrace_instantiation, combine_optimizers
from hsr_tamp.pddlstream.algorithms.reorder import reorder_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
# from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.language.constants import is_parameter, get_length, partition_facts, Assignment, OptPlan
from hsr_tamp.pddlstream.language.conversion import revert_solution, \
evaluation_from_fact, replace_expression, get_prefix, get_args
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.statistics import write_stream_statistics, compute_plan_effort
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.algorithms.visualization import visualize_constraints
from hsr_tamp.pddlstream.utils import INF, get_mapping, elapsed_time, str_from_object, safe_zip
# TODO: ConstraintProblem?
SatisfactionProblem = namedtuple('SatisfactionProblem', ['stream_pddl', 'stream_map', 'init', 'terms'])
SatisfactionSolution = namedtuple('SatisfactionSolution', ['bindings', 'cost', 'facts'])
##################################################
def parse_value(value):
return OptimisticObject.from_opt(value, value) if is_parameter(value) else Object.from_value(value)
def obj_from_existential_expression(parent): # obj_from_value_expression
return replace_expression(parent, parse_value)
def create_domain(goal_facts):
domain = make_domain()
for fact in goal_facts: # TODO: consider removing this annoying check
name = get_prefix(fact)
parameters = ['?x{}'.format(i) for i in range(len(get_args(fact)))]
add_predicate(domain, make_predicate(name, parameters))
return domain
def plan_functions(functions, externals):
external_from_function = {}
for external in filter(lambda e: isinstance(e, Function), externals):
assert external.function not in external_from_function
external_from_function[external.function] = external
function_plan = set()
for term in functions:
if get_prefix(term) not in external_from_function:
raise ValueError('{} is not implemented'.format(get_prefix(term)))
external = external_from_function[get_prefix(term)]
instance = external.get_instance(get_args(term))
[result] = instance.next_optimistic()
function_plan.add(result)
print('Function plan:', str_from_object(function_plan))
return function_plan
def get_parameters(goal_facts):
return {o for f in goal_facts for o in get_args(f) if isinstance(o, OptimisticObject)}
def extract_streams(evaluations, externals, goal_facts):
streams = list(filter(lambda e: isinstance(e, Stream), externals))
free_parameters = get_parameters(goal_facts)
visited_facts = set()
stream_results = []
for fact in goal_facts:
# TODO: prune results that already exceed effort limit
retrace_instantiation(fact, streams, evaluations, free_parameters, visited_facts, stream_results)
print('Streams:', stream_results)
# TODO: express some of this pruning using effort (e.g. unlikely to sample bound value)
return stream_results
def get_optimistic_cost(function_plan):
return sum([0.] + [result.value for result in function_plan
if type(result.external) == Function])
def bindings_from_plan(plan_skeleton, action_plan):
if action_plan is None:
return None
bindings = {}
for (args1,), (args2,) in safe_zip(plan_skeleton, action_plan):
parameter_names = [o.value for o in args1]
bindings.update(get_mapping(parameter_names, args2))
return bindings
def are_dominated(clusters1, clusters2):
return all(any(c1 <= c2 for c2 in clusters2) for c1 in clusters1)
def dump_assignment(solution):
bindings, cost, evaluations = solution
print()
print('Solved: {}'.format(bindings is not None))
print('Cost: {:.3f}'.format(cost))
print('Total facts: {}'.format(len(evaluations)))
print('Fact counts: {}'.format(str_from_object(Counter(map(get_prefix, evaluations.all_facts))))) # preimage_facts
if bindings is None:
return
print('Assignments:')
for param in sorted(bindings):
print('{} = {}'.format(param, str_from_object(bindings[param])))
def visualize_problem(problem, **kwargs):
stream_pddl, stream_map, init, terms = problem
terms = set(map(obj_from_existential_expression, terms))
return visualize_constraints(terms, **kwargs)
##################################################
def constraint_satisfaction(problem, stream_info={},
costs=True, max_cost=INF, success_cost=INF, max_time=INF,
unit_efforts=False, max_effort=INF,
max_skeletons=INF, search_sample_ratio=1, verbose=True, **search_args):
# Approaches
# 1) Existential quantification of bindings in goal conditions
    # 2) Backtrack useful streams and then schedule. Create arbitrary outputs for those not mentioned.
# 3) Construct all useful streams and then associate outputs with bindings
# Useful stream must satisfy at least one fact. How should these assignments be propagated though?
# Make an action that maps each stream result to unbound values?
# TODO: include functions again for cost-sensitive satisfaction
# TODO: convert init into streams to bind certain facts
# TODO: investigate constraint satisfaction techniques for binding instead
# TODO: could also instantiate all possible free parameters even if not useful
# TODO: effort that is a function of the number of output parameters (degrees of freedom)
# TODO: use a CSP solver instead of a planner internally
# TODO: max_iterations?
stream_pddl, stream_map, init, terms = problem
if not terms:
return SatisfactionSolution({}, 0, init)
constraints, negated, functions = partition_facts(set(map(obj_from_existential_expression, terms)))
if not costs:
functions = []
evaluations = evaluations_from_init(init)
goal_facts = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, constraints))
free_parameters = sorted(get_parameters(goal_facts))
print('Parameters:', free_parameters)
externals = parse_stream_pddl(stream_pddl, stream_map, stream_info, unit_efforts=unit_efforts)
stream_results = extract_streams(evaluations, externals, goal_facts)
function_plan = plan_functions(negated + functions, externals)
plan_skeleton = [Assignment(free_parameters)]
cost = get_optimistic_cost(function_plan)
if max_cost < cost:
return SatisfactionSolution(None, INF, init)
# TODO: detect connected components
# TODO: eagerly evaluate fully bound constraints
# TODO: consider other results if this fails
domain = create_domain(goal_facts)
init_evaluations = evaluations.copy()
store = SolutionStore(evaluations, max_time=max_time, success_cost=success_cost, verbose=verbose)
queue = SkeletonQueue(store, domain, disable=False)
num_iterations = search_time = sample_time = 0
planner = 'ff-astar' # TODO: toggle within reschedule_stream_plan
#last_clusters = set()
#last_success = True
while not store.is_terminated():
num_iterations += 1
start_time = time.time()
print('\nIteration: {} | Skeletons: {} | Skeleton Queue: {} | Evaluations: {} | '
'Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
num_iterations, len(queue.skeletons), len(queue),
len(evaluations), store.best_cost, search_time, sample_time, store.elapsed_time()))
external_plan = None
if len(queue.skeletons) < max_skeletons:
domain.axioms[:] = create_disabled_axioms(queue, use_parameters=False)
            #dominated = are_dominated(last_clusters, clusters)
#last_clusters = clusters
#if last_success or not dominated: # Could also keep a history of results
stream_plan = reschedule_stream_plan(init_evaluations, goal_facts, domain, stream_results,
unique_binding=True, unsatisfiable=True,
max_effort=max_effort, planner=planner, **search_args)
if stream_plan is not None:
external_plan = reorder_stream_plan(store, combine_optimizers(
init_evaluations, stream_plan + list(function_plan)))
print('Stream plan ({}, {:.3f}): {}'.format(
get_length(external_plan), compute_plan_effort(external_plan), external_plan))
last_success = (external_plan is not None)
search_time += elapsed_time(start_time)
# Once a constraint added for a skeleton, it should only be relaxed
start_time = time.time()
        if last_success: # Only works if create_disabled_axioms never changes
allocated_sample_time = (search_sample_ratio * search_time) - sample_time
else:
allocated_sample_time = INF
queue.process(external_plan, OptPlan(plan_skeleton, []), cost=cost, # TODO: fill in preimage facts
complexity_limit=INF, max_time=allocated_sample_time)
sample_time += elapsed_time(start_time)
if not last_success and not queue:
break
# TODO: exhaustively compute all plan skeletons and add to queue within the focused algorithm
write_stream_statistics(externals, verbose)
action_plan, cost, facts = revert_solution(store.best_plan, store.best_cost, evaluations)
bindings = bindings_from_plan(plan_skeleton, action_plan)
return SatisfactionSolution(bindings, cost, facts)
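# The sketch below shows the intended end-to-end use of this module; it is a hedged example that is
# not called anywhere: stream_pddl, stream_map, init, and terms are assumed to be supplied by the
# user in the same formats as SatisfactionProblem's fields, and the keyword values are illustrative.
def _example_constraint_satisfaction(stream_pddl, stream_map, init, terms):
    problem = SatisfactionProblem(stream_pddl, stream_map, init, terms)
    solution = constraint_satisfaction(problem, costs=False, max_time=30)
    dump_assignment(solution)
    return solution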
| 10,454 |
Python
| 50.757425 | 118 | 0.692462 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/algorithm.py
|
from collections import Counter
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init, SOLUTIONS
from hsr_tamp.pddlstream.algorithms.constraints import add_plan_constraints
from hsr_tamp.pddlstream.algorithms.downward import parse_lisp, parse_goal, has_costs, set_unit_costs, normalize_domain_goal
from hsr_tamp.pddlstream.language.temporal import parse_domain, SimplifiedDomain
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import obj_from_value_expression
from hsr_tamp.pddlstream.language.exogenous import compile_to_exogenous
from hsr_tamp.pddlstream.language.external import External
from hsr_tamp.pddlstream.language.function import parse_function, parse_predicate
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.optimizer import parse_optimizer
from hsr_tamp.pddlstream.language.rule import parse_rule, apply_rules_to_streams, RULES
from hsr_tamp.pddlstream.language.stream import parse_stream, Stream, StreamInstance
from hsr_tamp.pddlstream.utils import INF
# TODO: rename file to parsing
def parse_constants(domain, constant_map):
obj_from_constant = {}
for constant in domain.constants:
if constant.name.startswith(Object._prefix): # TODO: check other prefixes
raise NotImplementedError('Constants are not currently allowed to begin with {}'.format(Object._prefix))
if constant.name not in constant_map:
raise ValueError('Undefined constant {}'.format(constant.name))
value = constant_map.get(constant.name, constant.name)
obj_from_constant[constant.name] = Object(value, name=constant.name) # TODO: remap names
# TODO: add object predicate
for name in constant_map:
for constant in domain.constants:
if constant.name == name:
break
else:
raise ValueError('Constant map value {} not mentioned in domain :constants'.format(name))
del domain.constants[:] # So not set twice
return obj_from_constant
def check_problem(domain, streams, obj_from_constant):
for action in (domain.actions + domain.axioms):
for p, c in Counter(action.parameters).items():
if c != 1:
raise ValueError('Parameter [{}] for action [{}] is not unique'.format(p.name, action.name))
# TODO: check that no undeclared parameters & constants
#action.dump()
undeclared_predicates = set()
for stream in streams:
# TODO: domain.functions
facts = list(stream.domain)
if isinstance(stream, Stream):
facts.extend(stream.certified)
for fact in facts:
name = get_prefix(fact)
if name not in domain.predicate_dict:
undeclared_predicates.add(name)
elif len(get_args(fact)) != domain.predicate_dict[name].get_arity(): # predicate used with wrong arity: {}
print('Warning! predicate used with wrong arity in stream [{}]: {}'.format(stream.name, fact))
# for constant in stream.constants:
# if constant not in obj_from_constant:
# raise ValueError('Undefined constant in stream [{}]: {}'.format(stream.name, constant))
if undeclared_predicates:
print('Warning! Undeclared predicates: {}'.format(
sorted(undeclared_predicates))) # Undeclared predicate: {}
def reset_globals():
# TODO: maintain these dictionaries in an object
Object.reset()
OptimisticObject.reset()
RULES[:] = []
SOLUTIONS[:] = []
def parse_problem(problem, stream_info={}, constraints=None, unit_costs=False, unit_efforts=False):
# TODO: just return the problem if already written programmatically
#reset_globals() # Prevents use of satisfaction.py
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
domain = parse_domain(domain_pddl) # TODO: normalize here
#domain = domain_pddl
if len(domain.types) != 1:
raise NotImplementedError('Types are not currently supported')
if unit_costs:
set_unit_costs(domain)
if not has_costs(domain):
# TODO: set effort_weight to 1 if no costs
print('Warning! All actions have no cost. Recommend setting unit_costs=True')
obj_from_constant = parse_constants(domain, constant_map) # Keep before parse_stream_pddl
streams = parse_stream_pddl(stream_pddl, stream_map, stream_info=stream_info,
unit_costs=unit_costs, unit_efforts=unit_efforts)
check_problem(domain, streams, obj_from_constant)
evaluations = evaluations_from_init(init)
goal_exp = obj_from_value_expression(goal)
if isinstance(domain, SimplifiedDomain):
#assert isinstance(domain, str) # raw PDDL is returned
_ = {name: Object(value, name=name) for name, value in constant_map.items()}
return evaluations, goal_exp, domain, streams
goal_exp = add_plan_constraints(constraints, domain, evaluations, goal_exp)
parse_goal(goal_exp, domain) # Just to check that it parses
normalize_domain_goal(domain, goal_exp) # TODO: does not normalize goal_exp
compile_to_exogenous(evaluations, domain, streams)
return evaluations, goal_exp, domain, streams
##################################################
def parse_streams(streams, rules, stream_pddl, procedure_map, procedure_info, use_functions=True):
stream_iter = iter(parse_lisp(stream_pddl))
assert('define' == next(stream_iter))
pddl_type, pddl_name = next(stream_iter)
assert('stream' == pddl_type)
for lisp_list in stream_iter:
name = lisp_list[0] # TODO: refactor at this point
if name == ':stream':
externals = [parse_stream(lisp_list, procedure_map, procedure_info)]
elif name == ':rule':
externals = [parse_rule(lisp_list, procedure_map, procedure_info)]
elif name == ':function':
if not use_functions:
continue
externals = [parse_function(lisp_list, procedure_map, procedure_info)]
elif name == ':predicate': # Cannot just use args if want a bound
externals = [parse_predicate(lisp_list, procedure_map, procedure_info)]
elif name == ':optimizer':
externals = parse_optimizer(lisp_list, procedure_map, procedure_info)
else:
raise ValueError(name)
for external in externals:
if any(e.name == external.name for e in streams):
raise ValueError('Stream [{}] is not unique'.format(external.name))
if name == ':rule':
rules.append(external)
external.pddl_name = pddl_name # TODO: move within constructors
streams.append(external)
def set_unit_efforts(externals):
for external in externals:
if external.get_effort() < INF:
external.info.effort = 1
NO_INFO = None
RELATIONAL_INFO = 'relational_info' # structural_info
STATISTICS_INFO = 'statistics_info'
def parse_stream_pddl(stream_pddl, stream_map, stream_info={}, unit_costs=False, unit_efforts=False):
if stream_info is None: # NO_INFO
stream_info = {}
externals = []
if stream_pddl is None:
return externals # No streams
if isinstance(stream_pddl, str):
stream_pddl = [stream_pddl]
if all(isinstance(e, External) for e in stream_pddl):
return stream_pddl
if isinstance(stream_map, dict): # DEBUG_MODES
stream_map = {k.lower(): v for k, v in stream_map.items()}
stream_info = {k.lower(): v for k, v in stream_info.items()}
rules = []
for pddl in stream_pddl:
# TODO: check which functions are actually used and prune the rest
parse_streams(externals, rules, pddl, stream_map, stream_info, use_functions=not unit_costs)
apply_rules_to_streams(rules, externals)
if unit_efforts:
set_unit_efforts(externals)
return externals
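# Hedged usage sketch: stream_pddl is the '(define (stream ...) ...)' text and stream_map maps each
# declared stream/function name to a Python procedure, e.g.
#   externals = parse_stream_pddl(stream_pddl, {'inverse-kinematics': ik_gen_fn}, stream_info={})
# where 'inverse-kinematics' and ik_gen_fn are illustrative, user-supplied names.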
##################################################
def remove_blocked(evaluations, domain, instance, new_results):
# TODO: finish refactoring this
if new_results and isinstance(instance, StreamInstance):
instance.enable(evaluations, domain)
| 8,253 |
Python
| 45.370786 | 124 | 0.668363 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/disabled.py
|
import time
from hsr_tamp.pddlstream.algorithms.common import add_facts, add_certified, is_instance_ready, UNKNOWN_EVALUATION
from hsr_tamp.pddlstream.algorithms.algorithm import remove_blocked
from hsr_tamp.pddlstream.language.constants import OptPlan
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.language.conversion import is_plan, transform_action_args, replace_expression
from hsr_tamp.pddlstream.utils import INF, safe_zip, apply_mapping, flatten, elapsed_time
# TODO: disabled isn't quite like complexity. Stream instances below the complexity threshold might be called again
# Well actually, if this was true wouldn't it have already been sampled on a lower level?
def update_bindings(bindings, opt_result, result):
if not isinstance(result, StreamResult):
return bindings
new_bindings = bindings.copy()
for opt, obj in safe_zip(opt_result.output_objects, result.output_objects):
assert new_bindings.get(opt, obj) == obj # TODO: return failure if conflicting bindings
new_bindings[opt] = obj
return new_bindings
def update_cost(cost, opt_result, result):
# TODO: recompute optimistic costs to attempt to produce a tighter bound
if type(result) is not FunctionResult:
return cost
return cost + (result.value - opt_result.value)
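# Worked example (hedged, illustrative numbers): if an optimistic FunctionResult estimated a value
# of 2.0 and its evaluated counterpart returns 5.0, the plan cost grows by the difference:
#   update_cost(10.0, opt_result, result) == 10.0 + (5.0 - 2.0) == 13.0
# Results that are not FunctionResults leave the cost unchanged.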
def bind_action_plan(opt_plan, mapping):
fn = lambda o: mapping.get(o, o)
new_action_plan = [transform_action_args(action, fn)
for action in opt_plan.action_plan]
new_preimage_facts = frozenset(replace_expression(fact, fn)
for fact in opt_plan.preimage_facts)
return OptPlan(new_action_plan, new_preimage_facts)
def get_free_objects(stream_plan):
return set(flatten(result.output_objects for result in stream_plan
if isinstance(result, StreamResult)))
##################################################
def push_disabled(instantiator, disabled):
for instance in list(disabled):
if instance.enumerated:
disabled.remove(instance)
else:
# TODO: only add if not already queued
instantiator.push_instance(instance)
def reenable_disabled(evaluations, domain, disabled):
for instance in disabled:
instance.enable(evaluations, domain)
disabled.clear()
def process_instance(store, domain, instance, disable=False):
if instance.enumerated:
return [], []
start_time = time.time()
new_results, new_facts = instance.next_results(verbose=store.verbose)
store.sample_time += elapsed_time(start_time)
evaluations = store.evaluations
if disable:
instance.disable(evaluations, domain)
for result in new_results:
#add_certified(evaluations, result) # TODO: only add if the fact is actually new?
complexity = INF if (not disable or result.external.is_special) else \
result.compute_complexity(evaluations)
add_facts(evaluations, result.get_certified(), result=result, complexity=complexity)
if disable:
remove_blocked(evaluations, domain, instance, new_results)
add_facts(evaluations, new_facts, result=UNKNOWN_EVALUATION, complexity=0) # TODO: record the instance
return new_results, new_facts
##################################################
def process_stream_plan(store, domain, disabled, stream_plan, action_plan, cost,
bind=True, max_failures=0):
# Bad old implementation of this method
# The only advantage of this vs skeleton is that this can avoid the combinatorial growth in bindings
if not is_plan(stream_plan):
return
if not stream_plan:
store.add_plan(action_plan, cost)
return
stream_plan = [result for result in stream_plan if result.optimistic]
free_objects = get_free_objects(stream_plan)
bindings = {}
bound_plan = []
num_wild = 0
for idx, opt_result in enumerate(stream_plan):
if (store.best_cost <= cost) or (max_failures < (idx - len(bound_plan))):
# TODO: this terminates early when bind=False
break
opt_inputs = [inp for inp in opt_result.instance.input_objects if inp in free_objects]
if (not bind and opt_inputs) or not all(inp in bindings for inp in opt_inputs):
continue
bound_result = opt_result.remap_inputs(bindings)
bound_instance = bound_result.instance
if bound_instance.enumerated or not is_instance_ready(store.evaluations, bound_instance):
continue
# TODO: could remove disabled and just use complexity_limit
new_results, new_facts = process_instance(store, domain, bound_instance) # TODO: bound_result
num_wild += len(new_facts)
if not bound_instance.enumerated:
disabled.add(bound_instance)
for new_result in new_results:
if new_result.is_successful():
                bound_plan.append(new_result)
bindings = update_bindings(bindings, bound_result, bound_plan[-1])
cost = update_cost(cost, opt_result, bound_plan[-1])
break
if (num_wild == 0) and (len(stream_plan) == len(bound_plan)):
store.add_plan(bind_action_plan(action_plan, bindings), cost)
# TODO: report back whether to try w/o optimistic values in the event that wild
##################################################
# def process_stream_plan_branch(store, domain, disabled, stream_plan, action_plan, cost):
# if not is_plan(stream_plan):
# return
# stream_plan = [result for result in stream_plan if result.optimistic]
# if not stream_plan:
# store.add_plan(action_plan, cost)
# return
# free_objects = get_free_objects(stream_plan)
# bindings = defaultdict(set)
# for opt_result in stream_plan:
# opt_inputs = [inp for inp in opt_result.instance.input_objects if inp in free_objects]
# inp_bindings = [bindings[inp] for inp in opt_inputs]
# for combo in product(*inp_bindings):
# bound_result = opt_result.remap_inputs(get_mapping(opt_inputs, combo))
# bound_instance = bound_result.instance
# if bound_instance.enumerated or not is_instance_ready(store.evaluations, bound_instance):
# continue # Disabled
# new_results = process_instance(store, domain, bound_instance)
# if not bound_instance.enumerated:
# disabled.add(bound_instance)
# if isinstance(opt_result, StreamResult):
# for new_result in new_results:
# for out, obj in safe_zip(opt_result.output_objects, new_result.output_objects):
# bindings[out].add(obj)
# #Binding = namedtuple('Binding', ['index', 'mapping'])
# # TODO: after querying, search over all bindings of the produced sampled
| 6,986 |
Python
| 47.186207 | 115 | 0.656885 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/meta.py
|
import argparse
import time
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, fact_from_fd, fd_from_fact, \
fd_from_evaluations, INTERNAL_AXIOM
from hsr_tamp.pddlstream.algorithms.incremental import solve_incremental
from hsr_tamp.pddlstream.algorithms.focused import solve_focused_original, solve_binding, solve_adaptive, get_negative_externals
from hsr_tamp.pddlstream.algorithms.instantiate_task import instantiate_task, convert_instantiated
from hsr_tamp.pddlstream.algorithms.refinement import optimistic_process_streams
from hsr_tamp.pddlstream.algorithms.scheduling.reinstantiate import reinstantiate_axiom
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.language.constants import is_plan, Certificate, PDDLProblem, get_prefix, Solution
from hsr_tamp.pddlstream.language.conversion import value_from_obj_expression, EQ
from hsr_tamp.pddlstream.language.external import DEBUG, SHARED_DEBUG
from hsr_tamp.pddlstream.language.stream import PartialInputs
from hsr_tamp.pddlstream.language.temporal import SimplifiedDomain
from hsr_tamp.pddlstream.utils import elapsed_time, INF, Verbose, irange, SEPARATOR
FOCUSED_ALGORITHMS = ['focused', 'binding', 'adaptive']
ALGORITHMS = ['incremental'] + FOCUSED_ALGORITHMS
DEFAULT_ALGORITHM = 'adaptive'
##################################################
def create_parser(default_algorithm=DEFAULT_ALGORITHM):
# https://docs.python.org/3/library/argparse.html#the-add-argument-method
parser = argparse.ArgumentParser() # Automatically includes help
parser.add_argument('-a', '--algorithm', type=str, default=default_algorithm, choices=ALGORITHMS, required=False,
help='Specifies the PDDLStream algorithm to use')
parser.add_argument('-u', '--unit', action='store_true', help='Uses unit costs') # --unit_costs
# args = parser.parse_args()
# print('Arguments:', args)
# TODO: search planner, debug
# TODO: method that calls solve with args
return parser
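# Hedged usage sketch: a driver script would typically combine this parser with solve(), e.g.
#   parser = create_parser()
#   args = parser.parse_args()
#   solution = solve(problem, algorithm=args.algorithm, unit_costs=args.unit)
# where `problem` is a PDDLProblem constructed by the caller.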
##################################################
def solve(problem, algorithm=DEFAULT_ALGORITHM, constraints=PlanConstraints(),
stream_info={}, replan_actions=set(),
unit_costs=False, success_cost=INF,
max_time=INF, max_iterations=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
max_skeletons=INF, search_sample_ratio=1, max_failures=0,
unit_efforts=False, max_effort=INF, effort_weight=None, reorder=True,
#temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[],
#planner=DEFAULT_PLANNER, max_planner_time=DEFAULT_MAX_TIME, max_cost=INF, debug=False
visualize=False, verbose=True, **search_kwargs):
"""
Solves a PDDLStream problem generically using one of the available algorithms
:param problem: a PDDLStream problem
:param algorithm: a PDDLStream algorithm name
:param constraints: PlanConstraints on the set of legal solutions
:param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
:param replan_actions: the actions declared to induce replanning for the purpose of deferred stream evaluation
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param max_skeletons: the maximum number of plan skeletons (max_skeletons=None indicates not adaptive)
:param search_sample_ratio: the desired ratio of sample time / search time when max_skeletons!=None
:param max_failures: the maximum number of stream failures before switching phases when max_skeletons=None
:param unit_efforts: use unit stream efforts rather than estimated numeric efforts
:param max_effort: the maximum amount of stream effort
:param effort_weight: a multiplier for stream effort compared to action costs
:param reorder: if True, reorder stream plans to minimize the expected sampling overhead
:param visualize: if True, draw the constraint network and stream plan as a graphviz file
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# TODO: print the arguments using locals()
# TODO: could instead make common arguments kwargs but then they could have different default values
# TODO: portfolios of PDDLStream algorithms
if algorithm == 'incremental':
return solve_incremental(
problem=problem, constraints=constraints,
unit_costs=unit_costs, success_cost=success_cost,
max_iterations=max_iterations, max_time=max_time, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
verbose=verbose, **search_kwargs)
# if algorithm == 'abstract_focused': # meta_focused | meta_focused
# return solve_focused(
# problem, constraints=constraints,
# stream_info=stream_info, replan_actions=replan_actions,
# unit_costs=unit_costs, success_cost=success_cost,
# max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
# initial_complexity=initial_complexity, complexity_step=complexity_step, #max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
# bind=bind, max_failures=max_failures,
# unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
# visualize=visualize, verbose=verbose, **search_kwargs)
fail_fast = (max_failures < INF)
if algorithm == 'focused':
return solve_focused_original(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
fail_fast=fail_fast, # bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
if algorithm == 'binding':
return solve_binding(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
fail_fast=fail_fast, # bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
if algorithm == 'adaptive':
return solve_adaptive(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
# bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
raise NotImplementedError(algorithm)
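# The sketch below is a hedged example of calling solve(); it is not invoked anywhere, the keyword
# values are illustrative rather than recommended settings, and `problem` is assumed to be a
# PDDLProblem built by the caller.
def _example_solve_usage(problem):
    plan, cost, certificate = solve(problem, algorithm=DEFAULT_ALGORITHM, unit_costs=False,
                                    success_cost=INF, max_time=60, verbose=False)
    if is_plan(plan):
        print('Found plan with {} actions and cost {:.3f}'.format(len(plan), cost))
    return plan, cost, certificate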
##################################################
def solve_restart(problem, max_time=INF, max_restarts=0, iteration_time=INF, abort=True, **kwargs):
# TODO: iteratively lower the cost bound
# TODO: a sequence of different planner configurations
# TODO: reset objects and/or streams
if (max_restarts >= 1) and (iteration_time == INF):
iteration_time = min(2 * 60, iteration_time)
assert (max_restarts == 0) or (iteration_time != INF)
assert max_restarts >= 0
start_time = time.time()
for attempt in irange(1+max_restarts):
iteration_start_time = time.time()
if elapsed_time(start_time) > max_time:
break
if attempt >= 1:
print(SEPARATOR)
# solution = planner_fn(problem) # Or include the problem in the lambda
remaining_time = min(iteration_time, max_time-elapsed_time(start_time))
solution = solve(problem, max_time=remaining_time, **kwargs)
plan, cost, certificate = solution
if is_plan(plan): # TODO: INFEASIBLE
return solution
if abort and (elapsed_time(iteration_start_time) < remaining_time):
break # TODO: return the cause of failure
certificate = Certificate(all_facts=[], preimage_facts=[]) # TODO: aggregate
return Solution(None, INF, certificate)
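# Hedged usage sketch: with max_restarts >= 1, each attempt gets at most iteration_time seconds
# (defaulting to 2 minutes when left at INF), e.g.
#   plan, cost, certificate = solve_restart(problem, max_restarts=2, iteration_time=60, unit_costs=True)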
##################################################
def set_unique(externals):
for external in externals:
external.info.opt_gen_fn = PartialInputs(unique=True)
external.num_opt_fns = 0
def examine_instantiated(problem, unique=False, normalize=True, unit_costs=False, verbose=False, debug=False, **kwargs):
# TODO: refactor to an analysis file
domain_pddl, constant_map, stream_pddl, _, init, goal = problem
stream_map = DEBUG if unique else SHARED_DEBUG # DEBUG_MODES
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
evaluations, goal_exp, domain, externals = parse_problem(problem, **kwargs)
assert not isinstance(domain, SimplifiedDomain)
negative = get_negative_externals(externals)
externals = list(filter(lambda s: s not in negative, externals))
# store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
# instantiator = Instantiator(externals, evaluations)
# process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
# results = [] # TODO: extract from process_stream_queue
# set_unique(externals)
# domain.actions[:] = [] # TODO: only instantiate axioms
# TODO: drop all fluents and instantiate
# TODO: relaxed planning version of this
results, exhausted = optimistic_process_streams(evaluations, externals, complexity_limit=INF, max_effort=None)
evaluations = evaluations_from_stream_plan(evaluations, results, max_effort=None)
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
with Verbose(debug):
instantiated = instantiate_task(task, check_infeasible=False)
if instantiated is None:
return results, None
# TODO: reinstantiate actions?
instantiated.axioms[:] = [reinstantiate_axiom(axiom) for axiom in instantiated.axioms]
if normalize:
instantiated = convert_instantiated(instantiated)
return results, instantiated
# sas_task = sas_from_pddl(task, debug=debug)
##################################################
def iterate_subgoals(goals, axiom_from_effect):
necessary = set()
possible = set()
for goal in goals:
if goal in axiom_from_effect:
necessary.update(set.intersection(*[set(axiom.condition) for axiom in axiom_from_effect[goal]]))
# print(len(axiom_from_effect[goal]) == 1) # Universal
for axiom in axiom_from_effect[goal]:
possible.update(axiom.condition) # Add goal as well?
else:
necessary.add(goal)
print('Necessary:', necessary)
print('Possible:', possible - necessary)
return possible
def recurse_subgoals(goals, condition_from_effect):
possible = set()
def recurse(goal):
if goal in possible:
return
possible.add(goal)
for condition in condition_from_effect[goal]:
recurse(condition)
for goal in goals:
recurse(goal)
return possible
def analyze_goal(problem, use_actions=False, use_axioms=True, use_streams=True, blocked_predicates=[], **kwargs):
# TODO: instantiate all goal partial states
# TODO: remove actions/axioms that never could achieve a subgoal
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
evaluations = evaluations_from_init(init)
init = set(fd_from_evaluations(evaluations))
# from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import recover_axioms_plans
results, instantiated = examine_instantiated(problem, **kwargs) # TODO: only do if the goals are derived
if instantiated is None:
return None
# optimistic_init = set(instantiated.task.init)
# This is like backchaining in a relaxed space
condition_from_effect = defaultdict(set)
if use_actions:
# TODO: selectively ignore some conditions (e.g. HandEmpty)
# TODO: refactor into separate method
for action in instantiated.actions:
for conditional, effect in action.add_effects:
for condition in (action.precondition + conditional):
if condition.predicate not in blocked_predicates:
condition_from_effect[effect].add(condition)
for conditional, effect in action.del_effects:
for condition in (action.precondition + conditional):
if condition.predicate not in blocked_predicates:
condition_from_effect[effect.negate()].add(condition)
if use_axioms:
# TODO: axiom_rules.handle_axioms(...)
# print('Axioms:', instantiated.axioms)
for axiom in instantiated.axioms:
# axiom = reinstantiate_axiom(axiom)
# axiom.dump()
for condition in axiom.condition:
condition_from_effect[axiom.effect].add(condition)
if use_streams:
for result in results:
for effect in result.certified:
if get_prefix(effect) == EQ:
continue
for condition in result.domain:
condition_from_effect[fd_from_fact(effect)].add(fd_from_fact(condition))
print('Goals:', list(map(fact_from_fd, instantiated.goal_list)))
# all_subgoals = iterate_subgoals(instantiated.goal_list, axiom_from_effect)
all_subgoals = recurse_subgoals(instantiated.goal_list, condition_from_effect)
filtered_subgoals = [subgoal for subgoal in all_subgoals if subgoal in init] # TODO: return the goals as well?
external_subgoals = [value_from_obj_expression(fact_from_fd(subgoal))
for subgoal in sorted(filtered_subgoals, key=lambda g: g.predicate)
if not subgoal.predicate.startswith(INTERNAL_AXIOM)]
print('Initial:', external_subgoals)
return external_subgoals # TODO: decompose into simplified components
| 16,305 |
Python
| 51.6 | 128 | 0.689114 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/visualization.py
|
from __future__ import print_function
import os
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.constants import EQ, get_prefix, get_args, str_from_plan, is_parameter, \
partition_facts
from hsr_tamp.pddlstream.language.conversion import str_from_fact, evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.object import OptimisticObject
from hsr_tamp.pddlstream.utils import clear_dir, ensure_dir, str_from_object, user_input, flatten
# https://www.graphviz.org/doc/info/
DEFAULT_EXTENSION = '.png' # png | pdf
PARAMETER_COLOR = 'LightGreen'
CONSTRAINT_COLOR = 'LightBlue'
NEGATED_COLOR = 'LightYellow'
COST_COLOR = 'LightSalmon'
STREAM_COLOR = 'LightSteelBlue'
FUNCTION_COLOR = 'LightCoral'
VISUALIZATIONS_DIR = 'visualizations/'
CONSTRAINT_NETWORK_DIR = os.path.join(VISUALIZATIONS_DIR, 'constraint_networks/')
STREAM_PLAN_DIR = os.path.join(VISUALIZATIONS_DIR, 'stream_plans/')
PLAN_LOG_FILE = os.path.join(VISUALIZATIONS_DIR, 'log.txt')
ITERATION_TEMPLATE = 'iteration_{}' + DEFAULT_EXTENSION
SYNTHESIZER_TEMPLATE = '{}_{}' + DEFAULT_EXTENSION
##################################################
def has_pygraphviz():
# TODO: networkx
# https://github.com/caelan/pddlstream/blob/82ee5e363585d0af8ff9532ecc14641687d5b56b/examples/fault_tolerant/data_network/run.py#L189
#import networkx
#import graphviz
#import pydot
try:
import pygraphviz
except ImportError:
return False
return True
def reset_visualizations():
clear_dir(VISUALIZATIONS_DIR)
ensure_dir(CONSTRAINT_NETWORK_DIR)
ensure_dir(STREAM_PLAN_DIR)
def log_plans(stream_plan, action_plan, iteration):
# TODO: do this within the focused algorithm itself?
from hsr_tamp.pddlstream.retired.synthesizer import decompose_stream_plan
decomposed_plan = decompose_stream_plan(stream_plan)
with open(PLAN_LOG_FILE, 'a+') as f:
f.write('Iteration: {}\n'
'Component plan: {}\n'
'Stream plan: {}\n'
'Action plan: {}\n\n'.format(
iteration, decomposed_plan,
stream_plan, str_from_plan(action_plan)))
def create_synthesizer_visualizations(result, iteration):
from hsr_tamp.pddlstream.retired.synthesizer import decompose_result
stream_plan = decompose_result(result)
if len(stream_plan) <= 1:
return
# TODO: may overwrite another optimizer if both used on the same iteration
filename = SYNTHESIZER_TEMPLATE.format(result.external.name, iteration)
visualize_constraints(result.get_objectives(), os.path.join(CONSTRAINT_NETWORK_DIR, filename))
visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, filename))
def create_visualizations(evaluations, stream_plan, iteration):
# TODO: place it in the temp_dir?
# TODO: decompose any joint streams
for result in stream_plan:
create_synthesizer_visualizations(result, iteration)
filename = ITERATION_TEMPLATE.format(iteration)
# visualize_stream_plan(stream_plan, path)
constraints = set() # TODO: approximates needed facts using produced ones
for stream in stream_plan:
constraints.update(filter(lambda f: evaluation_from_fact(f) not in evaluations, stream.get_certified()))
print('Constraints:', str_from_object(constraints))
visualize_constraints(constraints, os.path.join(CONSTRAINT_NETWORK_DIR, filename))
from hsr_tamp.pddlstream.retired.synthesizer import decompose_stream_plan
decomposed_plan = decompose_stream_plan(stream_plan)
if len(decomposed_plan) != len(stream_plan):
visualize_stream_plan(decompose_stream_plan(stream_plan), os.path.join(STREAM_PLAN_DIR, filename))
#visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
visualize_stream_plan(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
##################################################
def visualize_constraints(constraints, filename='constraint_network'+DEFAULT_EXTENSION, use_functions=True):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=False)
graph.node_attr['style'] = 'filled'
#graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['colorscheme'] = 'SVG'
graph.edge_attr['colorscheme'] = 'SVG'
#graph.graph_attr['rotate'] = 90
#graph.node_attr['fixedsize'] = True
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['rankdir'] = 'RL'
graph.graph_attr['nodesep'] = 0.05
graph.graph_attr['ranksep'] = 0.25
#graph.graph_attr['pad'] = 0
# splines="false";
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
positive, negated, functions = partition_facts(constraints)
for head in (positive + negated + functions):
# TODO: prune values w/o free parameters?
name = str_from_fact(head)
if head in functions:
if not use_functions:
continue
color = COST_COLOR
elif head in negated:
color = NEGATED_COLOR
else:
color = CONSTRAINT_COLOR
graph.add_node(name, shape='box', color=color)
for arg in get_args(head):
if isinstance(arg, OptimisticObject) or is_parameter(arg):
arg_name = str(arg)
graph.add_node(arg_name, shape='circle', color=PARAMETER_COLOR)
graph.add_edge(name, arg_name)
graph.draw(filename, prog='dot') # neato | dot | twopi | circo | fdp | nop
print('Saved', filename)
return graph
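# Hedged usage sketch (requires pygraphviz): render a small constraint network from fact tuples, e.g.
#   visualize_constraints({('on', '?b1', '?b2'), ('clear', '?b2')}, filename='network.png')
# The predicates and parameter names above are illustrative.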
##################################################
def display_image(filename):
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread(filename)
plt.imshow(img)
plt.title(filename)
plt.axis('off')
plt.tight_layout()
#plt.show()
plt.draw()
#plt.waitforbuttonpress(0) # this will wait for indefinite time
plt.pause(interval=1e-3)
user_input()
plt.close(plt.figure())
def visualize_stream_orders(orders, streams=[], filename='stream_orders'+DEFAULT_EXTENSION):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['color'] = STREAM_COLOR
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
streams = set(streams) | set(flatten(orders))
for stream in streams:
graph.add_node(str(stream))
for stream1, stream2 in orders:
graph.add_edge(str(stream1), str(stream2))
# TODO: could also print the raw values (or a lookup table)
# https://stackoverflow.com/questions/3499056/making-a-legend-key-in-graphviz
graph.draw(filename, prog='dot')
print('Saved', filename)
#display_image(filename)
return graph
def visualize_stream_plan(stream_plan, filename='stream_plan'+DEFAULT_EXTENSION):
return visualize_stream_orders(get_partial_orders(stream_plan), streams=stream_plan, filename=filename)
##################################################
def visualize_stream_plan_bipartite(stream_plan, filename='stream_plan'+DEFAULT_EXTENSION, use_functions=False):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
#graph.graph_attr['rankdir'] = 'LR'
graph.graph_attr['nodesep'] = 0.1
graph.graph_attr['ranksep'] = 0.25
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
# TODO: store these settings as a dictionary
def add_fact(fact):
head, color = (fact[1], COST_COLOR) if get_prefix(fact) == EQ else (fact, CONSTRAINT_COLOR)
s_fact = str_from_fact(head)
graph.add_node(s_fact, color=color)
return s_fact
def add_stream(stream):
color = FUNCTION_COLOR if isinstance(stream, FunctionResult) else STREAM_COLOR
s_stream = str(stream.instance) if isinstance(stream, FunctionResult) else str(stream)
graph.add_node(s_stream, style='rounded,filled', color=color)
# shape: oval, plaintext, polygon, rarrow, cds
# style: rounded, filled, bold
return s_stream
achieved_facts = set()
for stream in stream_plan:
if not use_functions and isinstance(stream, FunctionResult):
continue
s_stream = add_stream(stream)
for fact in stream.instance.get_domain():
if fact in achieved_facts:
s_fact = add_fact(fact)
graph.add_edge(s_fact, s_stream) # Add initial facts?
#if not isinstance(stream, StreamResult):
# continue
for fact in stream.get_certified():
if fact not in achieved_facts: # Ensures DAG
s_fact = add_fact(fact)
graph.add_edge(s_stream, s_fact)
achieved_facts.add(fact)
graph.draw(filename, prog='dot')
print('Saved', filename)
return graph
# graph.layout
# https://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/reference/agraph.html
# https://pygraphviz.github.io/documentation/stable/reference/agraph.html#pygraphviz.AGraph.draw
| 9,884 |
Python
| 39.346939 | 137 | 0.662687 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/constraints.py
|
from __future__ import print_function
from collections import namedtuple
from copy import deepcopy
from hsr_tamp.pddlstream.algorithms.common import add_fact, INTERNAL_EVALUATION
from hsr_tamp.pddlstream.algorithms.downward import make_predicate, make_preconditions, make_effects, add_predicate, \
fd_from_fact
from hsr_tamp.pddlstream.language.constants import Or, And, is_parameter, Not, str_from_plan, EQ
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import find_unique, safe_zip, str_from_object, INF, is_hashable, neighbors_from_orders, \
get_ancestors, get_descendants
OrderedSkeleton = namedtuple('OrderedSkeleton', ['actions', 'orders']) # TODO: AND/OR tree
INTERNAL_PREFIX = '_' # TODO: possibly apply elsewhere
WILD = '*'
ASSIGNED_PREDICATE = '{}assigned'
BOUND_PREDICATE = '{}bound' # TODO: switch with assigned
GROUP_PREDICATE = '{}group'
ORDER_PREDICATE = '{}order'
GOAL_INDEX = -1
def linear_order(actions):
if not actions:
return set()
return {(i, i+1) for i in range(len(actions)-1)} \
| {(len(actions)-1, GOAL_INDEX)}
class PlanConstraints(object):
def __init__(self, skeletons=None, groups={}, exact=True, hint=False, max_cost=INF):
# TODO: constraint that the skeleton is the tail of the plan
if skeletons is not None:
skeletons = [skeleton if isinstance(skeleton, OrderedSkeleton)
else OrderedSkeleton(skeleton, linear_order(skeleton)) for skeleton in skeletons]
self.skeletons = skeletons
self.groups = groups # Could make this a list of lists
self.exact = exact
self.max_cost = max_cost
#self.max_length = max_length
#self.hint = hint # TODO: search over skeletons first and then fall back
#if self.hint:
# raise NotImplementedError()
def dump(self):
print('{}(exact={}, max_cost={})'.format(self.__class__.__name__, self.exact, self.max_cost))
if self.skeletons is None:
return
for i, skeleton in enumerate(self.skeletons):
print(i, str_from_plan(skeleton))
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
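# Hedged usage sketch: restrict search to a two-action skeleton sharing the parameter '?b', where
# WILD leaves an argument unconstrained and goal_pose stands for a concrete, user-provided value;
# the action and argument names are illustrative and must match actions in the domain:
#   skeleton = [('pick', ['?b', WILD]), ('place', ['?b', goal_pose])]
#   constraints = PlanConstraints(skeletons=[skeleton], exact=True, max_cost=INF)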
# TODO: rename other costs to be terminate_cost (or decision cost)
def to_constant(parameter):
name = parameter[1:]
return to_obj('@{}'.format(name))
def to_obj(value):
# Allows both raw values as well as objects to be specified
if any(isinstance(value, Class) for Class in [Object, OptimisticObject]):
return value
return Object.from_value(value)
def get_internal_prefix(internal):
return INTERNAL_PREFIX if internal else ''
def is_constant(arg):
return not is_parameter(arg) and (arg != WILD)
##################################################
def add_plan_constraints(constraints, domain, evaluations, goal_exp, internal=False):
if (constraints is None) or (constraints.skeletons is None):
return goal_exp
import pddl
# TODO: unify this with the constraint ordering
# TODO: can constrain to use a plan prefix
prefix = get_internal_prefix(internal)
assigned_predicate = ASSIGNED_PREDICATE.format(prefix)
bound_predicate = BOUND_PREDICATE.format(prefix)
group_predicate = GROUP_PREDICATE.format(prefix)
order_predicate = ORDER_PREDICATE.format(prefix)
new_facts = []
for group in constraints.groups:
for value in constraints.groups[group]:
# TODO: could make all constants groups (like an equality group)
fact = (group_predicate, to_obj(group), to_obj(value))
new_facts.append(fact)
new_actions = []
new_goals = []
for num, skeleton in enumerate(constraints.skeletons):
actions, orders = skeleton
incoming_orders, _ = neighbors_from_orders(orders)
order_facts = [(order_predicate, to_obj('n{}'.format(num)), to_obj('t{}'.format(step)))
for step in range(len(actions))]
for step, (name, args) in enumerate(actions):
# TODO: could also just remove the free parameter from the action
new_action = deepcopy(find_unique(lambda a: a.name == name, domain.actions))
local_from_global = {a: p.name for a, p in safe_zip(args, new_action.parameters) if is_parameter(a)}
ancestors, descendants = get_ancestors(step, orders), get_descendants(step, orders)
parallel = set(range(len(actions))) - ancestors - descendants - {step}
parameters = set(filter(is_parameter, args))
ancestor_parameters = parameters & set(filter(is_parameter, (p for idx in ancestors for p in actions[idx][1])))
#descendant_parameters = parameters & set(filter(is_parameter, (p for idx in descendants for p in actions[idx][1])))
parallel_parameters = parameters & set(filter(is_parameter, (p for idx in parallel for p in actions[idx][1])))
#bound_preconditions = [Imply(bound, assigned) for bound, assigned in safe_zip(bound_facts, assigned_facts)]
bound_condition = pddl.Conjunction([pddl.Disjunction(map(fd_from_fact, [
Not((bound_predicate, to_constant(p))), (assigned_predicate, to_constant(p), local_from_global[p])
])) for p in parallel_parameters])
existing_preconditions = [(assigned_predicate, to_constant(p), local_from_global[p])
for p in ancestor_parameters]
constant_pairs = [(a, p.name) for a, p in safe_zip(args, new_action.parameters) if is_constant(a)]
group_preconditions = [(group_predicate if is_hashable(a) and (a in constraints.groups) else EQ, to_obj(a), p)
for a, p in constant_pairs]
order_preconditions = [order_facts[idx] for idx in incoming_orders[step]]
new_preconditions = existing_preconditions + group_preconditions + order_preconditions + [Not(order_facts[step])]
new_action.precondition = pddl.Conjunction(
[new_action.precondition, bound_condition,
make_preconditions(new_preconditions)]).simplified()
new_parameters = parameters - ancestors
bound_facts = [(bound_predicate, to_constant(p)) for p in new_parameters]
assigned_facts = [(assigned_predicate, to_constant(p), local_from_global[p]) for p in new_parameters]
new_effects = bound_facts + assigned_facts + [order_facts[step]]
new_action.effects.extend(make_effects(new_effects))
# TODO: should also negate the effects of all other sequences here
new_actions.append(new_action)
#new_action.dump()
new_goals.append(And(*[order_facts[idx] for idx in incoming_orders[GOAL_INDEX]]))
add_predicate(domain, make_predicate(order_predicate, ['?num', '?step']))
if constraints.exact:
domain.actions[:] = []
domain.actions.extend(new_actions)
new_goal_exp = And(goal_exp, Or(*new_goals))
for fact in new_facts:
add_fact(evaluations, fact, result=INTERNAL_EVALUATION)
return new_goal_exp
| 7,191 |
Python
| 47.92517 | 128 | 0.649979 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/reorder.py
|
import time
from collections import namedtuple, deque, Counter
from itertools import combinations
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.statistics import Stats, Performance, EPSILON
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.utils import INF, neighbors_from_orders, topological_sort, get_connected_components, \
sample_topological_sort, is_acyclic, layer_sort, Score, safe_zip
def get_output_objects(result):
if isinstance(result, StreamResult):
return result.output_objects
return tuple()
def get_object_orders(stream_plan):
# TODO: check that only one result per output object
partial_orders = set()
for i, stream1 in enumerate(stream_plan):
for stream2 in stream_plan[i+1:]:
if set(get_output_objects(stream1)) & stream2.instance.get_all_input_objects():
partial_orders.add((stream1, stream2))
return partial_orders
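# Illustrative note (hypothetical stream names): if a sample-pose stream outputs ?p and a
# later inverse-kinematics stream consumes ?p as an input, get_object_orders adds the pair
# (sample-pose result, ik result) so the producer is always ordered before the consumer.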
def get_initial_orders(init_facts, stream_plan):
return {(fact, stream) for stream in stream_plan for fact in stream.get_domain() if fact in init_facts}
def get_fact_orders(stream_plan, init_facts=set()):
# TODO: explicitly recover this from plan_streams
# TODO: init_facts isn't used in practice
achieved_facts = set(init_facts)
partial_orders = set()
for i, stream1 in enumerate(stream_plan):
new_facts = set(stream1.get_certified()) - achieved_facts
for stream2 in stream_plan[i+1:]: # Prevents circular
if new_facts & set(stream2.get_domain()):
partial_orders.add((stream1, stream2))
achieved_facts.update(new_facts)
return partial_orders
def get_partial_orders(stream_plan, use_facts=True, **kwargs):
partial_orders = get_object_orders(stream_plan)
if use_facts:
partial_orders.update(get_fact_orders(stream_plan, **kwargs))
assert is_acyclic(stream_plan, partial_orders)
return partial_orders
##################################################
def get_stream_plan_components(external_plan, **kwargs):
partial_orders = get_partial_orders(external_plan, **kwargs)
return get_connected_components(external_plan, partial_orders)
def dump_components(stream_plan):
for i, result in enumerate(stream_plan):
components = get_stream_plan_components(stream_plan[:i+1])
print(i, len(components), components)
##################################################
def get_future_p_successes(stream_plan):
# TODO: should I use this instead of p_success in some places?
# TODO: learn this instead by estimating conditional probabilities of certain sequence
# TODO: propagate stats_heuristic
orders = get_partial_orders(stream_plan)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
descendants_map = {}
for s1 in reversed(stream_plan):
descendants_map[s1] = s1.instance.get_p_success()
for s2 in outgoing_edges[s1]:
descendants_map[s1] *= descendants_map[s2]
return descendants_map
def compute_expected_cost(stream_plan, stats_fn=Performance.get_statistics):
if not is_plan(stream_plan):
return INF
expected_cost = 0.
for result in reversed(stream_plan):
p_success, overhead = stats_fn(result)
expected_cost = overhead + p_success * expected_cost
return expected_cost
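# Hedged sketch of the recursion above: with per-result statistics (p_i, o_i) for success
# probability and overhead, the expected cost unrolls to
#   o_1 + p_1*(o_2 + p_2*(... + p_{n-1}*o_n))
# so later overheads are only paid when every earlier result succeeds.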
##################################################
Subproblem = namedtuple('Subproblem', ['cost', 'head', 'subset'])
def compute_pruning_orders(results, stats_fn=Performance.get_statistics, tiebreaker_fn=lambda v: None):
# TODO: reason about pairs that don't have a (transitive) ordering
# TODO: partial orders make this heuristic not optimal
# TODO: use result.external.name to cluster?
dominates = lambda v1, v2: all(s1 <= s2 for s1, s2 in safe_zip(stats_fn(v1), stats_fn(v2))) \
and tiebreaker_fn(v1) <= tiebreaker_fn(v2)
effort_orders = set()
for v1, v2 in combinations(results, r=2): # randomize
if dominates(v1, v2):
effort_orders.add((v1, v2)) # Includes equality
elif dominates(v2, v1):
effort_orders.add((v2, v1))
return effort_orders
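# Hedged note: a pair (v1, v2) records that v1 is component-wise no more costly than v2
# (both p_success and overhead, plus the tiebreaker); dynamic_programming below treats these
# pairs as pruning orders so dominated orderings are not expanded redundantly.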
def dynamic_programming(store, vertices, valid_head_fn, stats_fn=Performance.get_statistics, prune=True, greedy=False, **kwargs):
# TODO: include context here as a weak constraint
# TODO: works in the absence of partial orders
# TODO: can also more manually reorder
# 2^N rather than N!
start_time = time.time()
effort_orders = set() # 1 cheaper than 2
if prune:
effort_orders.update(compute_pruning_orders(vertices, stats_fn=stats_fn, **kwargs))
_, out_priority_orders = neighbors_from_orders(effort_orders) # more expensive
priority_ordering = topological_sort(vertices, effort_orders)[::-1] # most expensive to cheapest
# TODO: can break ties with index on action plan to prioritize doing the temporally first things
# TODO: could the greedy strategy lead to premature choices
# TODO: this starts to blow up - group together similar streams (e.g. collision streams) to decrease size
    # TODO: key grouping concerns are partial orders and ensuring feasibility (isomorphism)
# TODO: flood-fill cheapest as soon as something that has no future dependencies has been found
# TODO: do the forward version to take advantage of sink vertices
subset = frozenset()
queue = deque([subset]) # Acyclic because subsets
subproblems = {subset: Subproblem(cost=0, head=None, subset=None)}
while queue: # searches backward from last to first
if store.is_terminated():
return vertices
subset = queue.popleft() # TODO: greedy/weighted A* version of this (heuristic is next cheapest stream)
applied = set()
# TODO: roll-out more than one step to cut the horizon
# TODO: compute a heuristic that's the best case affordances from subsequent streams
for v in priority_ordering: # most expensive first
if greedy and applied:
break
if (v not in subset) and valid_head_fn(v, subset) and not (out_priority_orders[v] & applied):
applied.add(v)
new_subset = frozenset([v]) | subset
p_success, overhead = stats_fn(v)
new_cost = overhead + p_success*subproblems[subset].cost
subproblem = Subproblem(cost=new_cost, head=v, subset=subset) # Adds new element to the front
if new_subset not in subproblems:
queue.append(new_subset)
subproblems[new_subset] = subproblem
elif new_cost < subproblems[new_subset].cost:
subproblems[new_subset] = subproblem
ordering = []
subset = frozenset(vertices)
while True:
        if subset not in subproblems:
            # TODO: some sort of bug where the problem isn't solved?
            print(vertices)
            raise RuntimeError('Unable to recover an ordering over all vertices')
        subproblem = subproblems[subset]
if subproblem.head is None:
break
ordering.append(subproblem.head)
subset = subproblem.subset
#print('Streams: {} | Expected cost: {:.3f} | Time: {:.3f}'.format(
# len(ordering), compute_expected_cost(ordering, stats_fn=stats_fn), elapsed_time(start_time)))
return ordering
##################################################
def dummy_reorder_stream_plan(stream_plan, **kwargs):
return stream_plan
def random_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
return sample_topological_sort(stream_plan, get_partial_orders(stream_plan))
def greedy_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
return topological_sort(stream_plan, get_partial_orders(stream_plan),
priority_fn=lambda s: s.get_statistics().overhead)
##################################################
def dump_layers(distances):
streams_from_layer = {}
for stream, layer in distances.items():
streams_from_layer.setdefault(layer, []).append(stream)
for layer, streams in streams_from_layer.items():
print(layer, sorted(streams, key=Result.stats_heuristic, reverse=True))
return streams_from_layer
def compute_distances(stream_plan):
stream_orders = get_partial_orders(stream_plan)
reversed_orders = {(s2, s1) for s1, s2 in stream_orders}
in_stream_orders, out_stream_orders = neighbors_from_orders(reversed_orders)
sources = {stream for stream in stream_plan if not in_stream_orders[stream]} # In the reversed DAG
output_sources = {stream for stream in sources if stream.external.has_outputs}
test_sources = sources - output_sources
#visited = dijkstra(output_sources, reversed_orders)
#distances = {stream: node.g for stream, node in visited.items()}
distances = layer_sort(set(stream_plan) - test_sources, reversed_orders)
# TODO: take into account argument overlap
max_distance = max([0] + list(distances.values()))
for stream in stream_plan:
if stream not in distances:
distances[stream] = min([max_distance] + [distances[s] - 1 for s in out_stream_orders[stream]])
#dump_layers(distances)
return distances
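# Hedged note: compute_distances assigns each stream a layer index in the reversed stream
# DAG (via layer_sort); streams excluded from that sort (e.g. pure test streams) inherit a
# layer derived from their neighbors, and the layers later serve as reordering priorities.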
def layer_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
stream_orders = get_partial_orders(stream_plan)
reversed_orders = {(s2, s1) for s1, s2 in stream_orders}
distances = compute_distances(stream_plan)
priority_fn = lambda s: Score(not s.external.has_outputs, distances[s], -s.stats_heuristic())
reverse_order = topological_sort(stream_plan, reversed_orders, priority_fn=priority_fn)
return reverse_order[::-1]
def compute_statistics(stream_plan, bias=True):
stats_from_stream = {result: result.external.get_statistics() for result in stream_plan}
if not bias:
return stats_from_stream
distances = compute_distances(stream_plan)
max_distance = max(distances.values())
for result in stream_plan:
p_success, overhead = stats_from_stream[result]
if result.external.has_outputs:
# TODO: is_function, number of free inputs, etc.
# TODO: decrease p_success if fewer free inputs (or input streams)
# TODO: dynamic_programming seems to automatically order streams with fewer free ahead anyways
overhead += EPSILON*(max_distance - distances[result] + 1)
else:
p_success *= EPSILON
stats_from_stream[result] = Stats(p_success, overhead)
return stats_from_stream
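# Hedged note: the bias above scales p_success by EPSILON for output-free (test) streams so
# the reordering tends to run them early, while output-producing streams receive a small
# layer-dependent overhead penalty to break ties between otherwise identical statistics.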
##################################################
def optimal_reorder_stream_plan(store, stream_plan, stats_from_stream=None, **kwargs):
if not stream_plan:
return stream_plan
if stats_from_stream is None:
stats_from_stream = compute_statistics(stream_plan)
# TODO: use the negative output (or overhead) as a bound
indices = range(len(stream_plan))
index_from_stream = dict(zip(stream_plan, indices))
stream_orders = get_partial_orders(stream_plan)
stream_orders = {(index_from_stream[s1], index_from_stream[s2]) for s1, s2 in stream_orders}
#nodes = stream_plan
nodes = indices # TODO: are indices actually much faster?
in_stream_orders, out_stream_orders = neighbors_from_orders(stream_orders)
valid_combine = lambda v, subset: out_stream_orders[v] <= subset
#valid_combine = lambda v, subset: in_stream_orders[v] & subset
# TODO: these are special because they don't enable any downstream access to another stream
#sources = {stream_plan[index] for index in indices if not in_stream_orders[index]}
#sinks = {stream_plan[index] for index in indices if not out_stream_orders[index]} # Contains collision checks
#print(dijkstra(sources, get_partial_orders(stream_plan)))
stats_fn = lambda idx: stats_from_stream[stream_plan[idx]]
#tiebreaker_fn = lambda *args: 0
#tiebreaker_fn = lambda *args: random.random() # TODO: introduces cycles
tiebreaker_fn = lambda idx: stream_plan[idx].stats_heuristic()
ordering = dynamic_programming(store, nodes, valid_combine, stats_fn=stats_fn, tiebreaker_fn=tiebreaker_fn, **kwargs)
#import gc
#gc.collect()
return [stream_plan[index] for index in ordering]
##################################################
def reorder_stream_plan(store, stream_plan, algorithm=None, **kwargs):
if not stream_plan:
return stream_plan
stats_from_stream = compute_statistics(stream_plan)
stats = Counter(stats_from_stream.values())
if algorithm is None:
algorithm = 'layer' if len(stats) <= 1 else 'optimal'
if algorithm == 'dummy':
return dummy_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'random':
return random_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'greedy':
return greedy_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'layer':
#print('Heuristic reordering:', stats)
return layer_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'optimal':
#print('Optimal reordering:', stats)
return optimal_reorder_stream_plan(store, stream_plan, stats_from_stream, **kwargs)
raise NotImplementedError(algorithm)
| 13,469 |
Python
| 44.972696 | 129 | 0.663895 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/reinstantiate.py
|
from hsr_tamp.pddlstream.algorithms.downward import apply_action, get_conjunctive_parts
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance
from hsr_tamp.pddlstream.utils import MockSet
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
import pddl
import instantiate
def instantiate_unsatisfiable(state, action, var_mapping, negative_from_name={}):
precondition = []
for effect in action.effects:
if effect.literal.predicate == UNSATISFIABLE:
# Condition must be false for plan to succeed
conditions = set(get_conjunctive_parts(effect.condition))
negative = {literal for literal in conditions if literal.predicate in negative_from_name}
if not negative:
continue
assert len(negative) == 1
# TODO: handle the case where negative is not used (not (CFree ..))
normal_conjunction = pddl.Conjunction(conditions - negative)
            # TODO: assumes that it can instantiate with just predicate_to_atoms
normal_effect = pddl.Effect(effect.parameters, normal_conjunction, effect.literal)
# TODO: avoid recomputing these
objects_by_type = instantiate.get_objects_by_type([], [])
predicate_to_atoms = instantiate.get_atoms_by_predicate(state)
result = []
normal_effect.instantiate(var_mapping, state, {effect.literal},
objects_by_type, predicate_to_atoms, result)
for _, _, _, mapping in result:
for literal in negative:
new_literal = literal.rename_variables(mapping).negate()
assert (not new_literal.free_variables())
precondition.append(new_literal)
return precondition
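# Hedged note: the literals returned above are the instantiated negations of the negative
# stream/predicate atoms guarding UNSATISFIABLE conditional effects; reinstantiate_action
# below folds them back into the action's precondition.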
def reinstantiate_action(state, instance, negative_from_name={}):
    # Recomputes the instances without any pruned preconditions
    # TODO: makes the assumption that there are no negative derived predicates
action = instance.action
var_mapping = instance.var_mapping
init_facts = set()
fluent_facts = MockSet()
precondition = []
try:
action.precondition.instantiate(var_mapping, init_facts, fluent_facts, precondition)
except pddl.conditions.Impossible:
return None
precondition = list(set(precondition)) + instantiate_unsatisfiable(state, action, var_mapping, negative_from_name)
effects = []
effect_from_literal = {literal: (cond, effect, effect_mapping)
for cond, literal, effect, effect_mapping in instance.effect_mappings}
for literal in instance.applied_effects:
cond, effect, effect_mapping = effect_from_literal[literal]
if effect is None: # Stream effect
#effects.append((cond, literal, cond, effect))
continue
else:
effect._instantiate(effect_mapping, init_facts, fluent_facts, effects)
new_effects = []
for cond, effect, e, m in effects:
precondition.extend(cond)
new_effects.append(([], effect, e, m))
return pddl.PropositionalAction(instance.name, precondition, new_effects, instance.cost, action, var_mapping)
def reinstantiate_action_instances(task, old_instances, **kwargs):
    # Recomputes the instances without any pruned preconditions
state = set(task.init)
new_instances = []
for old_instance in old_instances:
# TODO: better way of instantiating conditional effects (when not fluent)
new_instance = reinstantiate_action(state, old_instance, **kwargs)
assert (new_instance is not None)
new_instances.append(new_instance)
apply_action(state, new_instance)
new_instances.append(get_goal_instance(task.goal)) # TODO: move this?
return new_instances
##################################################
def reinstantiate_axiom(old_instance, init_facts=set(), fluent_facts=MockSet()):
axiom = old_instance.axiom
var_mapping = old_instance.var_mapping
new_instance = axiom.instantiate(var_mapping, init_facts, fluent_facts)
assert (new_instance is not None)
return new_instance
def reinstantiate_axiom_instances(old_instances, **kwargs):
return [reinstantiate_axiom(old_instance, **kwargs) for old_instance in old_instances]
| 4,332 |
Python
| 45.591397 | 118 | 0.667359 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/stream_action.py
|
from collections import OrderedDict
from hsr_tamp.pddlstream.algorithms.downward import make_action, make_parameters, make_domain
from hsr_tamp.pddlstream.language.constants import Not
from hsr_tamp.pddlstream.language.conversion import pddl_from_object, substitute_expression
from hsr_tamp.pddlstream.language.statistics import check_effort
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.utils import INF
BOUND_PREDICATE = '_bound'
def enforce_single_binding(result, preconditions, effects):
binding_facts = [(BOUND_PREDICATE, pddl_from_object(out)) for out in result.output_objects]
preconditions.extend(Not(fact) for fact in binding_facts)
effects.extend(fact for fact in binding_facts)
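# Hedged note: the (_bound out) guard above means each optimistic output object can be
# produced by at most one stream action in the compiled planning problem, preventing the
# planner from binding the same placeholder through two different streams.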
def get_stream_actions(results, unique_binding=False, effort_scale=1, max_effort=INF, **kwargs):
result_from_name = OrderedDict()
stream_actions = []
for result in results:
#if not isinstance(stream_result, StreamResult):
if type(result) == FunctionResult:
continue
effort = result.get_effort(**kwargs)
if not check_effort(effort, max_effort):
continue
name = '{}-{}'.format(result.external.name, len(result_from_name))
#name = '{}_{}_{}'.format(result.external.name, # No spaces & parens
# ','.join(map(pddl_from_object, result.instance.input_objects)),
# ','.join(map(pddl_from_object, result.output_objects)))
assert name not in result_from_name
result_from_name[name] = result
preconditions = list(result.instance.get_domain())
effects = list(result.get_certified()) + [result.stream_fact]
if unique_binding:
enforce_single_binding(result, preconditions, effects)
cost = effort_scale * effort
stream_actions.append(make_action(name, [], preconditions, effects, cost))
return stream_actions, result_from_name
def add_stream_actions(domain, results, **kwargs):
if not results:
return domain, {}
stream_actions, result_from_name = get_stream_actions(results, **kwargs)
output_objects = []
for result in result_from_name.values():
if isinstance(result, StreamResult):
output_objects.extend(map(pddl_from_object, result.output_objects))
new_constants = list(make_parameters(set(output_objects) | set(domain.constants)))
# to_untyped_strips, free_variables
new_domain = make_domain(constants=new_constants, predicates=domain.predicates,
actions=domain.actions[:] + stream_actions, axioms=domain.axioms)
#new_domain = copy.copy(domain)
return new_domain, result_from_name
| 2,793 |
Python
| 47.172413 | 96 | 0.688149 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/negative.py
|
import time
from hsr_tamp.pddlstream.algorithms.downward import fact_from_fd, plan_preimage, apply_action, \
GOAL_NAME, get_derived_predicates, literal_holds
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axiom_plan
from hsr_tamp.pddlstream.algorithms.scheduling.reinstantiate import reinstantiate_action_instances, reinstantiate_axiom_instances
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl
from hsr_tamp.pddlstream.language.function import Predicate, PredicateResult
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import safe_zip, INF, elapsed_time
def convert_negative_predicate(negative, literal, step_from_atom, negative_plan):
input_objects = tuple(map(obj_from_pddl, literal.args)) # Might be negative
instance = negative.get_instance(input_objects)
value = not literal.negated
if instance.enumerated:
assert (instance.value == value)
else:
result = PredicateResult(instance, value, optimistic=True)
step = min(step_from_atom[literal]) if result.is_deferrable() else 0
negative_plan[result] = min(step, negative_plan.get(result, INF))
def get_negative_result(negative, input_objects, fluent_facts=frozenset()):
instance = negative.get_instance(input_objects, fluent_facts=fluent_facts)
optimistic = not instance.successful # TODO: clean this up
return instance._Result(instance, output_objects=tuple(), opt_index=instance.opt_index,
call_index=instance.num_calls, optimistic=optimistic)
def convert_negative_stream(negative, literal, step_from_atom, real_states, negative_plan):
import pddl
# assert not negative.is_fluent
fluent_facts_list = []
if negative.is_fluent:
# TODO: ensure that only used once?
for step in step_from_atom[literal]:
fluent_facts_list.append(list(map(fact_from_fd, filter(
lambda f: isinstance(f, pddl.Atom) and (f.predicate in negative.fluents), real_states[step]))))
else:
fluent_facts_list.append(frozenset())
input_objects = tuple(map(obj_from_pddl, literal.args)) # Might be negative
for fluent_facts in fluent_facts_list:
result = get_negative_result(negative, input_objects, fluent_facts)
#if not result.instance.successful: # Doesn't work with reachieve=True
step = min(step_from_atom[literal]) if result.is_deferrable() else 0
negative_plan[result] = min(step, negative_plan.get(result, INF))
def convert_negative(negative_preimage, negative_from_name, step_from_atom, real_states):
negative_plan = {}
for literal in negative_preimage:
negative = negative_from_name[literal.predicate]
if isinstance(negative, Predicate):
convert_negative_predicate(negative, literal, step_from_atom, negative_plan)
elif isinstance(negative, Stream):
convert_negative_stream(negative, literal, step_from_atom, real_states, negative_plan)
else:
raise ValueError(negative)
return negative_plan
##################################################
def recover_negative_axioms(real_task, opt_task, axiom_plans, action_plan, negative_from_name):
start_time = time.time()
action_plan = reinstantiate_action_instances(opt_task, action_plan, negative_from_name=negative_from_name)
# https://github.com/caelan/pddlstream/commit/18b303e19bbab9f8e0016fbb2656f461067e1e94#diff-55454a85485551f9139e20a446b56a83L53
#simplify_conditional_effects(opt_task, action_plan, negative_from_name)
axiom_plans = list(map(reinstantiate_axiom_instances, axiom_plans))
axioms_from_name = get_derived_predicates(opt_task.axioms)
# TODO: could instead just accumulate difference between real and opt
opt_task.init = set(opt_task.init)
real_states = [set(real_task.init)]
num_negative = 0
preimage_plan = []
for axiom_plan, action_instance in safe_zip(axiom_plans, action_plan):
preimage = [l for l in plan_preimage(axiom_plan + [action_instance])
if (l.predicate in axioms_from_name)]
#assert conditions_hold(opt_task.init, conditions)
# TODO: only add derived facts and negative facts to fluent state to make normalizing easier
negative_axiom_plan = extract_axiom_plan(opt_task, preimage, negative_from_name,
static_state=opt_task.init)
#static_state=real_states[-1])
assert negative_axiom_plan is not None
num_negative += len(negative_axiom_plan)
preimage_plan.extend(negative_axiom_plan + axiom_plan + [action_instance])
if action_instance.name != GOAL_NAME:
apply_action(opt_task.init, action_instance)
real_states.append(set(real_states[-1]))
apply_action(real_states[-1], action_instance)
#print('Steps: {} | Negative: {} | Preimage: {} | Time: {:.3f}'.format(
# len(action_plan), num_negative, len(preimage_plan), elapsed_time(start_time)))
return real_states, preimage_plan
| 5,142 |
Python
| 53.712765 | 131 | 0.690393 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_functions.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.utils import INF
from hsr_tamp.pddlstream.language.constants import is_parameter, Head
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl
def extract_function_result(results_from_head, action, pddl_args):
import pddl
if action.cost is None:
return None
# TODO: retrieve constant action costs
# TODO: associate costs with the steps they are applied
expression = action.cost.expression
if not isinstance(expression, pddl.PrimitiveNumericExpression):
return None
var_mapping = {p.name: a for p, a in zip(action.parameters, pddl_args)}
obj_args = tuple(obj_from_pddl(var_mapping[p] if is_parameter(p) else p)
for p in expression.args)
head = Head(expression.symbol, obj_args)
[result] = results_from_head[head]
if result is None:
return None
return result
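# Hedged sketch (hypothetical names): for an action whose cost is
# (increase (total-cost) (Distance ?q1 ?q2)), the head recovered above would be
# Head('Distance', (q1_obj, q2_obj)), which is then matched against optimistic FunctionResults.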
def compute_function_plan(opt_evaluations, action_plan):
results_from_head = defaultdict(list)
for evaluation, result in opt_evaluations.items():
results_from_head[evaluation.head].append(result)
step_from_function = {}
for step, action_instance in enumerate(action_plan):
action = action_instance.action
if action is None:
continue
args = [action_instance.var_mapping[p.name] for p in action.parameters]
result = extract_function_result(results_from_head, action, args)
if result is not None:
step_from_function[result] = min(step, step_from_function.get(result, INF))
if not result.is_deferrable():
step_from_function[result] = 0
#function_from_instance[action_instance] = result
return step_from_function
| 1,766 |
Python
| 39.15909 | 87 | 0.685164 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/utils.py
|
from hsr_tamp.pddlstream.algorithms.downward import add_predicate, make_predicate, get_literals, fact_from_fd, conditions_hold, \
apply_action, get_derived_predicates
from hsr_tamp.pddlstream.language.constants import And, Not
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.utils import apply_mapping
def partition_results(evaluations, results, apply_now):
applied_results = []
deferred_results = []
opt_evaluations = set(evaluations)
for result in results:
assert(not result.instance.disabled)
assert(not result.instance.enumerated)
domain = set(map(evaluation_from_fact, result.instance.get_domain()))
if isinstance(result, FunctionResult) or (apply_now(result) and (domain <= opt_evaluations)):
applied_results.append(result)
opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
else:
deferred_results.append(result)
return applied_results, deferred_results
def partition_external_plan(external_plan):
function_plan = list(filter(lambda r: isinstance(r, FunctionResult), external_plan))
stream_plan = list(filter(lambda r: r not in function_plan, external_plan))
return stream_plan, function_plan
def add_unsatisfiable_to_goal(domain, goal_expression):
#return goal_expression
import pddl
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
add_predicate(domain, make_predicate(UNSATISFIABLE, []))
negated_atom = pddl.NegatedAtom(UNSATISFIABLE, tuple())
for action in domain.actions:
if negated_atom not in action.precondition.parts:
action.precondition = pddl.Conjunction([action.precondition, negated_atom]).simplified()
#return goal_expression
return And(goal_expression, Not((UNSATISFIABLE,)))
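# Hedged note: adding (not (UNSATISFIABLE)) to every action precondition and to the goal
# lets derived axioms assert UNSATISFIABLE to poison states violating negated/optimizer
# constraints, pruning those branches without deleting the offending actions outright.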
def get_instance_facts(instance, node_from_atom):
# TODO: ignores conditional effect conditions
facts = []
for precondition in get_literals(instance.action.precondition):
if precondition.negated:
continue
args = apply_mapping(precondition.args, instance.var_mapping)
literal = precondition.__class__(precondition.predicate, args)
fact = fact_from_fd(literal)
if fact in node_from_atom:
facts.append(fact)
return facts
| 2,417 |
Python
| 45.499999 | 129 | 0.721142 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/apply_fluents.py
|
import copy
from hsr_tamp.pddlstream.algorithms.downward import fact_from_fd
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.conversion import pddl_from_object
from hsr_tamp.pddlstream.language.object import OptimisticObject, UniqueOptValue
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.utils import neighbors_from_orders, get_mapping, safe_zip
def get_steps_from_stream(stream_plan, step_from_fact, node_from_atom):
steps_from_stream = {}
for result in reversed(stream_plan):
steps_from_stream[result] = set()
for fact in result.get_certified():
if (fact in step_from_fact) and (node_from_atom[fact].result == result):
steps_from_stream[result].update(step_from_fact[fact])
for fact in result.instance.get_domain():
step_from_fact[fact] = step_from_fact.get(fact, set()) | steps_from_stream[result]
# TODO: apply this recursively
return steps_from_stream
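# Hedged note: steps_from_stream maps each result to the set of plan steps whose state it
# must be evaluated against, propagated backwards from the steps that consume its certified
# facts into the steps constraining its own domain facts.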
def get_fluent_instance(external, input_objects, state):
import pddl
fluent_facts = map(fact_from_fd, filter(
lambda f: isinstance(f, pddl.Atom) and (f.predicate in external.fluents), state))
return external.get_instance(input_objects, fluent_facts=fluent_facts)
def convert_fluent_streams(stream_plan, real_states, action_plan, step_from_fact, node_from_atom):
#return stream_plan
import pddl
assert len(real_states) == len(action_plan) + 1
steps_from_stream = get_steps_from_stream(stream_plan, step_from_fact, node_from_atom)
# TODO: ensure that derived facts aren't in fluents?
# TODO: handle case where costs depend on the outputs
_, outgoing_edges = neighbors_from_orders(get_partial_orders(stream_plan, init_facts=map(
fact_from_fd, filter(lambda f: isinstance(f, pddl.Atom), real_states[0]))))
static_plan = []
fluent_plan = []
for result in stream_plan:
external = result.external
if isinstance(result, FunctionResult) or (result.opt_index != 0) or (not external.is_fluent):
static_plan.append(result)
continue
if outgoing_edges[result]:
# No way of taking into account the binding of fluent inputs when preventing cycles
raise NotImplementedError('Fluent stream is required for another stream: {}'.format(result))
#if (len(steps_from_stream[result]) != 1) and result.output_objects:
# raise NotImplementedError('Fluent stream required in multiple states: {}'.format(result))
for state_index in steps_from_stream[result]:
new_output_objects = [
#OptimisticObject.from_opt(out.value, object())
OptimisticObject.from_opt(out.value, UniqueOptValue(result.instance, object(), name))
for name, out in safe_zip(result.external.outputs, result.output_objects)]
if new_output_objects and (state_index <= len(action_plan) - 1):
# TODO: check that the objects aren't used in any effects
instance = copy.copy(action_plan[state_index])
action_plan[state_index] = instance
output_mapping = get_mapping(list(map(pddl_from_object, result.output_objects)),
list(map(pddl_from_object, new_output_objects)))
instance.var_mapping = {p: output_mapping.get(v, v)
for p, v in instance.var_mapping.items()}
new_instance = get_fluent_instance(external, result.instance.input_objects, real_states[state_index])
# TODO: handle optimistic here
new_result = new_instance.get_result(new_output_objects, opt_index=result.opt_index)
fluent_plan.append(new_result)
return static_plan + fluent_plan
| 3,880 |
Python
| 56.073529 | 113 | 0.665206 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_streams.py
|
from collections import namedtuple, defaultdict
from heapq import heappop, heappush
from hsr_tamp.pddlstream.language.conversion import is_negated_atom, fact_from_evaluation, evaluation_from_fact
from hsr_tamp.pddlstream.language.statistics import check_effort
from hsr_tamp.pddlstream.utils import HeapElement, INF, implies
Node = namedtuple('Node', ['effort', 'result']) # TODO: include level
EFFORT_OP = sum # max | sum
NULL_COND = (None,)
def get_achieving_streams(evaluations, stream_results, max_effort=INF, **effort_args):
unprocessed_from_atom = defaultdict(list)
node_from_atom = {NULL_COND: Node(0, None)}
conditions_from_stream = {}
remaining_from_stream = {}
for result in stream_results:
conditions_from_stream[result] = result.instance.get_domain() + (NULL_COND,)
remaining_from_stream[result] = len(conditions_from_stream[result])
for atom in conditions_from_stream[result]:
unprocessed_from_atom[atom].append(result)
for atom in evaluations:
if not is_negated_atom(atom):
node_from_atom[fact_from_evaluation(atom)] = Node(0, None)
queue = [HeapElement(node.effort, atom) for atom, node in node_from_atom.items()]
while queue:
atom = heappop(queue).value
if atom not in unprocessed_from_atom:
continue
for result in unprocessed_from_atom[atom]:
remaining_from_stream[result] -= 1
if remaining_from_stream[result]:
continue
effort = result.get_effort(**effort_args)
total_effort = effort + EFFORT_OP(
node_from_atom[cond].effort for cond in conditions_from_stream[result])
if (max_effort is not None) and (max_effort <= total_effort):
continue
for new_atom in result.get_certified():
if (new_atom not in node_from_atom) or (total_effort < node_from_atom[new_atom].effort):
node_from_atom[new_atom] = Node(total_effort, result)
heappush(queue, HeapElement(total_effort, new_atom))
del unprocessed_from_atom[atom]
del node_from_atom[NULL_COND]
return node_from_atom
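# Hedged note: get_achieving_streams is a Dijkstra-style relaxation over facts, where the
# effort of a fact is the EFFORT_OP-combination of its cheapest achieving stream chain;
# node_from_atom maps each reachable fact to (best effort, achieving result).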
def evaluations_from_stream_plan(evaluations, stream_results, max_effort=INF):
opt_evaluations = set(evaluations)
for result in stream_results:
if result.instance.disabled or result.instance.enumerated:
raise RuntimeError(result)
domain = set(map(evaluation_from_fact, result.instance.get_domain()))
assert(domain <= opt_evaluations)
opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
node_from_atom = get_achieving_streams(evaluations, stream_results)
result_from_evaluation = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()
if check_effort(n.effort, max_effort)}
return result_from_evaluation
def extract_stream_plan(node_from_atom, target_facts, stream_plan):
# TODO: prune with rules
# TODO: linearization that takes into account satisfied goals at each level
# TODO: can optimize for all streams & axioms all at once
for fact in target_facts:
if fact not in node_from_atom:
raise RuntimeError('Preimage fact {} is not achievable!'.format(fact))
#RuntimeError: Preimage fact ('new-axiom@0',) is not achievable!
result = node_from_atom[fact].result
if result is None:
continue
extract_stream_plan(node_from_atom, result.instance.get_domain(), stream_plan)
if result not in stream_plan:
# TODO: dynamic programming version that doesn't reconsider facts
# TODO: don't add if the fact is already satisfied
stream_plan.append(result)
| 3,800 |
Python
| 48.363636 | 111 | 0.662632 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/plan_streams.py
|
from __future__ import print_function
import copy
from collections import defaultdict, namedtuple
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, get_cost_scale, \
conditions_hold, apply_action, scale_cost, fd_from_fact, make_domain, make_predicate, evaluation_from_fd, \
plan_preimage, fact_from_fd, USE_FORBID, pddl_from_instance, parse_action
from hsr_tamp.pddlstream.algorithms.instantiate_task import instantiate_task, sas_from_instantiated, FD_INSTANTIATE
from hsr_tamp.pddlstream.algorithms.scheduling.add_optimizers import add_optimizer_effects, \
using_optimizers, recover_simultaneous
from hsr_tamp.pddlstream.algorithms.scheduling.apply_fluents import convert_fluent_streams
from hsr_tamp.pddlstream.algorithms.scheduling.negative import recover_negative_axioms, convert_negative
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import postprocess_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import recover_axioms_plans
from hsr_tamp.pddlstream.algorithms.scheduling.recover_functions import compute_function_plan
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan, \
evaluations_from_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.stream_action import add_stream_actions
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_results, \
add_unsatisfiable_to_goal, get_instance_facts
from hsr_tamp.pddlstream.algorithms.search import solve_from_task
from hsr_tamp.pddlstream.algorithms.advanced import UNIVERSAL_TO_CONDITIONAL
from hsr_tamp.pddlstream.language.constants import Not, get_prefix, EQ, FAILED, OptPlan, Action
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl_plan, evaluation_from_fact, \
fact_from_evaluation, transform_plan_args, transform_action_args, obj_from_pddl
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.exogenous import get_fluent_domain
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
from hsr_tamp.pddlstream.language.statistics import compute_plan_effort
from hsr_tamp.pddlstream.language.temporal import SimplifiedDomain, solve_tfd
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
from hsr_tamp.pddlstream.language.object import Object
from hsr_tamp.pddlstream.utils import Verbose, INF, topological_sort, get_ancestors
RENAME_ACTIONS = True
#RENAME_ACTIONS = not USE_FORBID
OptSolution = namedtuple('OptSolution', ['stream_plan', 'opt_plan', 'cost']) # TODO: move to the below
#OptSolution = namedtuple('OptSolution', ['stream_plan', 'action_plan', 'cost', 'supporting_facts', 'axiom_plan'])
##################################################
def add_stream_efforts(node_from_atom, instantiated, effort_weight, **kwargs):
if effort_weight is None:
return
# TODO: make effort just a multiplier (or relative) to avoid worrying about the scale
# TODO: regularize & normalize across the problem?
#efforts = []
for instance in instantiated.actions:
# TODO: prune stream actions here?
# TODO: round each effort individually to penalize multiple streams
facts = get_instance_facts(instance, node_from_atom)
#effort = COMBINE_OP([0] + [node_from_atom[fact].effort for fact in facts])
stream_plan = []
extract_stream_plan(node_from_atom, facts, stream_plan)
effort = compute_plan_effort(stream_plan, **kwargs)
instance.cost += scale_cost(effort_weight*effort)
# TODO: store whether it uses shared/unique outputs and prune too expensive streams
#efforts.append(effort)
#print(min(efforts), efforts)
##################################################
def rename_instantiated_actions(instantiated, rename):
# TODO: rename SAS instead?
actions = instantiated.actions[:]
renamed_actions = []
action_from_name = {}
for i, action in enumerate(actions):
renamed_actions.append(copy.copy(action))
renamed_name = 'a{}'.format(i) if rename else action.name
renamed_actions[-1].name = '({})'.format(renamed_name)
action_from_name[renamed_name] = action # Change reachable_action_params?
instantiated.actions[:] = renamed_actions
return action_from_name
##################################################
def get_plan_cost(action_plan, cost_from_action):
if action_plan is None:
return INF
# TODO: return cost per action instance
#return sum([0.] + [instance.cost for instance in action_plan])
scaled_cost = sum([0.] + [cost_from_action[instance] for instance in action_plan])
return scaled_cost / get_cost_scale()
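# Hedged note: action costs were scaled by get_cost_scale() when the task was constructed
# (presumably so the downstream planner can work with integer costs), so dividing here
# recovers the unscaled plan cost.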
def instantiate_optimizer_axioms(instantiated, domain, results):
# Needed for instantiating axioms before adding stream action effects
# Otherwise, FastDownward will prune these unreachable axioms
# TODO: compute this first and then apply the eager actions
stream_init = {fd_from_fact(result.stream_fact)
for result in results if isinstance(result, StreamResult)}
evaluations = list(map(evaluation_from_fd, stream_init | instantiated.atoms))
temp_domain = make_domain(predicates=[make_predicate(UNSATISFIABLE, [])],
axioms=[ax for ax in domain.axioms if ax.name == UNSATISFIABLE])
temp_problem = get_problem(evaluations, Not((UNSATISFIABLE,)), temp_domain)
# TODO: UNSATISFIABLE might be in atoms making the goal always infeasible
with Verbose():
# TODO: the FastDownward instantiation prunes static preconditions
use_fd = False if using_optimizers(results) else FD_INSTANTIATE
new_instantiated = instantiate_task(task_from_domain_problem(temp_domain, temp_problem),
use_fd=use_fd, check_infeasible=False, prune_static=False)
assert new_instantiated is not None
instantiated.axioms.extend(new_instantiated.axioms)
instantiated.atoms.update(new_instantiated.atoms)
##################################################
def recover_partial_orders(stream_plan, node_from_atom):
# Useful to recover the correct DAG
partial_orders = set()
for child in stream_plan:
# TODO: account for fluent objects
for fact in child.get_domain():
parent = node_from_atom[fact].result
if parent is not None:
partial_orders.add((parent, child))
#stream_plan = topological_sort(stream_plan, partial_orders)
return partial_orders
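# Hedged note: the recovered (parent, child) pairs form the stream-plan DAG, where the
# parent is the result certifying one of the child's domain facts; later code can
# topologically sort or defer results along these edges.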
def recover_stream_plan(evaluations, current_plan, opt_evaluations, goal_expression, domain, node_from_atom,
action_plan, axiom_plans, negative, replan_step):
# Universally quantified conditions are converted into negative axioms
# Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by taking the Cartesian product of types (slow)
# Added effects cancel out removed effects
# TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain))
opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain))
negative_from_name = {external.blocked_predicate: external for external in negative if external.is_negated}
real_states, full_plan = recover_negative_axioms(
real_task, opt_task, axiom_plans, action_plan, negative_from_name)
function_plan = compute_function_plan(opt_evaluations, action_plan)
full_preimage = plan_preimage(full_plan, []) # Does not contain the stream preimage!
negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, full_preimage))
negative_plan = convert_negative(negative_preimage, negative_from_name, full_preimage, real_states)
function_plan.update(negative_plan)
# TODO: OrderedDict for these plans
# TODO: this assumes that actions do not negate preimage goals
positive_preimage = {l for l in (set(full_preimage) - real_states[0] - negative_preimage) if not l.negated}
steps_from_fact = {fact_from_fd(l): full_preimage[l] for l in positive_preimage}
last_from_fact = {fact: min(steps) for fact, steps in steps_from_fact.items() if get_prefix(fact) != EQ}
#stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
# visualize_constraints(map(fact_from_fd, target_facts))
for result, step in function_plan.items():
for fact in result.get_domain():
last_from_fact[fact] = min(step, last_from_fact.get(fact, INF))
# TODO: get_steps_from_stream
stream_plan = []
last_from_stream = dict(function_plan)
for result in current_plan: # + negative_plan?
# TODO: actually compute when these are needed + dependencies
last_from_stream[result] = 0
if isinstance(result.external, Function) or (result.external in negative):
if len(action_plan) > replan_step:
raise NotImplementedError() # TODO: deferring negated optimizers
# Prevents these results from being pruned
function_plan[result] = replan_step
else:
stream_plan.append(result)
curr_evaluations = evaluations_from_stream_plan(evaluations, stream_plan, max_effort=None)
extraction_facts = set(last_from_fact) - set(map(fact_from_evaluation, curr_evaluations))
extract_stream_plan(node_from_atom, extraction_facts, stream_plan)
# Recomputing due to postprocess_stream_plan
stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan, last_from_fact)
node_from_atom = get_achieving_streams(evaluations, stream_plan, max_effort=None)
fact_sequence = [set(result.get_domain()) for result in stream_plan] + [extraction_facts]
for facts in reversed(fact_sequence): # Bellman ford
for fact in facts: # could flatten instead
result = node_from_atom[fact].result
if result is None:
continue
step = last_from_fact[fact] if result.is_deferrable() else 0
last_from_stream[result] = min(step, last_from_stream.get(result, INF))
for domain_fact in result.instance.get_domain():
last_from_fact[domain_fact] = min(last_from_stream[result], last_from_fact.get(domain_fact, INF))
stream_plan.extend(function_plan)
partial_orders = recover_partial_orders(stream_plan, node_from_atom)
bound_objects = set()
for result in stream_plan:
if (last_from_stream[result] == 0) or not result.is_deferrable(bound_objects=bound_objects):
for ancestor in get_ancestors(result, partial_orders) | {result}:
# TODO: this might change descendants of ancestor. Perform in a while loop.
last_from_stream[ancestor] = 0
if isinstance(ancestor, StreamResult):
bound_objects.update(out for out in ancestor.output_objects if out.is_unique())
#local_plan = [] # TODO: not sure what this was for
#for fact, step in sorted(last_from_fact.items(), key=lambda pair: pair[1]): # Earliest to latest
# print(step, fact)
# extract_stream_plan(node_from_atom, [fact], local_plan, last_from_fact, last_from_stream)
# Each stream has an earliest evaluation time
# When computing the latest, use 0 if something isn't deferred
# Evaluate each stream as soon as possible
# Option to defer streams after a point in time?
# TODO: action costs for streams that encode uncertainty
state = set(real_task.init)
remaining_results = list(stream_plan)
first_from_stream = {}
#assert 1 <= replan_step # Plan could be empty
for step, instance in enumerate(action_plan):
for result in list(remaining_results):
# TODO: could do this more efficiently if need be
domain = result.get_domain() + get_fluent_domain(result)
if conditions_hold(state, map(fd_from_fact, domain)):
remaining_results.remove(result)
certified = {fact for fact in result.get_certified() if get_prefix(fact) != EQ}
state.update(map(fd_from_fact, certified))
if step != 0:
first_from_stream[result] = step
# TODO: assumes no fluent axiom domain conditions
apply_action(state, instance)
#assert not remaining_results # Not true if retrace
if first_from_stream:
replan_step = min(replan_step, *first_from_stream.values())
eager_plan = []
results_from_step = defaultdict(list)
for result in stream_plan:
earliest_step = first_from_stream.get(result, 0) # exogenous
latest_step = last_from_stream.get(result, 0) # defer
#assert earliest_step <= latest_step
defer = replan_step <= latest_step
if not defer:
eager_plan.append(result)
# We only perform a deferred evaluation if it has all deferred dependencies
# TODO: make a flag that also allows dependencies to be deferred
future = (earliest_step != 0) or defer
if future:
future_step = latest_step if defer else earliest_step
results_from_step[future_step].append(result)
# TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan, steps_from_fact, node_from_atom)
combined_plan = []
for step, action in enumerate(action_plan):
combined_plan.extend(result.get_action() for result in results_from_step[step])
combined_plan.append(transform_action_args(pddl_from_instance(action), obj_from_pddl))
# TODO: the returned facts have the same side-effect bug as above
# TODO: annotate when each preimage fact is used
preimage_facts = {fact_from_fd(l) for l in full_preimage if (l.predicate != EQ) and not l.negated}
for negative_result in negative_plan: # TODO: function_plan
preimage_facts.update(negative_result.get_certified())
for result in eager_plan:
preimage_facts.update(result.get_domain())
# Might not be able to regenerate facts involving the outputs of streams
preimage_facts.update(result.get_certified()) # Some facts might not be in the preimage
# TODO: record streams and axioms
return eager_plan, OptPlan(combined_plan, preimage_facts)
##################################################
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
opt_evaluations, node_from_atom, goal_expression,
effort_weight, debug=False, **kwargs):
# TODO: assert that the unused parameters are off
assert domain is stream_domain
#assert len(applied_results) == len(all_results)
problem = get_problem(opt_evaluations, goal_expression, domain)
with Verbose():
instantiated = instantiate_task(task_from_domain_problem(domain, problem))
if instantiated is None:
return instantiated, None, None, INF
problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl)
pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug, **kwargs)
if pddl_plan is None:
return instantiated, None, pddl_plan, makespan
instance_from_action_args = defaultdict(list)
for instance in instantiated.actions:
name, args = parse_action(instance)
instance_from_action_args[name, args].append(instance)
#instance.action, instance.var_mapping
action_instances = []
for action in pddl_plan:
instances = instance_from_action_args[action.name, action.args]
if len(instances) != 1:
            for instance in instances:
                instance.dump()
#assert len(instances) == 1 # TODO: support 2 <= case
action_instances.append(instances[0])
temporal_plan = obj_from_pddl_plan(pddl_plan) # pddl_plan is sequential
return instantiated, action_instances, temporal_plan, makespan
def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
opt_evaluations, node_from_atom, goal_expression,
effort_weight, debug=False, **kwargs):
#print(sorted(map(fact_from_evaluation, opt_evaluations)))
temporal_plan = None
problem = get_problem(opt_evaluations, goal_expression, stream_domain) # begin_metric
with Verbose(verbose=debug):
task = task_from_domain_problem(stream_domain, problem)
instantiated = instantiate_task(task)
if instantiated is None:
return instantiated, None, temporal_plan, INF
cost_from_action = {action: action.cost for action in instantiated.actions}
add_stream_efforts(node_from_atom, instantiated, effort_weight)
if using_optimizers(applied_results):
add_optimizer_effects(instantiated, node_from_atom)
# TODO: reachieve=False when using optimizers or should add applied facts
instantiate_optimizer_axioms(instantiated, domain, all_results)
action_from_name = rename_instantiated_actions(instantiated, RENAME_ACTIONS)
# TODO: the action unsatisfiable conditions are pruned
with Verbose(debug):
sas_task = sas_from_instantiated(instantiated)
#sas_task.metric = task.use_min_cost_metric
sas_task.metric = True
# TODO: apply renaming to hierarchy as well
# solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
if renamed_plan is None:
return instantiated, None, temporal_plan, INF
action_instances = [action_from_name[name if RENAME_ACTIONS else '({} {})'.format(name, ' '.join(args))]
for name, args in renamed_plan]
cost = get_plan_cost(action_instances, cost_from_action)
return instantiated, action_instances, temporal_plan, cost
##################################################
def plan_streams(evaluations, goal_expression, domain, all_results, negative, effort_weight, max_effort,
simultaneous=False, reachieve=True, replan_actions=set(), **kwargs):
# TODO: alternatively could translate with stream actions on real opt_state and just discard them
# TODO: only consider axioms that have stream conditions?
#reachieve = reachieve and not using_optimizers(all_results)
#for i, result in enumerate(all_results):
# print(i, result, result.get_effort())
applied_results, deferred_results = partition_results(
evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)
if reachieve and not using_optimizers(all_results):
achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
applied_results = achieved_results | set(applied_results)
evaluations = init_evaluations # For clarity
# TODO: could iteratively increase max_effort
node_from_atom = get_achieving_streams(evaluations, applied_results, # TODO: apply to all_results?
max_effort=max_effort)
opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
if UNIVERSAL_TO_CONDITIONAL or using_optimizers(all_results):
goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)
temporal = isinstance(stream_domain, SimplifiedDomain)
optimistic_fn = solve_optimistic_temporal if temporal else solve_optimistic_sequential
instantiated, action_instances, temporal_plan, cost = optimistic_fn(
domain, stream_domain, applied_results, all_results, opt_evaluations,
node_from_atom, goal_expression, effort_weight, **kwargs)
if action_instances is None:
return OptSolution(FAILED, FAILED, cost)
action_instances, axiom_plans = recover_axioms_plans(instantiated, action_instances)
# TODO: extract out the minimum set of conditional effects that are actually required
#simplify_conditional_effects(instantiated.task, action_instances)
stream_plan, action_instances = recover_simultaneous(
applied_results, negative, deferred_from_name, action_instances)
action_plan = transform_plan_args(map(pddl_from_instance, action_instances), obj_from_pddl)
replan_step = min([step+1 for step, action in enumerate(action_plan)
if action.name in replan_actions] or [len(action_plan)+1]) # step after action application
stream_plan, opt_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations, goal_expression, stream_domain,
node_from_atom, action_instances, axiom_plans, negative, replan_step)
if temporal_plan is not None:
# TODO: handle deferred streams
assert all(isinstance(action, Action) for action in opt_plan.action_plan)
opt_plan.action_plan[:] = temporal_plan
return OptSolution(stream_plan, opt_plan, cost)
| 21,538 |
Python
| 55.091146 | 122 | 0.693611 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/postprocess.py
|
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem
from hsr_tamp.pddlstream.algorithms.instantiate_task import sas_from_pddl
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.stream_action import get_stream_actions
from hsr_tamp.pddlstream.algorithms.scheduling.utils import add_unsatisfiable_to_goal
from hsr_tamp.pddlstream.algorithms.search import solve_from_task
from hsr_tamp.pddlstream.language.constants import And
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.utils import flatten, INF
# TODO: rename this to plan streams?
DO_RESCHEDULE = False
#RESCHEDULE_PLANNER = 'ff-astar'
RESCHEDULE_PLANNER = 'lmcut-astar'
#RESCHEDULE_PLANNER = 'ff-lazy'
def reschedule_stream_plan(evaluations, target_facts, domain, stream_results,
unique_binding=False, unsatisfiable=False, max_effort=INF,
planner=RESCHEDULE_PLANNER, max_reschedule_time=10, debug=False):
# TODO: search in space of partially ordered plans
# TODO: constrain selection order to be alphabetical?
domain.actions[:], stream_result_from_name = get_stream_actions(
stream_results, unique_binding=unique_binding)
goal_expression = And(*target_facts)
if unsatisfiable: # TODO: ensure that the copy hasn't harmed anything
goal_expression = add_unsatisfiable_to_goal(domain, goal_expression)
reschedule_problem = get_problem(evaluations, goal_expression, domain, unit_costs=False)
reschedule_task = task_from_domain_problem(domain, reschedule_problem)
#reschedule_task.axioms = [] # TODO: ensure that the constants are added in the event that axioms are needed?
sas_task = sas_from_pddl(reschedule_task)
stream_names, effort = solve_from_task(sas_task, planner=planner, max_planner_time=max_reschedule_time,
max_cost=max_effort, debug=debug)
if stream_names is None:
return None
stream_plan = [stream_result_from_name[name] for name, _ in stream_names]
return stream_plan
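# Illustrative sketch (not part of the original file): rescheduling compiles each stream result into a
# pseudo-action (via get_stream_actions), uses the target facts as the goal, and runs an optimal
# planner (lmcut-astar by default) so that the returned ordering/subset of stream results roughly
# minimizes total effort.
#   stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
#   # None on planner failure; otherwise a list of the original stream results in a new order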
##################################################
def shorten_stream_plan(evaluations, stream_plan, target_facts):
all_subgoals = set(target_facts) | set(flatten(r.instance.get_domain() for r in stream_plan))
evaluation_subgoals = set(filter(evaluations.__contains__, map(evaluation_from_fact, all_subgoals)))
open_subgoals = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, all_subgoals))
results_from_fact = {}
for result in stream_plan:
for fact in result.get_certified():
results_from_fact.setdefault(fact, []).append(result)
for removed_result in reversed(stream_plan): # TODO: only do in order?
certified_subgoals = open_subgoals & set(removed_result.get_certified())
if not certified_subgoals: # Could combine with following
new_stream_plan = stream_plan[:]
new_stream_plan.remove(removed_result)
return new_stream_plan
if all(2 <= len(results_from_fact[fact]) for fact in certified_subgoals):
node_from_atom = get_achieving_streams(evaluation_subgoals, set(stream_plan) - {removed_result})
if all(fact in node_from_atom for fact in target_facts):
new_stream_plan = []
extract_stream_plan(node_from_atom, target_facts, new_stream_plan)
return new_stream_plan
return None
def prune_stream_plan(evaluations, stream_plan, target_facts):
while True:
new_stream_plan = shorten_stream_plan(evaluations, stream_plan, target_facts)
if new_stream_plan is None:
break
stream_plan = new_stream_plan
return stream_plan
##################################################
def postprocess_stream_plan(evaluations, domain, stream_plan, target_facts):
stream_plan = prune_stream_plan(evaluations, stream_plan, target_facts)
if DO_RESCHEDULE:
# TODO: detect this based on unique or not
# TODO: maybe test if partial order between two ways of achieving facts, if not prune
new_stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_plan)
if new_stream_plan is not None:
return new_stream_plan
return stream_plan
| 4,434 |
Python
| 51.797618 | 113 | 0.690347 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_axioms.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.downward import get_literals, apply_action, \
get_derived_predicates, literal_holds, GOAL_NAME, get_precondition
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance, filter_negated, get_achieving_axioms
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import Verbose, MockSet, safe_zip, flatten
import copy
import pddl
import axiom_rules
def get_necessary_axioms(conditions, axioms, negative_from_name):
if not conditions or not axioms:
return {}
axioms_from_name = get_derived_predicates(axioms)
atom_queue = []
processed_atoms = set()
def add_literals(literals):
for lit in literals:
atom = lit.positive()
if atom not in processed_atoms:
atom_queue.append(atom) # Previously was lit.positive() for some reason?
processed_atoms.add(atom)
add_literals(conditions)
axiom_from_action = {}
partial_instantiations = set()
while atom_queue:
literal = atom_queue.pop()
for axiom in axioms_from_name[literal.predicate]:
derived_parameters = axiom.parameters[:axiom.num_external_parameters]
var_mapping = {p.name: a for p, a in zip(derived_parameters, literal.args) if not is_parameter(a)}
key = (axiom, frozenset(var_mapping.items()))
if key in partial_instantiations:
continue
partial_instantiations.add(key)
parts = [l.rename_variables(var_mapping) for l in get_literals(axiom.condition)
if l.predicate not in negative_from_name] # Assumes a conjunction?
# new_condition = axiom.condition.uniquify_variables(None, var_mapping)
effect_args = [var_mapping.get(a.name, a.name) for a in derived_parameters]
effect = pddl.Effect([], pddl.Truth(), pddl.conditions.Atom(axiom.name, effect_args))
free_parameters = [p for p in axiom.parameters if p.name not in var_mapping]
new_action = pddl.Action(axiom.name, free_parameters, len(free_parameters),
pddl.Conjunction(parts), [effect], None)
# Creating actions so I can partially instantiate (impossible with axioms)
axiom_from_action[new_action] = (axiom, var_mapping)
add_literals(parts)
return axiom_from_action
##################################################
def instantiate_necessary_axioms(model, static_facts, fluent_facts, axiom_remap={}):
instantiated_axioms = []
for atom in model:
if isinstance(atom.predicate, pddl.Action):
action = atom.predicate
var_mapping = {p.name: a for p, a in zip(action.parameters, atom.args)}
axiom, existing_var_mapping = axiom_remap[action]
var_mapping.update(existing_var_mapping)
inst_axiom = axiom.instantiate(var_mapping, static_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
return instantiated_axioms
##################################################
def extract_axioms(state, axiom_from_atom, conditions, axiom_plan, negated_from_name={}):
success = True
for fact in filter_negated(conditions, negated_from_name):
if literal_holds(state, fact):
continue
if fact not in axiom_from_atom:
print('Fact is not achievable:', fact)
success = False
continue
axiom = axiom_from_atom[fact]
if (axiom is None) or (axiom in axiom_plan):
continue
extract_axioms(state, axiom_from_atom, axiom.condition, axiom_plan, negated_from_name=negated_from_name)
axiom_plan.append(axiom)
return success
##################################################
def is_useful_atom(atom, conditions_from_predicate):
# TODO: this is currently a bottleneck. Instantiate for all actions along the plan first? (apply before checking)
if not isinstance(atom, pddl.Atom):
return False
for atom2 in conditions_from_predicate[atom.predicate]:
if all(is_parameter(a2) or (a1 == a2) for a1, a2 in safe_zip(atom.args, atom2.args)):
return True
return False
def extraction_helper(state, instantiated_axioms, goals, negative_from_name={}):
# TODO: filter instantiated_axioms that aren't applicable?
import options
with Verbose(False):
# axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goals)
all_axioms, axiom_layers = axiom_rules.handle_axioms(
operators=[], axioms=instantiated_axioms, goals=goals, layer_strategy=options.layer_strategy)
axiom_init = set() # TODO: new FastDownward does not use axiom_init
helpful_axioms = []
for axiom in all_axioms:
if axiom.effect in goals: # TODO: double check this
helpful_axioms.append(axiom)
init_atom = axiom.effect.negate()
if axiom.effect in axiom_init:
raise RuntimeError('Bug introduced by new "downward" where both the positive and negative atoms '
'of literal {} are in the initial state'.format(init_atom.positive()))
axiom_init.add(init_atom)
axiom_effects = {axiom.effect for axiom in helpful_axioms}
#assert len(axiom_effects) == len(axiom_init)
for pre in list(goals) + list(axiom_effects):
if pre.positive() not in axiom_init:
axiom_init.add(pre.positive().negate())
goal_action = pddl.PropositionalAction(GOAL_NAME, goals, [], None)
axiom_from_atom, _ = get_achieving_axioms(state | axiom_init, helpful_axioms + [goal_action], negative_from_name)
axiom_plan = [] # Could always add all conditions
success = extract_axioms(state | axiom_init, axiom_from_atom, goals, axiom_plan, negative_from_name)
if not success:
print('Warning! Could not extract an axiom plan')
#return None
return axiom_plan
def extract_axiom_plan(task, goals, negative_from_name, static_state=set()):
import pddl_to_prolog
import build_model
import instantiate
# TODO: only reinstantiate the negative axioms
if not negative_from_name:
return []
axioms_from_name = get_derived_predicates(task.axioms)
derived_goals = {l for l in goals if l.predicate in axioms_from_name}
assert all(literal_holds(task.init, l) # or (l.predicate in negative_from_name)
for l in set(goals) - derived_goals)
axiom_from_action = get_necessary_axioms(derived_goals, task.axioms, negative_from_name)
if not axiom_from_action:
return []
conditions_from_predicate = defaultdict(set)
for axiom, mapping in axiom_from_action.values():
for literal in get_literals(axiom.condition):
conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))
original_init = task.init
original_actions = task.actions
original_axioms = task.axioms
# TODO: retrieve initial state based on if helpful
task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
# TODO: store map from predicate to atom
task.actions = axiom_from_action.keys()
task.axioms = []
# TODO: maybe it would just be better to drop the negative throughout this process until this end
with Verbose(verbose=False):
model = build_model.compute_model(pddl_to_prolog.translate(task)) # Changes based on init
opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)
axiom_plan = extraction_helper(task.init, instantiated_axioms, derived_goals, negative_from_name)
task.init = original_init
task.actions = original_actions
task.axioms = original_axioms
return axiom_plan
##################################################
def backtrack_axioms(conditions, axioms_from_effect, visited_atoms):
visited_axioms = []
for atom in conditions:
if atom in visited_atoms:
continue
visited_atoms.add(atom)
for axiom in axioms_from_effect[atom]:
visited_axioms.append(axiom)
visited_axioms.extend(backtrack_axioms(axiom.condition, axioms_from_effect, visited_atoms))
return visited_axioms
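def _example_backtrack_axioms():
    # Illustrative sketch (not part of the original module): shows the backward closure computed by
    # backtrack_axioms. The namedtuple below is a hypothetical stand-in for instantiated axioms;
    # only the .condition/.effect attributes used above are required.
    from collections import namedtuple
    Axiom = namedtuple('Axiom', ['condition', 'effect'])
    derive_q = Axiom(condition=['p'], effect='q')  # q is derived from the state fact p
    derive_r = Axiom(condition=['q'], effect='r')  # r is derived from q
    axioms_from_effect = defaultdict(list)
    for axiom in [derive_q, derive_r]:
        axioms_from_effect[axiom.effect].append(axiom)
    # Needing 'r' pulls in derive_r and, transitively, derive_q (each atom is visited at most once)
    return backtrack_axioms(['r'], axioms_from_effect, visited_atoms=set())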
def recover_axioms_plans(instantiated, action_instances):
#axioms, axiom_init, _ = axiom_rules.handle_axioms(
# instantiated.actions, instantiated.axioms, instantiated.goal_list)
new_action_instances = [copy.deepcopy(instance) for instance in action_instances]
axioms, axiom_init = instantiated.axioms, [] # TODO: bug when needing to reachieve negated
axioms_from_effect = defaultdict(list)
for axiom in axioms:
axioms_from_effect[axiom.effect].append(axiom)
axioms_from_name = get_derived_predicates(instantiated.task.axioms)
state = set(instantiated.task.init) | set(axiom_init)
axiom_plans = []
for action in new_action_instances + [get_goal_instance(instantiated.task.goal)]:
all_conditions = list(get_precondition(action)) + list(flatten(
cond for cond, _ in action.add_effects + action.del_effects))
axioms = backtrack_axioms(all_conditions, axioms_from_effect, set())
axiom_from_atom, _ = get_achieving_axioms(state, axioms)
action.applied_effects = []
for effects in [action.add_effects, action.del_effects]:
negate = (effects is action.del_effects)
for i, (conditions, effect) in reversed(list(enumerate(effects))):
if all(literal_holds(state, literal) or (literal in axiom_from_atom) for literal in conditions):
action.precondition.extend(conditions)
effects[i] = ([], effect)
action.applied_effects.append(effect.negate() if negate else effect)
else:
effects.pop(i)
# RuntimeError: Preimage fact ('new-axiom@0',) is not achievable!
#precondition = action.precondition # TODO: strange bug if this applies
precondition = [literal for literal in action.precondition if literal.predicate in axioms_from_name]
axiom_plans.append([])
success = extract_axioms(state, axiom_from_atom, precondition, axiom_plans[-1])
if not success:
print(all_conditions)
print(action)
print(axioms)
raise RuntimeError('Could not extract axioms')
apply_action(state, action)
return new_action_instances, axiom_plans
| 10,854 |
Python
| 47.67713 | 117 | 0.65386 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/add_optimizers.py
|
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, fact_from_fd
from hsr_tamp.pddlstream.algorithms.scheduling.negative import get_negative_result
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import extract_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.utils import get_instance_facts
from hsr_tamp.pddlstream.language.optimizer import ComponentStream
from hsr_tamp.pddlstream.language.constants import get_args, get_prefix
from hsr_tamp.pddlstream.language.stream import Stream
def using_optimizers(results):
return any(isinstance(result.external, ComponentStream) for result in results)
def add_optimizer_effects(instantiated, node_from_atom):
# TODO: instantiate axioms with negative on effects for blocking
# TODO: fluent streams using conditional effects. Special fluent predicate for inputs to constraint
# TODO: bug! The FD instantiator prunes the result.external.stream_fact
for instance in instantiated.actions:
        # TODO: need to handle the case where a negative precondition is used in an optimizer
for condition, effect in (instance.add_effects + instance.del_effects):
for literal in condition:
fact = fact_from_fd(literal)
if (fact in node_from_atom) and (node_from_atom[fact].result is not None):
raise NotImplementedError(literal)
facts = get_instance_facts(instance, node_from_atom)
stream_plan = []
extract_stream_plan(node_from_atom, facts, stream_plan)
# TODO: can detect if some of these are simultaneous and add them as preconditions
for result in stream_plan:
#if isinstance(result.external, ComponentStream):
if True: # TODO: integrate sampler and optimizer treatments
# TODO: need to make multiple versions if several ways of achieving the action
atom = fd_from_fact(result.stream_fact)
instantiated.atoms.add(atom)
effect = (tuple(), atom)
instance.add_effects.append(effect)
instance.effect_mappings.append(effect + (None, None))
# domain = {fact for result in stream_plan if result.external.info.simultaneous
# for fact in result.instance.get_domain()}
# TODO: can streams depending on these be used if dependent preconditions are added to the action
def recover_simultaneous(results, negative_streams, deferred_from_name, instances):
result_from_stream_fact = {}
for result in results:
if isinstance(result.external, Stream):
assert result.stream_fact not in result_from_stream_fact
result_from_stream_fact[result.stream_fact] = result
negative_from_stream_predicate = {}
for state_stream in negative_streams:
if not isinstance(state_stream, Stream):
continue
predicate = get_prefix(state_stream.stream_fact)
if predicate in negative_from_stream_predicate:
# TODO: could make a conjunction condition instead
raise NotImplementedError()
negative_from_stream_predicate[predicate] = state_stream
stream_plan = []
action_plan = []
for instance in instances:
if instance.name in deferred_from_name:
result = deferred_from_name[instance.name]
if result not in stream_plan:
stream_plan.append(result)
else:
action_plan.append(instance)
for conditions, effect in instance.add_effects:
# Assumes effects are in order
assert not conditions
fact = fact_from_fd(effect)
if fact in result_from_stream_fact:
result = result_from_stream_fact[fact]
elif effect.predicate in negative_from_stream_predicate:
negative = negative_from_stream_predicate[effect.predicate]
result = get_negative_result(negative, get_args(fact))
else:
continue
if result not in stream_plan:
stream_plan.append(result)
return stream_plan, action_plan
| 4,173 |
Python
| 50.530864 | 113 | 0.668584 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/synthesizer.py
|
from collections import deque, Counter
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.conversion import substitute_expression
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.optimizer import get_cluster_values, OptimizerResult
from hsr_tamp.pddlstream.language.statistics import Performance
from hsr_tamp.pddlstream.language.stream import Stream, StreamInstance, StreamResult, StreamInfo
from hsr_tamp.pddlstream.utils import neighbors_from_orders
def decompose_result(result):
if isinstance(result, SynthStreamResult):
return result.decompose()
elif isinstance(result, OptimizerResult):
return result.external.stream_plan
return [result]
def decompose_stream_plan(stream_plan):
if not is_plan(stream_plan):
return stream_plan
new_stream_plan = []
for result in stream_plan:
new_stream_plan.extend(decompose_result(result))
return new_stream_plan
class SynthStreamResult(StreamResult):
def get_functions(self):
return substitute_expression(self.instance.external.functions, self.mapping)
def decompose(self):
results = []
for i, stream in enumerate(self.instance.external.streams):
macro_from_micro = self.instance.external.macro_from_micro[i]
input_objects = tuple(self.mapping[macro_from_micro[inp]] for inp in stream.inputs)
instance = stream.get_instance(input_objects)
output_objects = tuple(self.mapping[macro_from_micro[out]] for out in stream.outputs)
results.append(StreamResult(instance, output_objects))
return results
class SynthStreamInstance(StreamInstance):
pass
#def decompose(self):
# return self.streams
class SynthStream(Stream):
# TODO: wild stream optimizer
_Instance = SynthStreamInstance
_Result = SynthStreamResult
def __init__(self, synthesizer, inputs, domain, outputs, certified, functions,
streams, macro_from_micro):
def gen_fn(*input_values): # TODO: take in guess values for inputs?
assert (len(inputs) == len(input_values))
mapping = dict(zip(inputs, input_values))
targets = substitute_expression(certified | functions, mapping)
return synthesizer.gen_fn(outputs, targets) # TODO: could also return a map
#info = None # TODO: stream info
info = StreamInfo() # TODO: use StreamSynthesizer?
super(SynthStream, self).__init__(synthesizer.name, gen_fn, inputs, domain, outputs, certified, info)
self.synthesizer = synthesizer
self.streams = streams
self.functions = tuple(functions)
self.macro_from_micro = macro_from_micro
def update_statistics(self, overhead, success):
self.synthesizer.update_statistics(overhead, success)
def get_p_success(self):
return self.synthesizer.get_p_success()
def get_overhead(self):
return self.synthesizer.get_overhead()
#def decompose(self):
# return self.streams
##################################################
class StreamSynthesizer(Performance): # JointStream | Stream Combiner
def __init__(self, name, streams, gen_fn, post_only=False):
super(StreamSynthesizer, self).__init__(name, StreamInfo())
self.name = name
self.streams = {s.lower(): m for s, m in streams.items()}
self.gen_fn = gen_fn
self.macro_results = {}
self.post_only = post_only
#def get_instances(self):
# raise NotImplementedError()
def get_synth_stream(self, stream_plan):
key = frozenset(stream_plan)
if key in self.macro_results:
return self.macro_results[key]
streams = list(filter(lambda r: isinstance(r, StreamResult), stream_plan))
if len(streams) < 1: # No point if only one...
return None
inputs, domain, outputs, certified, functions, macro_from_micro, \
input_objects, output_objects, fluent_facts = get_cluster_values(stream_plan)
if fluent_facts:
raise NotImplementedError()
mega_stream = SynthStream(self, inputs, domain,
outputs, certified, functions,
streams, macro_from_micro)
mega_instance = mega_stream.get_instance(input_objects)
self.macro_results[key] = SynthStreamResult(mega_instance, output_objects)
return self.macro_results[key]
def __repr__(self):
return '{}{}'.format(self.name, self.streams)
# TODO: worthwhile noting that the focused algorithm does not search over all plan skeletons directly...
##################################################
# TODO: factor this into algorithms
# TODO:
# 1) Iteratively resolve for the next stream plan to apply rather than do in sequence
# 2) Apply to a constraint network specification
# 3) Satisfy a constraint network where free variables aren't given by streams
# 4) Allow algorithms to return not feasible to combine rather than impose minimums
# 5) Make a method (rather than a spec) that traverses the constraint graph and prunes weak links/constraints that can't be planned
# 6) Post process all feasible skeletons at once
# 7) Planning and execution view of the algorithm
# 8) Algorithm that does the matching of streams to variables
# 9) Add implied facts (e.g. types) to the constraint network as preconditions
def expand_cluster(synthesizer, v, neighbors, processed):
cluster = {v}
queue = deque([v])
while queue:
v1 = queue.popleft()
for v2 in neighbors[v1]:
if (v2 not in processed) and (v2.instance.external.name in synthesizer.streams):
cluster.add(v2)
queue.append(v2)
processed.add(v2)
return cluster
def get_synthetic_stream_plan(stream_plan, synthesizers):
# TODO: fix this implementation of this to be as follows:
# 1) Prune graph not related
# 2) Cluster
# 3) Try combinations of replacing on stream plan
if not is_plan(stream_plan) or (not synthesizers):
return stream_plan
orders = get_partial_orders(stream_plan)
for order in list(orders):
orders.add(order[::-1])
neighbors, _ = neighbors_from_orders(orders)
# TODO: what if many possibilities?
# TODO: cluster first and then plan using the macro and regular streams
processed = set()
new_stream_plan = []
for result in stream_plan: # Processing in order is important
if result in processed:
continue
processed.add(result)
# TODO: assert that it has at least one thing in it
for synthesizer in synthesizers:
# TODO: something could be an input and output of a cut...
if result.instance.external.name not in synthesizer.streams:
continue
# TODO: need to ensure all are covered I think?
# TODO: don't do if no streams within
cluster = expand_cluster(synthesizer, result, neighbors, processed)
counts = Counter(r.instance.external.name for r in cluster)
if not all(n <= counts[name] for name, n in synthesizer.streams.items()):
continue
ordered_cluster = [r for r in stream_plan if r in cluster]
synthesizer_result = synthesizer.get_synth_stream(ordered_cluster)
if synthesizer_result is None:
continue
new_stream_plan.append(synthesizer_result)
new_stream_plan.extend(filter(lambda s: isinstance(s, FunctionResult), ordered_cluster))
break
else:
new_stream_plan.append(result)
return new_stream_plan
##################################################
"""
def get_synthetic_stream_plan2(stream_plan, synthesizers):
# TODO: pass subgoals along the plan in directly
# TODO: could just do this on the objects themselves to start
free_parameters = set()
for result in stream_plan:
if isinstance(result, StreamResult):
free_parameters.update(result.output_objects)
print(free_parameters)
# TODO: greedy method first
new_plan = []
facts = set()
while True:
candidates = []
for result in stream_plan:
if result.instance.get_domain() <= facts:
candidates.append(result)
selection = candidates[-1]
new_plan.append(selection)
print(new_plan)
print(stream_plan)
print(synthesizers)
raise NotImplementedError()
"""
| 8,674 |
Python
| 41.73399 | 132 | 0.655983 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/skeleton.py
|
from __future__ import print_function
import time
from collections import namedtuple, Sized
from heapq import heappush, heappop, heapreplace
from operator import itemgetter
from hsr_tamp.pddlstream.algorithms.common import is_instance_ready, EvaluationNode
from hsr_tamp.pddlstream.algorithms.disabled import process_instance, update_bindings, update_cost, bind_action_plan
from hsr_tamp.pddlstream.language.constants import is_plan, INFEASIBLE
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.utils import elapsed_time, HeapElement, safe_zip, get_mapping
# The motivation for immediately instantiating is to avoid unnecessary sampling
# Consider a stream result DAG A -> C, B -> C
# If A is already successfully sampled, don't want to resample A until B is sampled
def puncture(sequence, index):
return sequence[:index] + sequence[index+1:]
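# For example (illustrative, not in the original file):
#   assert puncture([0, 1, 2, 3], 1) == [0, 2, 3]
# the element at the given index is dropped while the rest of the sequence keeps its order.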
##################################################
class Skeleton(object):
def __init__(self, queue, stream_plan, action_plan, cost):
self.index = len(queue.skeletons)
queue.skeletons.append(self)
self.queue = queue
self.stream_plan = stream_plan
self.action_plan = action_plan
self.cost = cost
self.root = Binding(self.queue, self,
stream_indices=range(len(stream_plan)),
stream_attempts=[0]*len(stream_plan),
bound_results={},
bindings={},
cost=cost)
self.best_binding = self.root
def bind_stream_plan(self, mapping, indices=None):
if indices is None:
indices = range(len(self.stream_plan))
return [self.stream_plan[index].remap_inputs(mapping) for index in indices]
#def __repr__(self):
# return repr(self.action_plan)
##################################################
# TODO: only branch on new bindings caused by new stream outputs
# TODO: delete a binding if a stream is exhausted
# TODO: evaluate all stream instances in queue below a target effort
Priority = namedtuple('Priority', ['attempted', 'effort'])
class Binding(object):
# TODO: maintain a tree instead. Propagate the best subtree upwards
def __init__(self, queue, skeleton, stream_indices, stream_attempts,
bound_results, bindings, cost):
self.queue = queue
self.skeleton = skeleton
assert len(stream_indices) == len(stream_attempts)
self.stream_indices = list(stream_indices)
self.stream_attempts = list(stream_attempts)
self.bound_results = bound_results
self.bindings = bindings
self.cost = cost
self.children = []
self.enumerated = False # What if result enumerated with zero calls?
self._remaining_results = None
# Maybe just reset the indices for anything that isn't applicable
# n+1 sample represented
# TODO: store partial orders
# TODO: store applied results
# TODO: the problem is that I'm not actually doing all combinations because I'm passing attempted
@property
def attempts_from_index(self):
return get_mapping(self.stream_indices, self.stream_attempts)
@property
def remaining_results(self):
if self._remaining_results is None:
self._remaining_results = self.skeleton.bind_stream_plan(self.bindings, self.stream_indices)
return self._remaining_results
@property
def action_plan(self):
return bind_action_plan(self.skeleton.action_plan, self.bindings)
@property
def result(self):
return self.remaining_results[0]
@property
def index(self):
return self.stream_indices[0]
@property
def attempts(self):
return self.stream_attempts[0]
def is_bound(self):
return not self.stream_indices
def is_dominated(self):
# TODO: what should I do if the cost=inf (from incremental/exhaustive)
return self.queue.store.has_solution() and (self.queue.store.best_cost <= self.cost)
def is_enabled(self):
return not (self.enumerated or self.is_dominated())
def post_order(self):
for child in self.children:
for binding in child.post_order():
yield binding
yield self
def get_priority(self):
# Infinite cost if skeleton is exhausted
# Attempted is equivalent to whether any stream result is disabled
num_attempts = sum(self.stream_attempts)
attempted = num_attempts != 0
# TODO: lexicographic tiebreaking using plan cost and other skeleton properties
return Priority(attempted, (num_attempts, len(self.stream_attempts)))
def get_element(self):
return HeapElement(self.get_priority(), self)
def get_key(self):
# Each stream result is unique (affects hashing)
return self.skeleton, tuple(self.stream_indices), frozenset(self.bindings.items())
def _instantiate(self, index, new_result):
if not new_result.is_successful():
return None # TODO: check if satisfies target certified
opt_result = self.remaining_results[index]
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# self.stream_indices = puncture(self.stream_indices, index)
# self.stream_attempts = puncture(self.stream_attempts, index)
# self.bound_results[self.stream_indices[index]] = new_result
# self.cost = update_cost(self.cost, opt_result, new_result)
# self._remaining_results = puncture(self._remaining_results, index)
# self.queue.disable_binding(self)
# self.queue.new_binding(self)
# return self
bound_results = self.bound_results.copy()
bound_results[self.stream_indices[index]] = new_result
binding = Binding(self.queue, self.skeleton,
puncture(self.stream_indices, index),
puncture(self.stream_attempts, index),
bound_results,
update_bindings(self.bindings, opt_result, new_result),
update_cost(self.cost, opt_result, new_result))
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# binding._remaining_results = puncture(self._remaining_results, index)
if len(binding.stream_indices) < len(self.skeleton.best_binding.stream_indices):
self.skeleton.best_binding = binding
self.children.append(binding)
self.queue.new_binding(binding)
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# # The binding is dominated
# self.enumerated = True
# self.queue.update_enabled(self)
return binding
def update_instances(self):
updated = False
for index, (opt_result, attempt) in enumerate(safe_zip(self.remaining_results, self.stream_attempts)):
if self.enumerated:
return updated
if opt_result.instance.num_calls != attempt:
updated = True
for new_result in opt_result.instance.get_results(start=attempt):
self._instantiate(index, new_result)
self.stream_attempts[index] = opt_result.instance.num_calls
self.enumerated |= opt_result.instance.enumerated
return updated
def __repr__(self):
#return '{}({})'.format(self.__class__.__name__, str_from_object(self.remaining_stream_plan))
#return '{}({})'.format(self.__class__.__name__, str_from_object(self.action_plan))
return '{}(skeleton={}, remaining={})'.format(
self.__class__.__name__, self.skeleton.index, self.stream_indices) #str_from_object(self.attempts_from_index))
##################################################
class SkeletonQueue(Sized):
# TODO: handle this in a partially ordered way
# TODO: alternatively store just preimage and reachieve
# TODO: make an "action" for returning to the search (if it is the best decision)
# TODO: could just maintain a list of active instances and sample/propagate
# TODO: store bindings in a factored form that only combines when needed
# TODO: update bindings given outcomes of eager streams
# TODO: immediately evaluate eager streams in the queue
def __init__(self, store, domain, disable=True):
self.store = store
self.evaluations = store.evaluations
self.domain = domain
self.skeletons = []
self.queue = []
self.binding_from_key = {}
self.bindings_from_instance = {}
self.enabled_bindings = set()
self.disable = disable
####################
def _flush_stale(self):
while self.queue:
queue_priority, binding = self.queue[0]
current_priority = binding.get_priority()
if queue_priority == current_priority:
return
heapreplace(self.queue, binding.get_element())
####################
#def _reenable_stream_plan(self, stream_plan):
# # TODO: only disable if not used elsewhere
# # TODO: could just hash instances
# # TODO: do I actually need to reenable? Yes it ensures that
# # TODO: check if the index is the only one being sampled
# # for result in stream_plan:
# # result.instance.disabled = False
# stream_plan[0].instance.enable(self.evaluations, self.domain)
# # TODO: move functions as far forward as possible to prune these plans
# # TODO: make function evaluations low success as soon as finite cost
# Maybe the reason repeat skeletons are happening is that the currently active thing is disabled
# But another one on the plan isn't
# Could scan the whole queue each time a solution is found
def update_enabled(self, binding):
if not binding.is_enabled() and (binding in self.enabled_bindings):
self.disable_binding(binding)
def is_enabled(self, binding):
self.update_enabled(binding)
return binding in self.enabled_bindings
def enable_binding(self, binding):
assert binding not in self.enabled_bindings
self.enabled_bindings.add(binding)
for result in binding.remaining_results:
instance = result.instance
if instance not in self.bindings_from_instance:
self.bindings_from_instance[instance] = set()
self.bindings_from_instance[instance].add(binding)
def disable_binding(self, binding):
assert binding in self.enabled_bindings
self.enabled_bindings.remove(binding)
for result in binding.remaining_results:
instance = result.instance
if instance in self.bindings_from_instance:
if binding in self.bindings_from_instance[instance]:
self.bindings_from_instance[instance].remove(binding)
if not self.bindings_from_instance[instance]:
del self.bindings_from_instance[instance]
####################
def new_binding(self, binding):
key = binding.get_key()
if key in self.binding_from_key:
print('Binding already visited!') # Could happen if binding is the same
#return
self.binding_from_key[key] = binding
if not binding.is_enabled():
return
if not binding.stream_indices:
# if is_solution(self.domain, self.evaluations, bound_plan, self.goal_expression):
self.store.add_plan(binding.action_plan, binding.cost)
# TODO: could update active for all items in a queue fashion
return
binding.update_instances()
if binding.is_enabled():
self.enable_binding(binding)
heappush(self.queue, binding.get_element())
def new_skeleton(self, stream_plan, action_plan, cost):
skeleton = Skeleton(self, stream_plan, action_plan, cost)
self.new_binding(skeleton.root)
####################
def _generate_results(self, instance):
# assert(instance.opt_index == 0)
if not is_instance_ready(self.evaluations, instance):
raise RuntimeError(instance)
new_results, _ = process_instance(self.store, self.domain, instance, disable=self.disable)
is_new = bool(new_results)
for i, binding in enumerate(list(self.bindings_from_instance[instance])):
#print(i, binding)
# Maybe this list grows but not all the things are accounted for
if self.is_enabled(binding):
binding.update_instances()
self.update_enabled(binding)
#print()
return is_new
def _process_root(self):
is_new = False
self._flush_stale()
_, binding = heappop(self.queue)
if not self.is_enabled(binding):
return is_new
assert not binding.update_instances() #self.update_enabled(binding)
is_new = self._generate_results(binding.result.instance)
# _decompose_synthesizer_skeleton(queue, skeleton, stream_index)
if self.is_enabled(binding):
heappush(self.queue, binding.get_element())
return is_new
####################
def is_active(self):
return self.queue and (not self.store.is_terminated())
def greedily_process(self):
while self.is_active():
self._flush_stale()
key, _ = self.queue[0]
if key.attempted:
break
self._process_root()
def process_until_new(self):
# TODO: process the entire queue once instead
is_new = False
while self.is_active() and (not is_new):
is_new |= self._process_root()
self.greedily_process()
return is_new
def timed_process(self, max_time):
start_time = time.time()
while self.is_active() and (elapsed_time(start_time) <= max_time):
self._process_root()
self.greedily_process()
# TODO: print cost updates when progress with a new skeleton
def accelerate_best_bindings(self):
# TODO: reset the values for old streams
for skeleton in self.skeletons:
for _, result in sorted(skeleton.best_binding.bound_results.items(), key=itemgetter(0)):
# TODO: just accelerate the facts within the plan preimage
result.call_index = 0 # Pretends the fact was first
new_complexity = result.compute_complexity(self.evaluations)
for fact in result.get_certified():
evaluation = evaluation_from_fact(fact)
if new_complexity < self.evaluations[evaluation].complexity:
self.evaluations[evaluation] = EvaluationNode(new_complexity, result)
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0):
# TODO: manually add stream_plans for synthesizers/optimizers
start_time = time.time()
if is_plan(stream_plan):
#print([result for result in stream_plan if result.optimistic])
#raw_input('New skeleton')
self.new_skeleton(stream_plan, action_plan, cost)
self.greedily_process()
elif stream_plan is INFEASIBLE:
# TODO: use complexity_limit
self.process_until_new()
self.timed_process(max_time - elapsed_time(start_time))
self.accelerate_best_bindings()
#print(len(self.queue), len(self.skeletons),
# len(self.bindings_from_instance), len(self.binding_from_key))
# Only currently blocking streams with after called
# Can always process streams with a certain complexity
# Temporarily pop off the queue and then re-add
# Domination occurs when no downstream skeleton that
# Is it worth even doing the dynamic instantiation?
# If some set fails where the output is an input
# Scale input
def __len__(self):
return len(self.queue)
##################################################
# from hsr_tamp.pddlstream.language.synthesizer import SynthStreamResult
# def _decompose_synthesizer_skeleton(queue, skeleton, index):
# stream_plan, plan_attempts, bindings, plan_index, cost = skeleton
# opt_result = stream_plan[index]
# if (plan_attempts[index] == 0) and isinstance(opt_result, SynthStreamResult):
# # TODO: only decompose if failure?
# decomposition = opt_result.decompose()
# new_stream_plan = stream_plan[:index] + decomposition + stream_plan[index+1:]
# new_plan_attempts = plan_attempts[:index] + [0]*len(decomposition) + plan_attempts[index+1:]
# queue.new_binding(new_stream_plan, new_plan_attempts, bindings, plan_index, cost)
##################################################
# TODO: want to minimize number of new sequences as they induce overhead
# TODO: estimate how many times a stream needs to be queried (acceleration)
#
# def compute_sampling_cost(stream_plan, stats_fn=get_stream_stats):
# # TODO: we are in a POMDP. If not the case, then the geometric cost policy is optimal
# if stream_plan is None:
# return INF
# expected_cost = 0
# for result in reversed(stream_plan):
# p_success, overhead = stats_fn(result)
# expected_cost += geometric_cost(overhead, p_success)
# return expected_cost
# # TODO: mix between geometric likelihood and learned distribution
# # Sum the tail distribution over the number of future attempts
# # Distribution on the number of future attempts until successful
# # Average the tail probability mass
#
# def compute_belief(attempts, p_obs):
# return pow(p_obs, attempts)
#
# def compute_success_score(plan_attempts, p_obs=.9):
# beliefs = [compute_belief(attempts, p_obs) for attempts in plan_attempts]
# prior = 1.
# for belief in beliefs:
# prior *= belief
# return -prior
#
# def compute_geometric_score(plan_attempts, overhead=1, p_obs=.9):
# # TODO: model the decrease in belief upon each failure
# # TODO: what if stream terminates? Assign high cost
# expected_cost = 0
# for attempts in plan_attempts:
# p_success = compute_belief(attempts, p_obs)
# expected_cost += geometric_cost(overhead, p_success)
# return expected_cost
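# Worked example (illustrative, not in the original file): if each attempt of a stream costs `overhead`
# and succeeds independently with probability p, the expected number of attempts until success is 1/p,
# so geometric_cost(overhead, p) presumably evaluates to overhead/p. With overhead=1 and p_obs=.9,
# after 2 failed attempts the belief is .9**2 = .81 and the expected remaining cost is ~1/.81 ~ 1.23.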
##################################################
# from hsr_tamp.pddlstream.algorithms.downward import task_from_domain_problem, get_problem, get_action_instances, \
# get_goal_instance, plan_preimage, is_valid_plan, substitute_derived, is_applicable, apply_action
# from hsr_tamp.pddlstream.algorithms.reorder import replace_derived
# from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axiom_plan
# def is_solution(domain, evaluations, action_plan, goal_expression):
# task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs=True))
# action_instances = get_action_instances(task, action_plan) + [get_goal_instance(task.goal)]
# #original_init = task.init
# task.init = set(task.init)
# for instance in action_instances:
# axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}, static_state=task.init)
# if axiom_plan is None:
# return False
# #substitute_derived(axiom_plan, instance)
# #if not is_applicable(task.init, instance):
# # return False
# apply_action(task.init, instance)
# return True
# #replace_derived(task, set(), plan_instances)
# #preimage = plan_preimage(plan_instances, [])
# #return is_valid_plan(original_init, action_instances) #, task.goal)
| 19,908 |
Python
| 44.145125 | 122 | 0.631605 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/satisfaction.py
|
from __future__ import print_function
from collections import namedtuple
from hsr_tamp.pddlstream.algorithms.meta import solve
from hsr_tamp.pddlstream.algorithms.satisfaction import SatisfactionSolution
from hsr_tamp.pddlstream.algorithms.constraints import to_constant, ORDER_PREDICATE, ASSIGNED_PREDICATE, \
get_internal_prefix
from hsr_tamp.pddlstream.algorithms.downward import make_action, make_domain, make_predicate
from hsr_tamp.pddlstream.language.constants import is_parameter, Not, PDDLProblem, MINIMIZE, NOT, partition_facts, get_costs, \
get_constraints
from hsr_tamp.pddlstream.language.conversion import get_prefix, get_args, obj_from_value_expression
from hsr_tamp.pddlstream.utils import safe_zip
Cluster = namedtuple('Cluster', ['constraints', 'parameters'])
def get_parameters(expression):
head = get_prefix(expression)
if head in [NOT, MINIMIZE]:
return get_parameters(get_args(expression)[0])
return list(filter(is_parameter, get_args(expression)))
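# For example (illustrative; assumes '?'-prefixed names are parameters, as elsewhere in pddlstream):
#   get_parameters(('on', '?b', 'table')) == ['?b']
#   get_parameters(Not(('on', '?b', 'table'))) == ['?b']  # NOT/MINIMIZE wrappers are unwrapped first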
def update_cluster(cluster1, cluster2):
assert cluster2.parameters <= cluster1.parameters
cluster1.constraints.extend(cluster2.constraints)
cluster1.parameters.update(cluster2.parameters)
def cluster_constraints(terms):
# Can always combine clusters but leads to inefficient grounding
    # The extreme case of this is combining everything into a single goal
# Alternatively, can just keep each cluster separate (shouldn't slow down search much)
clusters = sorted([Cluster([constraint], set(get_parameters(constraint)))
for constraint in get_constraints(terms)],
key=lambda c: len(c.parameters), reverse=True)
cost_clusters = sorted([Cluster([cost], set(get_parameters(cost)))
for cost in get_costs(terms)],
key=lambda c: len(c.parameters))
for c1 in cost_clusters:
for c2 in reversed(clusters):
if 1 < len(get_costs(c1.constraints)) + len(get_costs(c2.constraints)):
continue
if c1.parameters <= c2.parameters:
update_cluster(c2, c1)
break
else:
# TODO: extend this to allow the intersection to cover the cluster
raise RuntimeError('Unable to find a cluster for cost term:', c1.constraints[0])
for i in reversed(range(len(clusters))):
c1 = clusters[i]
for j in reversed(range(i)):
c2 = clusters[j]
if 1 < len(get_costs(c1.constraints)) + len(get_costs(c2.constraints)):
continue
if c1.parameters <= c2.parameters:
update_cluster(c2, c1)
clusters.pop(i)
break
return clusters
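# For example (illustrative, not in the original file): given the constraints ('on', '?x', '?y') and
# ('clear', '?y'), the second cluster's parameters {?y} are a subset of the first's {?x, ?y}, so the
# two are merged into one cluster; a cost term over a subset of those parameters is attached to it too.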
##################################################
def planning_from_satisfaction(init, constraints):
clusters = cluster_constraints(constraints)
prefix = get_internal_prefix(internal=False)
assigned_predicate = ASSIGNED_PREDICATE.format(prefix)
order_predicate = ORDER_PREDICATE.format(prefix)
#order_value_facts = make_order_facts(order_predicate, 0, len(clusters)+1)
order_value_facts = [(order_predicate, '_t{}'.format(i)) for i in range(len(clusters)+1)]
init.append(order_value_facts[0])
goal_expression = order_value_facts[-1]
order_facts = list(map(obj_from_value_expression, order_value_facts))
bound_parameters = set()
actions = []
#constants = {}
for i, cluster in enumerate(clusters):
objectives = list(map(obj_from_value_expression, cluster.constraints))
constraints, negated, costs = partition_facts(objectives)
if negated:
raise NotImplementedError(negated)
#free_parameters = cluster.parameters - bound_parameters
existing_parameters = cluster.parameters & bound_parameters
# TODO: confirm that negated predicates work as intended
name = 'cluster-{}'.format(i)
parameters = list(sorted(cluster.parameters))
preconditions = [(assigned_predicate, to_constant(p), p) for p in sorted(existing_parameters)] + \
constraints + [order_facts[i]]
effects = [(assigned_predicate, to_constant(p), p) for p in parameters] + \
[order_facts[i+1], Not(order_facts[i])]
if costs:
assert len(costs) == 1
[cost] = costs
else:
cost = None
actions.append(make_action(name, parameters, preconditions, effects, cost))
#actions[-1].dump()
bound_parameters.update(cluster.parameters)
predicates = [make_predicate(order_predicate, ['?step'])] # '?num',
domain = make_domain(predicates=predicates, actions=actions)
return domain, goal_expression
##################################################
def pddl_from_csp(stream_pddl, stream_map, init, constraints):
domain, goal = planning_from_satisfaction(init, constraints)
constant_map = {}
return PDDLProblem(domain, constant_map, stream_pddl, stream_map, init, goal)
def bindings_from_plan(problem, plan):
if plan is None:
return None
domain = problem[0]
bindings = {}
for action, (name, args) in safe_zip(domain.actions, plan):
assert action.name == name
for param, arg in safe_zip(action.parameters, args):
name = param.name
assert bindings.get(name, arg) is arg
bindings[name] = arg
return bindings
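# For example (illustrative, with hypothetical objects a and b): if the compiled domain has actions
# cluster-0(?x) and cluster-1(?x, ?y) and the plan is [('cluster-0', (a,)), ('cluster-1', (a, b))],
# the recovered bindings are {'?x': a, '?y': b}; the assert guards against a parameter being rebound
# to a different object across clusters.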
##################################################
def solve_pddlstream_satisfaction(problem, **kwargs):
# TODO: prune set of streams based on constraints
# TODO: investigate constraint satisfaction techniques for search instead
# TODO: optimistic objects based on free parameters that prevent cycles
# TODO: disallow creation of new parameters / certifying new facts
stream_pddl, stream_map, init, constraints = problem
problem = pddl_from_csp(stream_pddl, stream_map, init, constraints)
plan, cost, facts = solve(problem, **kwargs)
bindings = bindings_from_plan(problem, plan)
return SatisfactionSolution(bindings, cost, facts)
| 6,114 |
Python
| 42.678571 | 127 | 0.651783 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/successor_generator.py
|
from collections import defaultdict, deque
from hsr_tamp.pddlstream.algorithms.downward import literal_holds, get_derived_predicates, apply_action
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axioms
class SuccessorNode(object):
def __init__(self, depth=0):
self.depth = depth
self.children = {}
self.instances = []
def get_child(self, value):
if value not in self.children:
self.children[value] = SuccessorNode(depth=self.depth + 1)
return self.children[value]
def get_successors(self, atom_order, state):
if len(atom_order) <= self.depth:
return self.instances
atom = atom_order[self.depth]
instances = []
for value, node in self.children.items():
if (value is None) or (literal_holds(state, atom) is value):
instances.extend(node.get_successors(atom_order, state))
return instances
def get_fluents(init, action_instances):
fluents = set()
for action in action_instances: # TODO: just actions if no action_instances
for cond, eff in action.add_effects:
assert not cond
if not literal_holds(init, eff):
fluents.add(eff)
for cond, eff in action.del_effects:
assert not cond
if not literal_holds(init, eff.negate()):
fluents.add(eff)
return fluents
class SuccessorGenerator(object):
def __init__(self, instantiated, action_instances=[]):
derived_predicates = get_derived_predicates(instantiated.task.axioms)
conditions = {literal.positive() for axiom in instantiated.axioms for literal in axiom.condition}
state = set(instantiated.task.init)
fluents = get_fluents(state, action_instances) & conditions
self.fluent_order = list(fluents)
applicable_axioms = []
axiom_from_literal = defaultdict(list)
# TODO: could also just use get_achieving_axioms
self.root = SuccessorNode()
for axiom in instantiated.axioms:
if all((l.predicate in derived_predicates) or (l.positive() in fluents) or
literal_holds(state, l) for l in axiom.condition):
applicable_axioms.append(axiom)
for literal in axiom.condition:
if literal in fluents:
axiom_from_literal[literal].append(axiom)
fluent_conds = {l.positive(): not l.negated for l in axiom.condition}
node = self.root
for atom in self.fluent_order:
value = fluent_conds.get(atom, None)
node = node.get_child(value)
node.instances.append(axiom)
def get_successors(self, state):
return self.root.get_successors(self.fluent_order, state)
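# Illustrative sketch (not part of the original module): the generator is a ternary trie over the truth
# values of the ordered fluent atoms. Each axiom is filed under the True/False/None ("don't care")
# branch for every fluent in fluent_order, and get_successors only follows the branches consistent with
# the state (the matching truth value plus the don't-care branch) instead of scanning all axioms.
#   root = SuccessorNode()
#   root.get_child(True).get_child(None).instances.append(axiom)  # needs fluent 0, ignores fluent 1
#   root.get_successors(fluent_order, state)  # yields [axiom] iff fluent_order[0] holds in state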
##################################################
def mark_axiom(queue, remaining_from_axiom, axiom, axiom_from_atom):
if not remaining_from_axiom[id(axiom)]:
axiom_from_atom[axiom.effect].append(axiom)
queue.append(axiom.effect)
def mark_iteration(state, axioms_from_literal, fluents_from_axiom, remaining_from_axiom, static_axioms):
axioms_from_atom = defaultdict(list)
for literal in axioms_from_literal:
if literal_holds(state, literal):
axioms_from_atom[literal].append(None)
queue = deque(axioms_from_atom.keys())
for axiom in static_axioms:
mark_axiom(queue, remaining_from_axiom, axiom, axioms_from_atom)
while queue:
literal = queue.popleft()
for axiom in axioms_from_literal[literal]:
remaining_from_axiom[id(axiom)] -= 1
mark_axiom(queue, remaining_from_axiom, axiom, axioms_from_atom)
for literal, axioms in axioms_from_atom.items():
for axiom in axioms:
if axiom is not None:
remaining_from_axiom[id(axiom)] = fluents_from_axiom[id(axiom)]
# TODO: still some overhead here
# TODO: could process these layer by layer instead
return {atom: axioms[0] for atom, axioms in axioms_from_atom.items()}
def recover_axioms_plans2(instantiated, action_instances):
#import axiom_rules
#with Verbose(False):
# normalized_axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(
# [], instantiated.axioms, instantiated.goal_list)
#state = set(instantiated.task.init + axiom_init)
normalized_axioms = instantiated.axioms # TODO: ignoring negated because cannot reinstantiate correctly
state = set(instantiated.task.init)
fluents = get_fluents(state, action_instances)
unprocessed_from_atom = defaultdict(list)
fluents_from_axiom = {}
remaining_from_axiom = {}
for axiom in normalized_axioms:
fluent_conditions = []
for literal in axiom.condition:
if literal.positive() in fluents:
fluent_conditions.append(literal)
elif not literal_holds(state, literal):
fluent_conditions = None
break
if fluent_conditions is None:
continue
for literal in fluent_conditions:
unprocessed_from_atom[literal].append(axiom)
fluents_from_axiom[id(axiom)] = len(fluent_conditions)
remaining_from_axiom[id(axiom)] = fluents_from_axiom[id(axiom)]
static_axioms = [axiom for axiom, num in fluents_from_axiom.items() if num == 0]
axiom_plans = []
for action in action_instances + [get_goal_instance(instantiated.task.goal)]:
axiom_from_atom = mark_iteration(state, unprocessed_from_atom,
fluents_from_axiom, remaining_from_axiom, static_axioms)
preimage = []
for literal in action.precondition:
if not literal_holds(state, literal):
preimage.append(literal)
assert literal in axiom_from_atom
for cond, eff in (action.add_effects + action.del_effects):
# TODO: add conditional effects that must hold here
assert not cond
axiom_plans.append([])
        assert extract_axioms(state, axiom_from_atom, preimage, axiom_plans[-1])
apply_action(state, action)
return axiom_plans
| 6,339 |
Python
| 43.335664 | 107 | 0.631961 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/execution.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.utils import INF
class ActionInfo(object):
def __init__(self, terminal=False, p_success=None, overhead=None):
"""
:param terminal: Indicates the action may require replanning after use
"""
self.terminal = terminal # TODO: infer from p_success?
if self.terminal:
self.p_success, self.overhead = 1e-3, 0
else:
self.p_success, self.overhead = 1, INF
if p_success is not None:
self.p_success = p_success
if overhead is not None:
self.overhead = overhead
# TODO: should overhead just be cost here then?
def get_action_info(action_info):
action_execution = defaultdict(ActionInfo)
for name, info in action_info.items():
action_execution[name] = info
return action_execution
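# Example usage (illustrative, not in the original file): unknown action names fall back to the default
# ActionInfo, while supplied entries are kept as-is.
#   infos = get_action_info({'move': ActionInfo(terminal=True)})
#   infos['move'].p_success  # 1e-3 (terminal actions are assumed likely to require replanning)
#   infos['pick'].overhead   # INF  (default non-terminal ActionInfo)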
| 877 |
Python
| 31.518517 | 78 | 0.63626 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/reorder_actions.py
|
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, substitute_derived, is_applicable, apply_action, \
fd_from_evaluation, task_from_domain_problem, get_problem, get_action_instances
from hsr_tamp.pddlstream.algorithms.reorder import separate_plan, get_stream_stats, dynamic_programming
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axioms
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_achieving_axioms
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, EQ, is_plan, And
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.function import PredicateResult
from hsr_tamp.pddlstream.utils import Verbose, MockSet, neighbors_from_orders
# Extract streams required to do one action
# Compute streams that strongly depend on these. Evaluate these.
# Execute the full prefix of the plan
# Make the first action cheaper if it uses something that doesn't need to re-expand
# How to do this with shared objects?
# Just do the same thing but make the cost 1 if it uses a shared object
def get_stream_instances(stream_plan):
import pddl
# TODO: something that inverts the negative items
stream_instances = [] # TODO: could even apply these to the state directly
for result in stream_plan:
name = result.instance.external.name
precondition = list(map(fd_from_fact, result.instance.get_domain()))
effects = [([], fd_from_fact(fact)) for fact in result.get_certified() if get_prefix(fact) != EQ]
cost = None # TODO: effort?
instance = pddl.PropositionalAction(name, precondition, effects, cost)
stream_instances.append(instance)
return stream_instances
def instantiate_axioms(model, static_facts, fluent_facts, axiom_remap={}):
import pddl
instantiated_axioms = []
for atom in model:
if isinstance(atom.predicate, pddl.Axiom):
axiom = axiom_remap.get(atom.predicate, atom.predicate)
variable_mapping = dict([(par.name, arg)
for par, arg in zip(axiom.parameters, atom.args)])
inst_axiom = axiom.instantiate(variable_mapping, static_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
return instantiated_axioms
def replace_derived(task, negative_init, action_instances):
import pddl_to_prolog
import build_model
import axiom_rules
import pddl
original_actions = task.actions
original_init = task.init
task.actions = []
function_assignments = {f.fluent: f.expression for f in task.init
if isinstance(f, pddl.f_expression.FunctionAssignment)}
task.init = (set(task.init) | {a.negate() for a in negative_init}) - set(function_assignments)
for instance in action_instances:
#axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}) # TODO: refactor this
# TODO: just instantiate task?
with Verbose(False):
model = build_model.compute_model(pddl_to_prolog.translate(task)) # Changes based on init
# fluent_facts = instantiate.get_fluent_facts(task, model)
fluent_facts = MockSet()
instantiated_axioms = instantiate_axioms(model, task.init, fluent_facts)
goal_list = [] # TODO: include the goal?
with Verbose(False): # TODO: helpful_axioms prunes axioms that are already true (e.g. not Unsafe)
helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, goal_list)
axiom_from_atom, _ = get_achieving_axioms(task.init | negative_init | set(axiom_init), helpful_axioms)
# negated_from_name=negated_from_name)
axiom_plan = []
        extract_axioms(task.init | negative_init | set(axiom_init), axiom_from_atom, instance.precondition, axiom_plan)
substitute_derived(axiom_plan, instance)
assert(is_applicable(task.init, instance))
apply_action(task.init, instance)
task.actions = original_actions
task.init = original_init
def get_combined_orders(evaluations, stream_plan, action_plan, domain):
if not is_plan(action_plan):
return action_plan
# TODO: could just do this within relaxed
# TODO: do I want to strip the fluents and just do the partial ordering?
stream_instances = get_stream_instances(stream_plan)
negative_results = filter(lambda r: isinstance(r, PredicateResult) and (r.value == False), stream_plan)
negative_init = set(fd_from_evaluation(evaluation_from_fact(f))
for r in negative_results for f in r.get_certified())
#negated_from_name = {r.instance.external.name for r in negative_results}
opt_evaluations = evaluations_from_stream_plan(evaluations, stream_plan)
goal_expression = And()
task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs=True))
action_instances = get_action_instances(task, action_plan)
replace_derived(task, negative_init, action_instances)
#combined_instances = stream_instances + action_instances
orders = set()
for i, a1 in enumerate(action_plan):
for a2 in action_plan[i+1:]:
orders.add((a1, a2))
# TODO: just store first achiever here
for i, instance1 in enumerate(stream_instances):
for j in range(i+1, len(stream_instances)):
effects = {e for _, e in instance1.add_effects}
if effects & set(stream_instances[j].precondition):
orders.add((stream_plan[i], stream_plan[j]))
for i, instance1 in enumerate(stream_instances):
for j, instance2 in enumerate(action_instances):
effects = {e for _, e in instance1.add_effects} | \
{e.negate() for _, e in instance1.del_effects}
if effects & set(instance2.precondition):
orders.add((stream_plan[i], action_plan[j]))
return orders
##################################################
def reorder_combined_plan(evaluations, combined_plan, action_info, domain, **kwargs):
# TODO: actions as a weak constraint
# TODO: actions are extremely unlikely to work
# TODO: can give actions extreme priority
if not is_plan(combined_plan):
return combined_plan
stream_plan, action_plan = separate_plan(combined_plan)
orders = get_combined_orders(evaluations, stream_plan, action_plan, domain)
_, out_orders = neighbors_from_orders(orders)
valid_combine = lambda v, subset: out_orders[v] <= subset
def stats_fn(operator):
if isinstance(operator, Result):
return get_stream_stats(operator)
name, _ = operator
info = action_info[name]
return info.p_success, info.overhead
return dynamic_programming(combined_plan, valid_combine, stats_fn, **kwargs)
##################################################
# def partial_ordered(plan):
# # https://www.aaai.org/ocs/index.php/ICAPS/ICAPS10/paper/viewFile/1420/1539
# # http://repository.cmu.edu/cgi/viewcontent.cgi?article=1349&context=compsci
# # https://arxiv.org/pdf/1105.5441.pdf
# # https://pdfs.semanticscholar.org/e057/e330249f447c2f065cf50db9dfaddad16aaa.pdf
# # https://github.mit.edu/caelan/PAL/blob/master/src/search/post_processing.cc
#
# instances = instantiate_plan(plan)
# orders = set()
# primary_effects = set() # TODO: start and goal operators here?
# for i in reversed(xrange(len(instances))):
# for pre in instances[i].preconditions:
# for j in reversed(xrange(i)):
# #if pre in instances[j].effects:
# if any(eff == pre for eff in instances[j].effects):
# orders.add((j, i))
# primary_effects.add((j, pre))
# break
# for eff in instances[i].effects:
# for j in xrange(i):
# if any((pre.head == eff.head) and (pre.value != eff.value) for pre in instances[j].preconditions):
# orders.add((j, i))
# if (i, eff) in primary_effects:
# for j in xrange(i):
# if any((eff2.head == eff.head) and (eff2.value != eff.value) for eff2 in instances[j].effects):
# orders.add((j, i))
# # TODO: could remove transitive
# # TODO: this isn't so helpful because it will choose arbitrary streams until an action is feasible (i.e. not intelligent ones)
# for i, (action, args) in enumerate(plan):
# print i, action, args #, instances[i].preconditions, instances[i].effects
# print orders
# print primary_effects
# print topological_sort(range(len(plan)), orders, lambda v: hasattr(plan[v][0], 'stream'))
| 8,909 |
Python
| 50.50289 | 132 | 0.662252 |
tanaydimri/omni.demo.ui/demo/ui/__init__.py
|
from .scripts.main_ui import *
| 30 |
Python
| 29.99997 | 30 | 0.766667 |
tanaydimri/omni.demo.ui/demo/ui/scripts/main_ui.py
|
import omni.ext
import omni.ui as ui
from demo.core import LetsPrint
from pathlib import Path
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class DemoUi(omni.ext.IExt):
def __init__(self):
self._demoWindow = None
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.epgCustomUi] The epgTestUi on startup callback..")
self.all_projects = None
self.__currentPath = Path(__file__).parent
self.__buttonIconPath = self.__currentPath.parent.parent.parent
self._default_project_image = "{0}/data/extension_preview_image.png".format(self.__buttonIconPath)
# Instantiating all the objects we might need.
# Here we are just instantiating a class to help print when the button is pressed
self._letsPrint = LetsPrint()
self._window = self._create_window()
def on_shutdown(self):
print("[omni.epgCustomUi] The epgTestUi on shutdown callback..")
self.destroy()
def destroy(self):
self._visiblity_changed_listener = None
self._demoWindow = None
def set_visible(self, value):
self._demoWindow.visible = value
def onButtonClick(self):
self._letsPrint.delegatedPrint()
def _create_window(self):
# IMPORTANT: Remember to pass in the "flags=ui.WINDOW_FLAGS_MENU_BAR" kwarg if you want to display Menu Items
if self._demoWindow is None:
self._demoWindow = ui.Window("Variant Manager", width=800, height=500, padding_x=10, padding_y=10, flags=ui.WINDOW_FLAGS_MENU_BAR)
self.set_visible(True)
else:
self.set_visible(True)
# This is how you add Menus to your UI.
with self._demoWindow.menu_bar:
with ui.Menu("File"):
ui.MenuItem("New")
ui.MenuItem("Open")
with ui.Menu("Open Recent"):
ui.MenuItem("myAwesomeScene.usd")
ui.MenuItem("anotherAwesomeScene.usd")
ui.MenuItem("yetAnotherAwesomeScene.usd")
ui.MenuItem("Save")
with self._demoWindow.frame:
demoLabel = ui.Label("EPIGRAPH PROJECTS", height=30, style={"font_size": 50, "color": 0xFF000000})
with ui.HStack():
demoButton = ui.Button("I will print Something", image_url=self._default_project_image, clicked_fn=self.onButtonClick)
| 2,764 |
Python
| 41.538461 | 142 | 0.641823 |
tanaydimri/omni.demo.ui/demo/core/__init__.py
|
from .scripts.letsPrint import *
| 32 |
Python
| 31.999968 | 32 | 0.8125 |
tanaydimri/omni.demo.ui/demo/core/scripts/letsPrint.py
|
import omni.ext
# Classes inheriting from "omni.ext.IExt" are automatically instantiated at runtime
# and their on_startup method is called. Likewise, on_shutdown is called when this
# extension is disabled from the Extensions menu.
class LetsPrint(omni.ext.IExt):
def __init__(self):
self.printer = Printer()
def on_startup(self):
print("Starting Up [epg.browser]")
def on_shutdown(self):
print("Shuting Down [epg.browser]")
def delegatedPrint(self):
self.printer.printSomething()
class Printer():
def __init__(self):
print("Printer Initialized")
def printSomething(self):
print("PRINTING SOMETHING NOW!!!")
| 630 |
Python
| 23.26923 | 80 | 0.726984 |
tanaydimri/omni.demo.ui/docs/CHANGELOG.md
|
Write all the Changelogs for this extension here:
----
v0.0.0 (Demo Release)
- Added the Button to UI which prints something.
- I hope this helps you to understand how Omniverse UI extensions could be built :)
| 210 |
Markdown
| 34.166661 | 83 | 0.757143 |
tanaydimri/omni.demo.ui/docs/README.md
|
**[NOT OFFICIALLY FROM NVIDIA]**
This is a demo extension with UI to understand how Omniverse Extensions are structured.
To run this demo extension, put it in one of your Omniverse extension search paths. I prefer to put it under "..\Documents\Kit\shared\exts". Then search for "demo" in your Extensions tab in any of the Omniverse applications.
**Some important points to notice:**
- The core functionality of the extension lives in demo/core. Here an init file is responsible for "collecting" all the core modules so they can be imported by other modules.
- Likewise, the UI-related code lives under demo/ui.
- All the classes inheriting from "omni.ext.IExt" are instantiated when the extension is enabled and their on_startup method is called automatically. Likewise, on_shutdown is called when the extension is disabled (see the minimal sketch below).
- Be sure to read about Omni UI styling. There is great documentation in your Omni Kit app, under Omni::UI Doc.
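For reference, below is a minimal sketch of that pattern (hypothetical extension name and messages, not code from this repo), assuming the containing module is listed under `[[python.module]]` in the extension.toml:
```python
import omni.ext

class MyDemoExtension(omni.ext.IExt):
    # Kit instantiates this class when the extension is enabled
    def on_startup(self, ext_id):
        print(f"[my.demo.extension] startup, ext_id={ext_id}")

    # ...and calls this when the extension is disabled
    def on_shutdown(self):
        print("[my.demo.extension] shutdown")
```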
---
Hope this helps you with building your own extensions :)
- Stay Safe!!
| 1,022 |
Markdown
| 62.937496 | 228 | 0.774951 |
DataJuggler/DataJuggler.CameraKeys/README.md
|
Camera Keys is an Omniverse Python extension to move the camera around, and set all 6 camera keyframes at the current frame (Transform x,y,z and Rotation x,y,z).
<img src=https://github.com/DataJuggler/SharedRepo/blob/master/Shared/Images/CameraKeysExtension.png width=460 height=320>
To use this extension, you must have a camera named Camera at '/World/Camera'. Future versions may work with multiple cameras.
The extension is designed to help you easily create camera fly-throughs. Move your camera into position either manually or using the buttons shown above, and the transform keys are set at the current frame.
Thanks to @mati-codes for writing the camera move-forward code and helping me with many questions.
Change the frame in the timeline to the desired frame, then move the camera into place and click 'Set Keys' to set the keyframes.
Use the rotation and movement amount sliders to set how far the camera moves or rotates on each button click.
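For reference, the core of the 'Set Keys' button is sketched below. This is a simplified sketch of the extension code (see extension.py), assuming the camera prim lives at '/World/Camera':
```python
import omni.kit.commands
import omni.timeline

# Key all six camera channels (translate x/y/z and rotateYXZ x/y/z) at the current frame.
for op in ("translate", "rotateYXZ"):
    for axis in ("x", "y", "z"):
        omni.kit.commands.execute(
            "SetAnimCurveKeys",
            paths=[f"/World/Camera.xformOp:{op}|{axis}"],
        )

# Report which frame the keys landed on.
timeline = omni.timeline.get_timeline_interface()
frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
print(f"6 keys were set at frame {frame}")
```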
| 982 |
Markdown
| 41.739129 | 208 | 0.789206 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/datajuggler/camerakeys/extension.py
|
import omni.ext
import omni.ui as ui
import omni.timeline
import math
from omni.kit.viewport.utility import get_active_viewport
from pxr import Sdf, Usd, UsdGeom, Gf
import omni.usd
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class DatajugglerCamerakeysExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[datajuggler.camerakeys] datajuggler camerakeys startup")
# Get the stage
stage = omni.usd.get_context().get_stage()
active_viewport = get_active_viewport()
if active_viewport:
# Pull meaningful information from the Viewport to frame a specific prim
time = active_viewport.time
camera_path = active_viewport.camera_path
else:
# Otherwise, create a camera that will be used to frame the prim_to_frame
camera_path = "/World/Camera"
UsdGeom.Camera.Define(stage, camera_path)
print(camera_path)
# Start at 100
self._MovementValue = 100
self._RotationValue = 5
self._label = None
self._window = ui.Window("Camera Keys", width=600, height=660)
with self._window.frame:
with ui.VStack():
label = ui.Label("Make sure your project has a camera named Camera at the World level '/World/Camera'")
self._label = label
def get_local_rot(prim: Usd.Prim):
return prim.GetAttribute("xformOp:rotateXYZ").Get()
def decompose_matrix(mat: Gf.Matrix4d):
reversed_ident_mtx = reversed(Gf.Matrix3d())
translate = mat.ExtractTranslation()
scale = Gf.Vec3d(*(v.GetLength() for v in mat.ExtractRotationMatrix()))
#must remove scaling from mtx before calculating rotations
mat.Orthonormalize()
# without reversed this seems to return angles in ZYX order
rotate = Gf.Vec3d(*reversed(mat.ExtractRotation().Decompose(*reversed_ident_mtx)))
return translate, rotate, scale
def Left_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(-1,0,0,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
# forward_step = cam_fwd_unit_vec * 100
forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Forward_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,-1,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
# forward_step = cam_fwd_unit_vec * 100
forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Back_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,1,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
# forward_step = cam_fwd_unit_vec * 100
forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Right_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(-1,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
# forward_step = cam_fwd_unit_vec * 100
forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def XRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationX = round(rotationX + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(newRotationX, rotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation X = " + str(newRotationX)
def XRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationX = round(rotationX - self._RotationValue,1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(newRotationX, rotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation X = " + str(newRotationX)
def YRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# label.text = "Old Rotation Y = " + str(rotationY)
# calculate the new value
newRotationY = round(rotationY + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, newRotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationY)
def YRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationY = round(rotationY - self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, newRotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationY)
def ZRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationZ = round(rotationZ + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, rotationY, newRotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New RotationZY = " + str(newRotationZ)
def ZRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationZ = round(rotationZ - self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, rotationY, newRotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationZ)
def XAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformX value
newTransformX = transformX - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the X Axis to " + str(round(newTransformX, 1))
                    # move the camera down on the X axis
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(newTransformX, transformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def XAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformX value
newTransformX = transformX + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the X Axis to " + str(round(newTransformX, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(newTransformX, transformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def YAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformY value
newTransformY = transformY + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the Y Axis to " + str(round(newTransformY, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, newTransformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def YAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformY value
newTransformY = transformY - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the Y Axis to " + str(round(newTransformY, 1))
                    # move the camera down on the Y axis
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, newTransformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def ZAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformZ value
newTransformZ = transformZ - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the Z Axis to " + str(round(newTransformZ, 1))
                    # move the camera down on the Z axis
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, transformY, newTransformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def ZAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
                    # set the new transformZ value
newTransformZ = transformZ + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the Z Axis to " + str(round(newTransformZ, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, transformY, newTransformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def SetKeys_Click():
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|x'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|y'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|z'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|x'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|y'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|z'])
timeline = omni.timeline.get_timeline_interface()
time = timeline.get_current_time()
fps = timeline.get_time_codes_per_seconds()
frame = time * fps
label.text = "6 Keys Were Set at frame " + str(frame)
# add an IntSlider for translate Strength
ui.Label("Camera Rotation Amount")
self._rotationSlider = ui.IntSlider(min = 1, max = 90, step=5)
self._rotationSlider.model.set_value(5)
                self._RotationValue = 5
self._rotationSlider.model.add_value_changed_fn(self._onrotation_value_changed)
with ui.HStack(height=40):
xAxisButtonUp = ui.Button("X +", clicked_fn=XRotateUp_Click)
yAxisButtonUp = ui.Button("Y +", clicked_fn=YRotateUp_Click)
zAxisButtonUp = ui.Button("Z +", clicked_fn=ZRotateUp_Click)
with ui.HStack(height=40):
xAxisButtonDown = ui.Button("X -", clicked_fn=XRotateDown_Click)
yAxisButtonDown = ui.Button("Y -", clicked_fn=YRotateDown_Click)
zAxisButtonDown = ui.Button("Z -", clicked_fn=ZRotateDown_Click)
# add an IntSlider for translate Strength
ui.Label("Camera Movement Amount")
self._movementSlider = ui.IntSlider(min = 10, max = 1000, step=10)
self._movementSlider.model.set_value(100)
self._MovementValue = 100
self._movementSlider.model.add_value_changed_fn(self._on_value_changed)
with ui.HStack(height=54):
leftButton = ui.Button("Left", clicked_fn=Left_Click)
forwardButton = ui.Button("Forward", clicked_fn=Forward_Click)
yAxisButtonUp = ui.Button("Back", clicked_fn=Back_Click)
rightButton = ui.Button("Right", clicked_fn=Right_Click)
with ui.HStack(height=54):
xAxisButtonUp = ui.Button("X +", clicked_fn=XAxisUp_Click)
yAxisButtonUp = ui.Button("Y +", clicked_fn=YAxisUp_Click)
zAxisButtonUp = ui.Button("Z +", clicked_fn=ZAxisUp_Click)
with ui.HStack(height=54):
xAxisButtonDown = ui.Button("X -", clicked_fn=XAxisDown_Click)
yAxisButtonDown = ui.Button("Y -", clicked_fn=YAxisDown_Click)
zAxisButtonDown = ui.Button("Z -", clicked_fn=ZAxisDown_Click)
# with ui.VStack(height=54):
# ui.Label("Shaky Cam Movement Amount - Only Applies To Forward")
# # add an IntSlider for translate Strength
# self._ShakySlider = ui.IntSlider(min = 1, max = 100, step=1)
# self._ShakySlider.model.set_value(0)
# self._ShakyValue = 0
# self._ShakySlider.model.add_value_changed_fn(self._on_shakyvalue_changed)
with ui.VStack(height=40):
ui.Label("")
ui.Label("Change the timeline to the desired frame before clicking the Set Keys button.")
with ui.VStack(height=60):
ui.Button("Set Keys", clicked_fn=SetKeys_Click)
def _on_value_changed(self, model: ui.SimpleIntModel):
self._MovementValue = model.get_value_as_int()
self._label.text = "Camera movement value = " + str(self._MovementValue)
def _onrotation_value_changed(self, model: ui.SimpleIntModel):
self._RotationValue = model.get_value_as_int()
self._label.text = "Camera rotation value = " + str(self._Rotation_Value)
#def _on_shakyvalue_changed(self, model: ui.SimpleIntModel):
# self._ShakyValue = model.get_value_as_int()
# self._label.text = "Camera shaky value = " + str(self._Shaky_Value)
def on_shutdown(self):
print("[datajuggler.camerakeys] datajuggler camerakeys shutdown")
| 32,636 |
Python
| 53.304492 | 119 | 0.527362 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/docs/CHANGELOG.md
|
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.1.1] - 2021-08-01
- I changed how the x+, x-, y+, y-, z+ and z- button click events work, as omni.usd.utils no longer exists in Composer 2023.1.1.
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 331 |
Markdown
| 29.181816 | 128 | 0.676737 |
isaac-sim/IsaacLab/README.md
|

---
# Isaac Lab
[](https://docs.omniverse.nvidia.com/isaacsim/latest/overview.html)
[](https://docs.python.org/3/whatsnew/3.10.html)
[](https://releases.ubuntu.com/20.04/)
[](https://pre-commit.com/)
[](https://isaac-sim.github.io/IsaacLab)
[](https://opensource.org/licenses/BSD-3-Clause)
**Isaac Lab** is a unified and modular framework for robot learning that aims to simplify common workflows
in robotics research (such as RL, learning from demonstrations, and motion planning). It is built upon
[NVIDIA Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/overview.html) to leverage the latest
simulation capabilities for photo-realistic scenes and fast and accurate simulation.
Please refer to our [documentation page](https://isaac-sim.github.io/IsaacLab) to learn more about the
installation steps, features, tutorials, and how to set up your project with Isaac Lab.
## Announcements
* [17.04.2024] [**v0.3.0**](https://github.com/isaac-sim/IsaacLab/releases/tag/v0.3.0):
Several improvements and bug fixes to the framework. Includes cabinet opening and dexterous manipulation environments,
terrain-aware patch sampling, and animation recording.
* [22.12.2023] [**v0.2.0**](https://github.com/isaac-sim/IsaacLab/releases/tag/v0.2.0):
Significant breaking updates to enhance the modularity and user-friendliness of the framework. Also includes
procedural terrain generation, warp-based custom ray-casters, and legged-locomotion environments.
## Contributing to Isaac Lab
We wholeheartedly welcome contributions from the community to make this framework mature and useful for everyone.
These may happen as bug reports, feature requests, or code contributions. For details, please check our
[contribution guidelines](https://isaac-sim.github.io/IsaacLab/source/refs/contributing.html).
## Troubleshooting
Please see the [troubleshooting](https://isaac-sim.github.io/IsaacLab/source/refs/troubleshooting.html) section for
common fixes or [submit an issue](https://github.com/isaac-sim/IsaacLab/issues).
For issues related to Isaac Sim, we recommend checking its [documentation](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html)
or opening a question on its [forums](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/67).
## Support
* Please use GitHub [Discussions](https://github.com/isaac-sim/IsaacLab/discussions) for discussing ideas, asking questions, and requests for new features.
* Github [Issues](https://github.com/isaac-sim/IsaacLab/issues) should only be used to track executable pieces of work with a definite scope and a clear deliverable. These can be fixing bugs, documentation issues, new features, or general updates.
## Acknowledgement
NVIDIA Isaac Sim is available freely under [individual license](https://www.nvidia.com/en-us/omniverse/download/). For more information about its license terms, please check [here](https://docs.omniverse.nvidia.com/app_isaacsim/common/NVIDIA_Omniverse_License_Agreement.html#software-support-supplement).
The Isaac Lab framework is released under [BSD-3 License](LICENSE). The license files of its dependencies and assets are present in the [`docs/licenses`](docs/licenses) directory.
| 3,717 |
Markdown
| 64.228069 | 304 | 0.781544 |
isaac-sim/IsaacLab/tools/tests_to_skip.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# The following tests are skipped by run_tests.py
TESTS_TO_SKIP = [
# lab
"test_argparser_launch.py", # app.close issue
"test_build_simulation_context_nonheadless.py", # headless
"test_env_var_launch.py", # app.close issue
"test_kwarg_launch.py", # app.close issue
"test_differential_ik.py", # Failing
# lab_tasks
"test_data_collector.py", # Failing
"test_record_video.py", # Failing
]
| 556 |
Python
| 29.944443 | 63 | 0.672662 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/pyproject.toml
|
[build-system]
requires = ["setuptools", "wheel", "toml"]
build-backend = "setuptools.build_meta"
| 98 |
TOML
| 23.749994 | 42 | 0.704082 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/test/test_valid_configs.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch the simulator
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
import omni.isaac.lab_assets as lab_assets # noqa: F401
from omni.isaac.lab.assets import AssetBase, AssetBaseCfg
from omni.isaac.lab.sensors import SensorBase, SensorBaseCfg
from omni.isaac.lab.sim import build_simulation_context
class TestValidEntitiesConfigs(unittest.TestCase):
"""Test cases for all registered entities configurations."""
@classmethod
def setUpClass(cls):
# load all registered entities configurations from the module
cls.registered_entities: dict[str, AssetBaseCfg | SensorBaseCfg] = {}
# inspect all classes from the module
for obj_name in dir(lab_assets):
obj = getattr(lab_assets, obj_name)
# store all registered entities configurations
if isinstance(obj, (AssetBaseCfg, SensorBaseCfg)):
cls.registered_entities[obj_name] = obj
# print all existing entities names
print(">>> All registered entities:", list(cls.registered_entities.keys()))
"""
Test fixtures.
"""
def test_asset_configs(self):
"""Check all registered asset configurations."""
# iterate over all registered assets
for asset_name, entity_cfg in self.registered_entities.items():
for device in ("cuda:0", "cpu"):
with self.subTest(asset_name=asset_name, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
# print the asset name
print(f">>> Testing entity {asset_name} on device {device}")
# name the prim path
entity_cfg.prim_path = "/World/asset"
# create the asset / sensors
entity: AssetBase | SensorBase = entity_cfg.class_type(entity_cfg) # type: ignore
# play the sim
sim.reset()
# check asset is initialized successfully
self.assertTrue(entity._is_initialized)
if __name__ == "__main__":
run_tests()
| 2,547 |
Python
| 34.388888 | 106 | 0.628583 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.3"
# Description
title = "Isaac Lab Assets"
description="Extension containing configuration instances of different assets and sensors"
readme = "docs/README.md"
repository = "https://github.com/isaac-sim/IsaacLab"
category = "robotics"
keywords = ["kit", "robotics", "assets", "isaaclab"]
[dependencies]
"omni.isaac.lab" = {}
# Main python module this extension provides.
[[python.module]]
name = "omni.isaac.lab_assets"
| 502 |
TOML
| 25.473683 | 90 | 0.729084 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/unitree.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for Unitree robots.
The following configurations are available:
* :obj:`UNITREE_A1_CFG`: Unitree A1 robot with DC motor model for the legs
* :obj:`UNITREE_GO1_CFG`: Unitree Go1 robot with actuator net model for the legs
* :obj:`UNITREE_GO2_CFG`: Unitree Go2 robot with DC motor model for the legs
* :obj:`H1_CFG`: H1 humanoid robot
Reference: https://github.com/unitreerobotics/unitree_ros
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ActuatorNetMLPCfg, DCMotorCfg, ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration - Actuators.
##
GO1_ACTUATOR_CFG = ActuatorNetMLPCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
network_file=f"{ISAACLAB_NUCLEUS_DIR}/ActuatorNets/Unitree/unitree_go1.pt",
pos_scale=-1.0,
vel_scale=1.0,
torque_scale=1.0,
input_order="pos_vel",
input_idx=[0, 1, 2],
effort_limit=23.7, # taken from spec sheet
velocity_limit=30.0, # taken from spec sheet
saturation_effort=23.7, # same as effort limit
)
"""Configuration of Go1 actuators using MLP model.
Actuator specifications: https://shop.unitree.com/products/go1-motor
This model is taken from: https://github.com/Improbable-AI/walk-these-ways
"""
##
# Configuration
##
UNITREE_A1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/A1/a1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.42),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": DCMotorCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
effort_limit=33.5,
saturation_effort=33.5,
velocity_limit=21.0,
stiffness=25.0,
damping=0.5,
friction=0.0,
),
},
)
"""Configuration of Unitree A1 using DC motor.
Note: Specifications taken from: https://www.trossenrobotics.com/a1-quadruped#specifications
"""
UNITREE_GO1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/Go1/go1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.4),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": GO1_ACTUATOR_CFG,
},
)
"""Configuration of Unitree Go1 using MLP-based actuator model."""
UNITREE_GO2_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/Go2/go2.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.4),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": DCMotorCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
effort_limit=23.5,
saturation_effort=23.5,
velocity_limit=30.0,
stiffness=25.0,
damping=0.5,
friction=0.0,
),
},
)
"""Configuration of Unitree Go2 using DC-Motor actuator model."""
H1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 1.05),
joint_pos={
".*_hip_yaw": 0.0,
".*_hip_roll": 0.0,
".*_hip_pitch": -0.28, # -16 degrees
".*_knee": 0.79, # 45 degrees
".*_ankle": -0.52, # -30 degrees
"torso": 0.0,
".*_shoulder_pitch": 0.28,
".*_shoulder_roll": 0.0,
".*_shoulder_yaw": 0.0,
".*_elbow": 0.52,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"legs": ImplicitActuatorCfg(
joint_names_expr=[".*_hip_yaw", ".*_hip_roll", ".*_hip_pitch", ".*_knee", "torso"],
effort_limit=300,
velocity_limit=100.0,
stiffness={
".*_hip_yaw": 150.0,
".*_hip_roll": 150.0,
".*_hip_pitch": 200.0,
".*_knee": 200.0,
"torso": 200.0,
},
damping={
".*_hip_yaw": 5.0,
".*_hip_roll": 5.0,
".*_hip_pitch": 5.0,
".*_knee": 5.0,
"torso": 5.0,
},
),
"feet": ImplicitActuatorCfg(
joint_names_expr=[".*_ankle"],
effort_limit=100,
velocity_limit=100.0,
stiffness={".*_ankle": 20.0},
damping={".*_ankle": 4.0},
),
"arms": ImplicitActuatorCfg(
joint_names_expr=[".*_shoulder_pitch", ".*_shoulder_roll", ".*_shoulder_yaw", ".*_elbow"],
effort_limit=300,
velocity_limit=100.0,
stiffness={
".*_shoulder_pitch": 40.0,
".*_shoulder_roll": 40.0,
".*_shoulder_yaw": 40.0,
".*_elbow": 40.0,
},
damping={
".*_shoulder_pitch": 10.0,
".*_shoulder_roll": 10.0,
".*_shoulder_yaw": 10.0,
".*_elbow": 10.0,
},
),
},
)
"""Configuration for the Unitree H1 Humanoid robot."""
H1_MINIMAL_CFG = H1_CFG.copy()
H1_MINIMAL_CFG.spawn.usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1_minimal.usd"
"""Configuration for the Unitree H1 Humanoid robot with fewer collision meshes.
This configuration removes most collision meshes to speed up simulation.
"""
| 8,821 |
Python
| 31.91791 | 111 | 0.557306 |