diff --git a/CHANGES.md b/CHANGES.md index 9dcd7ba6da..e6c73873eb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -65,6 +65,9 @@ after the corresponding tracker issues. - For developers: add CPLEX support to our GitHub Actions for Windows +- Fix a bug in the computation of RHW landmarks + + ## Fast Downward 20.06 Released on July 26, 2020. diff --git a/experiments/issue1004/common_setup.py b/experiments/issue1004/common_setup.py new file mode 100644 index 0000000000..eeca3aadb5 --- /dev/null +++ b/experiments/issue1004/common_setup.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- + +import itertools +import os +import platform +import subprocess +import sys + +from lab.experiment import ARGPARSER +from lab import tools + +from downward.experiment import FastDownwardExperiment +from downward.reports.absolute import AbsoluteReport +from downward.reports.compare import ComparativeReport +from downward.reports.scatter import ScatterPlotReport + + +def parse_args(): + ARGPARSER.add_argument( + "--test", + choices=["yes", "no", "auto"], + default="auto", + dest="test_run", + help="test experiment locally on a small suite if --test=yes or " + "--test=auto and we are not on a cluster") + return ARGPARSER.parse_args() + +ARGS = parse_args() + + +DEFAULT_OPTIMAL_SUITE = [ + 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', + 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', + 'data-network-opt18-strips', 'depot', 'driverlog', + 'elevators-opt08-strips', 'elevators-opt11-strips', + 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', + 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', + 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', + 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', + 'openstacks-opt11-strips', 'openstacks-opt14-strips', + 'openstacks-strips', 'organic-synthesis-opt18-strips', + 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', + 'parcprinter-opt11-strips', 'parking-opt11-strips', + 
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', + 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', + 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', + 'snake-opt18-strips', 'sokoban-opt08-strips', + 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', + 'termes-opt18-strips', 'tetris-opt14-strips', + 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', + 'transport-opt08-strips', 'transport-opt11-strips', + 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', + 'visitall-opt14-strips', 'woodworking-opt08-strips', + 'woodworking-opt11-strips', 'zenotravel'] + +DEFAULT_SATISFICING_SUITE = [ + 'agricola-sat18-strips', 'airport', 'assembly', + 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', + 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', + 'childsnack-sat14-strips', 'citycar-sat14-adl', + 'data-network-sat18-strips', 'depot', 'driverlog', + 'elevators-sat08-strips', 'elevators-sat11-strips', + 'flashfill-sat18-adl', 'floortile-sat11-strips', + 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', + 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', + 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', + 'miconic-simpleadl', 'movie', 'mprime', 'mystery', + 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', + 'openstacks-sat08-adl', 'openstacks-sat08-strips', + 'openstacks-sat11-strips', 'openstacks-sat14-strips', + 'openstacks-strips', 'optical-telegraphs', + 'organic-synthesis-sat18-strips', + 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', + 'parcprinter-sat11-strips', 'parking-sat11-strips', + 'parking-sat14-strips', 'pathways', 'pathways-noneg', + 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', + 'psr-middle', 'psr-small', 'rovers', 'satellite', + 'scanalyzer-08-strips', 
'scanalyzer-sat11-strips', 'schedule', + 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', + 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', + 'termes-sat18-strips', 'tetris-sat14-strips', + 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', + 'transport-sat08-strips', 'transport-sat11-strips', + 'transport-sat14-strips', 'trucks', 'trucks-strips', + 'visitall-sat11-strips', 'visitall-sat14-strips', + 'woodworking-sat08-strips', 'woodworking-sat11-strips', + 'zenotravel'] + + +def get_script(): + """Get file name of main script.""" + return tools.get_script_path() + + +def get_script_dir(): + """Get directory of main script. + + Usually a relative directory (depends on how it was called by the user.)""" + return os.path.dirname(get_script()) + + +def get_experiment_name(): + """Get name for experiment. + + Derived from the absolute filename of the main script, e.g. + "/ham/spam/eggs.py" => "spam-eggs".""" + script = os.path.abspath(get_script()) + script_dir = os.path.basename(os.path.dirname(script)) + script_base = os.path.splitext(os.path.basename(script))[0] + return "%s-%s" % (script_dir, script_base) + + +def get_data_dir(): + """Get data dir for the experiment. + + This is the subdirectory "data" of the directory containing + the main script.""" + return os.path.join(get_script_dir(), "data", get_experiment_name()) + + +def get_repo_base(): + """Get base directory of the repository, as an absolute path. + + Search upwards in the directory tree from the main script until a + directory with a subdirectory named ".git" is found. 
+ + Abort if the repo base cannot be found.""" + path = os.path.abspath(get_script_dir()) + while os.path.dirname(path) != path: + if os.path.exists(os.path.join(path, ".git")): + return path + path = os.path.dirname(path) + sys.exit("repo base could not be found") + + +def is_running_on_cluster(): + node = platform.node() + return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") + + +def is_test_run(): + return ARGS.test_run == "yes" or ( + ARGS.test_run == "auto" and not is_running_on_cluster()) + + +def get_algo_nick(revision, config_nick): + return "{revision}-{config_nick}".format(**locals()) + + +class IssueConfig(object): + """Hold information about a planner configuration. + + See FastDownwardExperiment.add_algorithm() for documentation of the + constructor's options. + + """ + def __init__(self, nick, component_options, + build_options=None, driver_options=None): + self.nick = nick + self.component_options = component_options + self.build_options = build_options + self.driver_options = driver_options + + +class IssueExperiment(FastDownwardExperiment): + """Subclass of FastDownwardExperiment with some convenience features.""" + + DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] + + DEFAULT_TABLE_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "generated", + "memory", + "planner_memory", + "planner_time", + "quality", + "run_dir", + "score_evaluations", + "score_expansions", + "score_generated", + "score_memory", + "score_search_time", + "score_total_time", + "search_time", + "total_time", + ] + + DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "memory", + "search_time", + "total_time", + ] + + PORTFOLIO_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "plan_length", + "run_dir", + ] + + def __init__(self, revisions=None, configs=None, path=None, 
**kwargs): + """ + + You can either specify both *revisions* and *configs* or none + of them. If they are omitted, you will need to call + exp.add_algorithm() manually. + + If *revisions* is given, it must be a non-empty list of + revision identifiers, which specify which planner versions to + use in the experiment. The same versions are used for + translator, preprocessor and search. :: + + IssueExperiment(revisions=["issue123", "4b3d581643"], ...) + + If *configs* is given, it must be a non-empty list of + IssueConfig objects. :: + + IssueExperiment(..., configs=[ + IssueConfig("ff", ["--search", "eager_greedy(ff())"]), + IssueConfig( + "lama", [], + driver_options=["--alias", "seq-sat-lama-2011"]), + ]) + + If *path* is specified, it must be the path to where the + experiment should be built (e.g. + /home/john/experiments/issue123/exp01/). If omitted, the + experiment path is derived automatically from the main + script's filename. Example:: + + script = experiments/issue123/exp01.py --> + path = experiments/issue123/data/issue123-exp01/ + + """ + + path = path or get_data_dir() + + FastDownwardExperiment.__init__(self, path=path, **kwargs) + + if (revisions and not configs) or (not revisions and configs): + raise ValueError( + "please provide either both or none of revisions and configs") + + for rev in revisions: + for config in configs: + self.add_algorithm( + get_algo_nick(rev, config.nick), + get_repo_base(), + rev, + config.component_options, + build_options=config.build_options, + driver_options=config.driver_options) + + self._revisions = revisions + self._configs = configs + + @classmethod + def _is_portfolio(cls, config_nick): + return "fdss" in config_nick + + @classmethod + def get_supported_attributes(cls, config_nick, attributes): + if cls._is_portfolio(config_nick): + return [attr for attr in attributes + if attr in cls.PORTFOLIO_ATTRIBUTES] + return attributes + + def add_absolute_report_step(self, **kwargs): + """Add step that makes an absolute 
report. + + Absolute reports are useful for experiments that don't compare + revisions. + + The report is written to the experiment evaluation directory. + + All *kwargs* will be passed to the AbsoluteReport class. If the + keyword argument *attributes* is not specified, a default list + of attributes is used. :: + + exp.add_absolute_report_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + report = AbsoluteReport(**kwargs) + outfile = os.path.join( + self.eval_dir, + get_experiment_name() + "." + report.output_format) + self.add_report(report, outfile=outfile) + self.add_step( + 'publish-absolute-report', subprocess.call, ['publish', outfile]) + + def add_comparison_table_step(self, **kwargs): + """Add a step that makes pairwise revision comparisons. + + Create comparative reports for all pairs of Fast Downward + revisions. Each report pairs up the runs of the same config and + lists the two absolute attribute values and their difference + for all attributes in kwargs["attributes"]. + + All *kwargs* will be passed to the CompareConfigsReport class. + If the keyword argument *attributes* is not specified, a + default list of attributes is used. 
:: + + exp.add_comparison_table_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + + def make_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + compared_configs = [] + for config in self._configs: + config_nick = config.nick + compared_configs.append( + ("%s-%s" % (rev1, config_nick), + "%s-%s" % (rev2, config_nick), + "Diff (%s)" % config_nick)) + report = ComparativeReport(compared_configs, **kwargs) + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.%s" % ( + self.name, rev1, rev2, report.output_format)) + report(self.eval_dir, outfile) + + def publish_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) + subprocess.call(["publish", outfile]) + + self.add_step("make-comparison-tables", make_comparison_tables) + self.add_step( + "publish-comparison-tables", publish_comparison_tables) + + def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): + """Add step creating (relative) scatter plots for all revision pairs. + + Create a scatter plot for each combination of attribute, + configuration and revisions pair. If *attributes* is not + specified, a list of common scatter plot attributes is used. + For portfolios all attributes except "cost", "coverage" and + "plan_length" will be ignored. 
:: + + exp.add_scatter_plot_step(attributes=["expansions"]) + + """ + if relative: + scatter_dir = os.path.join(self.eval_dir, "scatter-relative") + step_name = "make-relative-scatter-plots" + else: + scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") + step_name = "make-absolute-scatter-plots" + if attributes is None: + attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES + + def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): + name = "-".join([self.name, rev1, rev2, attribute, config_nick]) + if config_nick2 is not None: + name += "-" + config_nick2 + print("Make scatter plot for", name) + algo1 = get_algo_nick(rev1, config_nick) + algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + get_category=lambda run1, run2: run1["domain"]) + report( + self.eval_dir, + os.path.join(scatter_dir, rev1 + "-" + rev2, name)) + + def make_scatter_plots(): + for config in self._configs: + for rev1, rev2 in itertools.combinations(self._revisions, 2): + for attribute in self.get_supported_attributes( + config.nick, attributes): + make_scatter_plot(config.nick, rev1, rev2, attribute) + for nick1, nick2, rev1, rev2, attribute in additional: + make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) + + self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1004/landmark_parser.py b/experiments/issue1004/landmark_parser.py new file mode 100755 index 0000000000..943492471b --- /dev/null +++ b/experiments/issue1004/landmark_parser.py @@ -0,0 +1,29 @@ +#! 
/usr/bin/env python + +import re + +from lab.parser import Parser + +parser = Parser() +parser.add_pattern( + "lmgraph_generation_time", + r"Landmark graph generation time: (.+)s", + type=float) +parser.add_pattern( + "landmarks", + r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_disjunctive", + r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_conjunctive", + r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", + type=int) +parser.add_pattern( + "orderings", + r"Landmark graph contains (\d+) orderings.", + type=int) + +parser.parse() diff --git a/experiments/issue1004/v1-v2-optimal.py b/experiments/issue1004/v1-v2-optimal.py new file mode 100755 index 0000000000..f503e78a73 --- /dev/null +++ b/experiments/issue1004/v1-v2-optimal.py @@ -0,0 +1,83 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1004-base-seq-opt-bjolp", "issue1004-v2-seq-opt-bjolp"), + ("issue1004-base-seq-opt-bjolp-opt", "issue1004-v2-seq-opt-bjolp-opt"), + ], attributes=ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + + +REVISIONS = [ + "issue1004-base", + "issue1004-v1", + "issue1004-v2", +] + +CONFIGS = [ + common_setup.IssueConfig("seq-opt-bjolp", [], + driver_options=["--alias", "seq-opt-bjolp"]), + common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), + common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), + common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_1", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) 
+exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("disjunctive_landmarks", min_wins=False), + Attribute("conjunctive_landmarks", min_wins=False), + Attribute("orderings", min_wins=False), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/experiments/issue1004/v1-v2-satisficing.py b/experiments/issue1004/v1-v2-satisficing.py new file mode 100755 index 0000000000..f82bd9dea3 --- /dev/null +++ b/experiments/issue1004/v1-v2-satisficing.py @@ -0,0 +1,82 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup + +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1004-base-lama-first", "issue1004-v2-lama-first"), + ("issue1004-base-lama-first-pref", "issue1004-v2-lama-first-pref"), + ], attributes=ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + exp.add_report(report) + +REVISIONS = [ + "issue1004-base", + "issue1004-v1", + "issue1004-v2", +] + +CONFIGS = [ + common_setup.IssueConfig("lama-first", [], + driver_options=["--alias", "lama-first"]), + common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), + common_setup.IssueConfig("lm-zg", ["--search", 
"eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_SATISFICING_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_1", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("disjunctive_landmarks", min_wins=False), + Attribute("conjunctive_landmarks", min_wins=False), + Attribute("orderings", min_wins=False), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/experiments/issue1004/v3-optimal.py b/experiments/issue1004/v3-optimal.py new file mode 100755 index 0000000000..b2da1ec5ab --- /dev/null +++ b/experiments/issue1004/v3-optimal.py @@ -0,0 +1,81 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1004-base-seq-opt-bjolp", "issue1004-v3-seq-opt-bjolp"), + ("issue1004-base-seq-opt-bjolp-opt", "issue1004-v3-seq-opt-bjolp-opt"), + ], attributes=ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + + +REVISIONS = [ + "issue1004-base", + "issue1004-v3", +] + +CONFIGS = [ + common_setup.IssueConfig("seq-opt-bjolp", [], + driver_options=["--alias", "seq-opt-bjolp"]), + common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_1", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + 
Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time"), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_comparison_table_step(attributes=ATTRIBUTES) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/experiments/issue1004/v3-satisficing.py b/experiments/issue1004/v3-satisficing.py new file mode 100755 index 0000000000..ac75be6dad --- /dev/null +++ b/experiments/issue1004/v3-satisficing.py @@ -0,0 +1,68 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup + +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +REVISIONS = [ + "issue1004-base", + "issue1004-v3", +] + +CONFIGS = [ + common_setup.IssueConfig("lama-first", [], + driver_options=["--alias", "lama-first"]), + common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_SATISFICING_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_1", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) 
+exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time"), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_comparison_table_step(attributes=ATTRIBUTES) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/experiments/issue1004/v3-v4-optimal.py b/experiments/issue1004/v3-v4-optimal.py new file mode 100755 index 0000000000..69ecc2f05e --- /dev/null +++ b/experiments/issue1004/v3-v4-optimal.py @@ -0,0 +1,85 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v3-seq-opt-bjolp"), + ("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"), + ("issue1004-v3-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"), + ("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v3-seq-opt-bjolp-opt"), + ("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"), + ("issue1004-v3-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"), + ], attributes=ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + +REVISIONS = [ + "issue1004-base-with-stats", + "issue1004-v3", + "issue1004-v4", +] + +CONFIGS = [ + common_setup.IssueConfig("seq-opt-bjolp", [], + driver_options=["--alias", "seq-opt-bjolp"]), + common_setup.IssueConfig("seq-opt-bjolp-opt", 
["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time"), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/experiments/issue1004/v3-v4-satisficing.py b/experiments/issue1004/v3-v4-satisficing.py new file mode 100755 index 0000000000..900f7a1479 --- /dev/null +++ b/experiments/issue1004/v3-v4-satisficing.py @@ -0,0 +1,87 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +import common_setup + +from common_setup import IssueConfig, IssueExperiment + +import os + +from lab.reports import Attribute + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1004-base-with-stats-lama-first", "issue1004-v3-lama-first"), + ("issue1004-base-with-stats-lama-first", "issue1004-v4-lama-first"), + ("issue1004-v3-lama-first", "issue1004-v4-lama-first"), + ("issue1004-base-with-stats-lama-first-pref", "issue1004-v3-lama-first-pref"), + ("issue1004-base-with-stats-lama-first-pref", "issue1004-v4-lama-first-pref"), + ("issue1004-v3-lama-first-pref", "issue1004-v4-lama-first-pref"), + ], attributes=ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + +REVISIONS = [ + "issue1004-base-with-stats", + "issue1004-v3", + "issue1004-v4", +] + +CONFIGS = [ + common_setup.IssueConfig("lama-first", [], + driver_options=["--alias", "lama-first"]), + common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), +] + +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REPO = os.environ["DOWNWARD_REPO"] + +if common_setup.is_running_on_cluster(): + SUITE = common_setup.DEFAULT_SATISFICING_SUITE + ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="tho.keller@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], + ) +else: + SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + 
environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time"), +] + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) +exp.add_parse_again_step() + +exp.run_steps() + diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc index c0297680be..c8479dd7f4 100644 --- a/src/search/landmarks/landmark_count_heuristic.cc +++ b/src/search/landmarks/landmark_count_heuristic.cc @@ -37,7 +37,7 @@ LandmarkCountHeuristic::LandmarkCountHeuristic(const options::Options &opts) (!task_properties::has_axioms(task_proxy) && (!task_properties::has_conditional_effects(task_proxy) || conditional_effects_supported))), successor_generator(nullptr) { - utils::g_log << "Initializing landmarks count heuristic..." << endl; + utils::g_log << "Initializing landmark count heuristic..." << endl; /* Actually, we should like to test if this is the root task or a @@ -52,8 +52,17 @@ LandmarkCountHeuristic::LandmarkCountHeuristic(const options::Options &opts) utils::exit_with(utils::ExitCode::SEARCH_UNSUPPORTED); } + utils::Timer lm_graph_timer; + utils::g_log << "Generating landmark graph..." 
<< endl; shared_ptr lm_graph_factory = opts.get>("lm_factory"); lgraph = lm_graph_factory->compute_lm_graph(task); + utils::g_log << "Landmark graph generation time: " << lm_graph_timer << endl; + utils::g_log << "Landmark graph contains " << lgraph->get_num_landmarks() + << " landmarks, of which " << lgraph->get_num_disjunctive_landmarks() + << " are disjunctive and " << lgraph->get_num_conjunctive_landmarks() + << " are conjunctive." << endl; + utils::g_log << "Landmark graph contains " << lgraph->get_num_edges() + << " orderings." << endl; bool reasonable_orders = lm_graph_factory->use_reasonable_orders(); lm_status_manager = utils::make_unique_ptr(*lgraph); diff --git a/src/search/landmarks/landmark_factory_rpg_sasp.cc b/src/search/landmarks/landmark_factory_rpg_sasp.cc index bec49bb442..b6a1367c8d 100644 --- a/src/search/landmarks/landmark_factory_rpg_sasp.cc +++ b/src/search/landmarks/landmark_factory_rpg_sasp.cc @@ -166,60 +166,57 @@ int LandmarkFactoryRpgSasp::min_cost_for_landmark(const TaskProxy &task_proxy, return min_cost; } -void LandmarkFactoryRpgSasp::found_simple_lm_and_order(const FactPair &a, - LandmarkNode &b, EdgeType t) { - LandmarkNode *new_lm; +void LandmarkFactoryRpgSasp::found_simple_lm_and_order( + const FactPair &a, LandmarkNode &b, EdgeType t) { if (lm_graph->contains_simple_landmark(a)) { - new_lm = &lm_graph->get_simple_landmark(a); - edge_add(*new_lm, b, t); + LandmarkNode &simple_lm = lm_graph->get_simple_landmark(a); + edge_add(simple_lm, b, t); return; } - set a_set; - a_set.insert(a); - if (lm_graph->contains_overlapping_disjunctive_landmark(a_set)) { + + if (lm_graph->contains_disjunctive_landmark(a)) { + // In issue1004, we fixed a bug in this part of the code. It now removes + // the disjunctive landmark along with all its orderings from the + // landmark graph and adds a new simple landmark node. Before this + // change, incoming orderings were maintained, which is not always + // correct for greedy necessary orderings. 
We now replace those + // incoming orderings with natural orderings. + // Simple landmarks are more informative than disjunctive ones, - // change disj. landmark into simple - - // old: call to methode - LandmarkNode &node = lm_graph->replace_disjunctive_by_simple_landmark(a); - - /* TODO: Problem: Schon diese jetzige Implementierung ist nicht mehr korrekt, - da rm_landmark_node nicht nur bei allen children die parents-zeiger auf sich selbst - loescht, sondern auch bei allen parents die children-zeiger auf sich selbst. Ein - einfaches Speichern aller Attribute von node funktioniert also nicht - entweder man - muss dann manuell bei den parents des alten node alle children-Zeiger neu setzen auf - den neuen node oder man ueberarbeitet das ganze komplett anders... Eine andere Vermutung - meinerseits waere, dass die alte Version verbugt ist und eigentlich auch die children- - Zeiger der parents von node geloescht werden muessten, wie es in rm_landmark_node passiert. - */ - // TODO: avoid copy constructor, save attributes locally and assign to new lm - // new: replace by new program logic - /*LandmarkNode &node2 = lm_graph->get_disj_lm_node(a); - LandmarkNode node(node2); - lm_graph->rm_landmark_node(&node2); - lm_graph->landmark_add_simple(a);*/ - - node.facts.clear(); - node.facts.push_back(a); - // Clean orders: let disj. LM {D1,...,Dn} be ordered before L. We - // cannot infer that any one of D1,...Dn by itself is ordered before L - for (const auto &child : node.children) { - child.first->parents.erase(&node); + // remove disj. 
landmark and add simple one + LandmarkNode *disj_lm = &lm_graph->get_disjunctive_landmark(a); + + // Remove all pointers to disj_lm from internal data structures (i.e., + // the list of open landmarks and forward orders) + auto it = find(open_landmarks.begin(), open_landmarks.end(), disj_lm); + if (it != open_landmarks.end()) { + open_landmarks.erase(it); + } + forward_orders.erase(disj_lm); + + // Retrieve incoming edges from disj_lm + vector predecessors; + for (auto &pred : disj_lm->parents) { + predecessors.push_back(pred.first); + } + + // Remove disj_lm from landmark graph + lm_graph->remove_node(disj_lm); + + // Add simple landmark node + LandmarkNode &simple_lm = lm_graph->add_simple_landmark(a); + open_landmarks.push_back(&simple_lm); + edge_add(simple_lm, b, t); + + // Add incoming orderings of replaced disj_lm as natural orderings to + // simple_lm + for (LandmarkNode *pred : predecessors) { + edge_add(*pred, simple_lm, EdgeType::NATURAL); } - node.children.clear(); - forward_orders[&node].clear(); - - edge_add(node, b, t); - // Node has changed, reexamine it again. This also fixes min_cost. 
- for (list::const_iterator it = open_landmarks.begin(); it - != open_landmarks.end(); ++it) - if (*it == &node) - return; - open_landmarks.push_back(&node); } else { - new_lm = &lm_graph->add_simple_landmark(a); - open_landmarks.push_back(new_lm); - edge_add(*new_lm, b, t); + LandmarkNode &simple_lm = lm_graph->add_simple_landmark(a); + open_landmarks.push_back(&simple_lm); + edge_add(simple_lm, b, t); } } diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc index 75b6fe9bbb..671045d730 100644 --- a/src/search/landmarks/landmark_graph.cc +++ b/src/search/landmarks/landmark_graph.cc @@ -181,29 +181,29 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) { } } +void LandmarkGraph::remove_node(LandmarkNode *node) { + remove_node_occurrences(node); + auto it = find_if(nodes.begin(), nodes.end(), + [&node](unique_ptr &n) { + return n.get() == node; + }); + assert(it != nodes.end()); + nodes.erase(it); +} + void LandmarkGraph::remove_node_if( - const function &remove_node) { + const function &remove_node_condition) { for (auto &node : nodes) { - if (remove_node(*node)) { + if (remove_node_condition(*node)) { remove_node_occurrences(node.get()); } } nodes.erase(remove_if(nodes.begin(), nodes.end(), - [&remove_node](const unique_ptr &node) { - return remove_node(*node); + [&remove_node_condition](const unique_ptr &node) { + return remove_node_condition(*node); }), nodes.end()); } -LandmarkNode &LandmarkGraph::replace_disjunctive_by_simple_landmark(const FactPair &lm) { - LandmarkNode &node = get_disjunctive_landmark(lm); - node.disjunctive = false; - for (const FactPair &lm_fact : node.facts) - // TODO: shouldn't this decrease num_disjunctive_landmarks? 
- disjunctive_landmarks_to_nodes.erase(lm_fact); - simple_landmarks_to_nodes.emplace(lm, &node); - return node; -} - void LandmarkGraph::set_landmark_ids() { int id = 0; for (auto &lmn : nodes) { diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h index 9d2344846f..e8f008e95d 100644 --- a/src/search/landmarks/landmark_graph.h +++ b/src/search/landmarks/landmark_graph.h @@ -85,6 +85,7 @@ class LandmarkGraph { Nodes nodes; void remove_node_occurrences(LandmarkNode *node); + public: /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ @@ -153,13 +154,10 @@ class LandmarkGraph { LandmarkNode &add_conjunctive_landmark(const std::set &lm); /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ + void remove_node(LandmarkNode *node); void remove_node_if( - const std::function &remove_node); + const std::function &remove_node_condition); - /* This is needed only by landmark graph factories (in particular, only - LandmarkFactorySasp) and will disappear when moving landmark graph - creation there. */ - LandmarkNode &replace_disjunctive_by_simple_landmark(const FactPair &lm); /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ void set_landmark_ids();