From 38ddb1774c018849b6b2568a3dcbca45bca31d9e Mon Sep 17 00:00:00 2001 From: Simon Dold Date: Mon, 6 Feb 2023 08:29:52 +0100 Subject: [PATCH 1/7] make conjunctive landmarks usable for preferred operators --- experiments/issue1072/archive.py | 107 +++++ experiments/issue1072/common_setup.py | 396 ++++++++++++++++++ experiments/issue1072/landmark_parser.py | 29 ++ experiments/issue1072/v1-anytime.py | 99 +++++ experiments/issue1072/v1-first.py | 81 ++++ .../landmarks/landmark_count_heuristic.cc | 52 ++- .../landmarks/landmark_count_heuristic.h | 2 + src/search/landmarks/landmark_factory_h_m.cc | 4 +- src/search/landmarks/landmark_graph.cc | 42 +- src/search/landmarks/landmark_graph.h | 11 +- 10 files changed, 807 insertions(+), 16 deletions(-) create mode 100644 experiments/issue1072/archive.py create mode 100644 experiments/issue1072/common_setup.py create mode 100755 experiments/issue1072/landmark_parser.py create mode 100755 experiments/issue1072/v1-anytime.py create mode 100755 experiments/issue1072/v1-first.py diff --git a/experiments/issue1072/archive.py b/experiments/issue1072/archive.py new file mode 100644 index 0000000000..bd76273619 --- /dev/null +++ b/experiments/issue1072/archive.py @@ -0,0 +1,107 @@ +from pathlib import Path +import subprocess +import tarfile +from tempfile import TemporaryDirectory + +ARCHIVE_HOST = "aifiles" +ARCHIVE_LOCATION = Path("experiments") + +def add_archive_step(exp, path): + """ + Adds a step to the given experiment that will archive it to the + archive location specified in ARCHIVE_LOCATION und the given path. + We archive the following files: + - everything in the same directory as the main experiment script + (except for 'data', '.venv', and '__pycache__') + - all generated reports + - the combined properties file + - all run and error logs + - the source code stored in the experiment data directory + - any files added as resources to the experiment + + The first two items in the above list will be stored unpacked for easier + access while all otherdata will be packed. + """ + def archive(): + archive_path = ARCHIVE_LOCATION / path + _archive_script_dir(exp, ARCHIVE_HOST, archive_path) + _archive_data_dir(exp, ARCHIVE_HOST, archive_path) + _archive_eval_dir(exp, ARCHIVE_HOST, archive_path) + + exp.add_step("archive", archive) + + +def _archive_script_dir(exp, host, archive_path): + """ + Archives everything except 'data', '.venv', and '__pycache__' from the + same directory as the experiment script at host:archive_path/scripts. + """ + script_dir = Path(exp._script).parent + target_path = archive_path / "scripts" + + script_files = [f for f in script_dir.glob("*") + if f.name not in ["data", ".venv", "venv", "__pycache__"]] + _rsync(script_files, host, target_path) + + +def _archive_data_dir(exp, host, archive_path): + """ + Packs all files we want to archive from the experiment's data directory and + then archives the packed data at host:archive_path/data. 
Specifically, the + archived files are: + - all files directly in the data dir (added resources such as parsers) + - all directories starting with "code_" (source code of all revisions and + the compilied binaries) + - All *.log and *.err files from the run directories + """ + data_dir = Path(exp.path) + target_path = archive_path / "data" + + data_files = [f for f in data_dir.glob("*") if f.is_file()] + data_files.extend([d for d in data_dir.glob("code-*") if d.is_dir()]) + data_files.extend(data_dir.glob("runs*/*/*.log")) + data_files.extend(data_dir.glob("runs*/*/*.err")) + with TemporaryDirectory() as tmpdirname: + packed_filename = Path(tmpdirname) / (exp.name + ".tar.xz") + _pack(data_files, packed_filename, Path(exp.path).parent) + _rsync([packed_filename], host, target_path) + + +def _archive_eval_dir(exp, host, archive_path): + """ + Archives all files in the experiment's eval dir. + If there is a properties file, it will be packed and only the + packed version will be included in the resulting list. + """ + eval_dir = Path(exp.eval_dir) + target_path = archive_path / "data" / eval_dir.name + + filenames = list(eval_dir.glob("*")) + properties = eval_dir / "properties" + if properties.exists(): + filenames.remove(properties) + with TemporaryDirectory() as tmpdirname: + packed_properties = Path(tmpdirname) / "properties.tar.xz" + _pack([properties], packed_properties, eval_dir) + _rsync([packed_properties], host, target_path) + _rsync(filenames, host, target_path) + + +def _pack(filenames, archive_filename, path_prefix): + """ + Packs all files given in filenames into an archive (.tar.xz) located at + archive_filename. The path_prefix is removed in the archive, i.e., + if the filename is '/path/to/file' and the prefix is '/path', the location + inside the archive will be 'to/file'. + """ + with tarfile.open(archive_filename, "w|xz") as f: + for name in filenames: + f.add(name, name.relative_to(path_prefix)) + +def _rsync(filenames, host, target_path): + # Before copying files we have to create the target path on host. 
+ # We could use the rsync option --mkpath but it is only available in newer + # rsync versions (and not in the one running on the grid) + # https://stackoverflow.com/questions/1636889 + subprocess.run(["ssh", host, "mkdir", "-p", target_path]) + subprocess.run(["rsync", "-avz"] + [str(f) for f in filenames] + [f"{host}:{target_path}"]) diff --git a/experiments/issue1072/common_setup.py b/experiments/issue1072/common_setup.py new file mode 100644 index 0000000000..d0b72fe313 --- /dev/null +++ b/experiments/issue1072/common_setup.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- + +import itertools +import os +import platform +import subprocess +import sys + +from lab.experiment import ARGPARSER +from lab import tools + +from downward.experiment import FastDownwardExperiment +from downward.reports.absolute import AbsoluteReport +from downward.reports.compare import ComparativeReport +from downward.reports.scatter import ScatterPlotReport + +import archive + +def parse_args(): + ARGPARSER.add_argument( + "--test", + choices=["yes", "no", "auto"], + default="auto", + dest="test_run", + help="test experiment locally on a small suite if --test=yes or " + "--test=auto and we are not on a cluster") + return ARGPARSER.parse_args() + +ARGS = parse_args() + + +DEFAULT_OPTIMAL_SUITE = [ + 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', + 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', + 'data-network-opt18-strips', 'depot', 'driverlog', + 'elevators-opt08-strips', 'elevators-opt11-strips', + 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', + 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', + 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', + 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', + 'openstacks-opt11-strips', 'openstacks-opt14-strips', + 'openstacks-strips', 'organic-synthesis-opt18-strips', + 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', + 'parcprinter-opt11-strips', 'parking-opt11-strips', + 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', + 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', + 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', + 'snake-opt18-strips', 'sokoban-opt08-strips', + 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', + 'termes-opt18-strips', 'tetris-opt14-strips', + 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', + 'transport-opt08-strips', 'transport-opt11-strips', + 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', + 'visitall-opt14-strips', 'woodworking-opt08-strips', + 'woodworking-opt11-strips', 'zenotravel'] + +DEFAULT_SATISFICING_SUITE = [ + 'agricola-sat18-strips', 'airport', 'assembly', + 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', + 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', + 'childsnack-sat14-strips', 'citycar-sat14-adl', + 'data-network-sat18-strips', 'depot', 'driverlog', + 'elevators-sat08-strips', 'elevators-sat11-strips', + 'flashfill-sat18-adl', 'floortile-sat11-strips', + 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', + 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', + 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', + 'miconic-simpleadl', 'movie', 'mprime', 'mystery', + 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', + 'openstacks-sat08-adl', 'openstacks-sat08-strips', + 'openstacks-sat11-strips', 'openstacks-sat14-strips', + 
'openstacks-strips', 'optical-telegraphs', + 'organic-synthesis-sat18-strips', + 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', + 'parcprinter-sat11-strips', 'parking-sat11-strips', + 'parking-sat14-strips', 'pathways', + 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', + 'psr-middle', 'psr-small', 'rovers', 'satellite', + 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', + 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', + 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', + 'termes-sat18-strips', 'tetris-sat14-strips', + 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', + 'transport-sat08-strips', 'transport-sat11-strips', + 'transport-sat14-strips', 'trucks', 'trucks-strips', + 'visitall-sat11-strips', 'visitall-sat14-strips', + 'woodworking-sat08-strips', 'woodworking-sat11-strips', + 'zenotravel'] + + +def get_script(): + """Get file name of main script.""" + return tools.get_script_path() + + +def get_script_dir(): + """Get directory of main script. + + Usually a relative directory (depends on how it was called by the user.)""" + return os.path.dirname(get_script()) + + +def get_experiment_name(): + """Get name for experiment. + + Derived from the absolute filename of the main script, e.g. + "/ham/spam/eggs.py" => "spam-eggs".""" + script = os.path.abspath(get_script()) + script_dir = os.path.basename(os.path.dirname(script)) + script_base = os.path.splitext(os.path.basename(script))[0] + return "%s-%s" % (script_dir, script_base) + + +def get_data_dir(): + """Get data dir for the experiment. + + This is the subdirectory "data" of the directory containing + the main script.""" + return os.path.join(get_script_dir(), "data", get_experiment_name()) + + +def get_repo_base(): + """Get base directory of the repository, as an absolute path. + + Search upwards in the directory tree from the main script until a + directory with a subdirectory named ".git" is found. + + Abort if the repo base cannot be found.""" + path = os.path.abspath(get_script_dir()) + while os.path.dirname(path) != path: + if os.path.exists(os.path.join(path, ".git")): + return path + path = os.path.dirname(path) + sys.exit("repo base could not be found") + + +def is_running_on_cluster(): + node = platform.node() + return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") + + +def is_test_run(): + return ARGS.test_run == "yes" or ( + ARGS.test_run == "auto" and not is_running_on_cluster()) + + +def get_algo_nick(revision, config_nick): + return "{revision}-{config_nick}".format(**locals()) + + +class IssueConfig(object): + """Hold information about a planner configuration. + + See FastDownwardExperiment.add_algorithm() for documentation of the + constructor's options. 
+ + """ + def __init__(self, nick, component_options, + build_options=None, driver_options=None): + self.nick = nick + self.component_options = component_options + self.build_options = build_options + self.driver_options = driver_options + + +class IssueExperiment(FastDownwardExperiment): + """Subclass of FastDownwardExperiment with some convenience features.""" + + DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] + + DEFAULT_TABLE_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "generated", + "memory", + "planner_memory", + "planner_time", + "quality", + "run_dir", + "score_evaluations", + "score_expansions", + "score_generated", + "score_memory", + "score_search_time", + "score_total_time", + "search_time", + "total_time", + ] + + DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "memory", + "search_time", + "total_time", + ] + + PORTFOLIO_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "plan_length", + "run_dir", + ] + + def __init__(self, revisions=None, configs=None, path=None, **kwargs): + """ + + You can either specify both *revisions* and *configs* or none + of them. If they are omitted, you will need to call + exp.add_algorithm() manually. + + If *revisions* is given, it must be a non-empty list of + revision identifiers, which specify which planner versions to + use in the experiment. The same versions are used for + translator, preprocessor and search. :: + + IssueExperiment(revisions=["issue123", "4b3d581643"], ...) + + If *configs* is given, it must be a non-empty list of + IssueConfig objects. :: + + IssueExperiment(..., configs=[ + IssueConfig("ff", ["--search", "eager_greedy(ff())"]), + IssueConfig( + "lama", [], + driver_options=["--alias", "seq-sat-lama-2011"]), + ]) + + If *path* is specified, it must be the path to where the + experiment should be built (e.g. + /home/john/experiments/issue123/exp01/). If omitted, the + experiment path is derived automatically from the main + script's filename. Example:: + + script = experiments/issue123/exp01.py --> + path = experiments/issue123/data/issue123-exp01/ + + """ + + path = path or get_data_dir() + + FastDownwardExperiment.__init__(self, path=path, **kwargs) + + if (revisions and not configs) or (not revisions and configs): + raise ValueError( + "please provide either both or none of revisions and configs") + + for rev in revisions: + for config in configs: + self.add_algorithm( + get_algo_nick(rev, config.nick), + get_repo_base(), + rev, + config.component_options, + build_options=config.build_options, + driver_options=config.driver_options) + + self._revisions = revisions + self._configs = configs + + @classmethod + def _is_portfolio(cls, config_nick): + return "fdss" in config_nick + + @classmethod + def get_supported_attributes(cls, config_nick, attributes): + if cls._is_portfolio(config_nick): + return [attr for attr in attributes + if attr in cls.PORTFOLIO_ATTRIBUTES] + return attributes + + def add_absolute_report_step(self, **kwargs): + """Add step that makes an absolute report. + + Absolute reports are useful for experiments that don't compare + revisions. + + The report is written to the experiment evaluation directory. + + All *kwargs* will be passed to the AbsoluteReport class. If the + keyword argument *attributes* is not specified, a default list + of attributes is used. 
:: + + exp.add_absolute_report_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + report = AbsoluteReport(**kwargs) + outfile = os.path.join( + self.eval_dir, + get_experiment_name() + "." + report.output_format) + self.add_report(report, outfile=outfile) + + def add_comparison_table_step(self, **kwargs): + """Add a step that makes pairwise revision comparisons. + + Create comparative reports for all pairs of Fast Downward + revisions. Each report pairs up the runs of the same config and + lists the two absolute attribute values and their difference + for all attributes in kwargs["attributes"]. + + All *kwargs* will be passed to the CompareConfigsReport class. + If the keyword argument *attributes* is not specified, a + default list of attributes is used. :: + + exp.add_comparison_table_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + + def make_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + compared_configs = [] + for config in self._configs: + config_nick = config.nick + compared_configs.append( + ("%s-%s" % (rev1, config_nick), + "%s-%s" % (rev2, config_nick), + "Diff (%s)" % config_nick)) + report = ComparativeReport(compared_configs, **kwargs) + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.%s" % ( + self.name, rev1, rev2, report.output_format)) + report(self.eval_dir, outfile) + + self.add_step("make-comparison-tables", make_comparison_tables) + + def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): + """Add step creating (relative) scatter plots for all revision pairs. + + Create a scatter plot for each combination of attribute, + configuration and revisions pair. If *attributes* is not + specified, a list of common scatter plot attributes is used. + For portfolios all attributes except "cost", "coverage" and + "plan_length" will be ignored. 
:: + + exp.add_scatter_plot_step(attributes=["expansions"]) + + """ + if relative: + scatter_dir = os.path.join(self.eval_dir, "scatter-relative") + step_name = "make-relative-scatter-plots" + else: + scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") + step_name = "make-absolute-scatter-plots" + if attributes is None: + attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES + + def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): + name = "-".join([self.name, rev1, rev2, attribute, config_nick]) + if config_nick2 is not None: + name += "-" + config_nick2 + print("Make scatter plot for", name) + algo1 = get_algo_nick(rev1, config_nick) + algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) + if attribute == "cost": + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + scale="log", + get_category=lambda run1, run2: run1["domain"]) + else: + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + get_category=lambda run1, run2: run1["domain"]) + report( + self.eval_dir, + os.path.join(scatter_dir, rev1 + "-" + rev2, name)) + + def make_scatter_plots(): + for config in self._configs: + for rev1, rev2 in itertools.combinations(self._revisions, 2): + for attribute in self.get_supported_attributes( + config.nick, attributes): + make_scatter_plot(config.nick, rev1, rev2, attribute) + for nick1, nick2, rev1, rev2, attribute in additional: + make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) + + self.add_step(step_name, make_scatter_plots) + + def add_archive_step(self, archive_path): + archive.add_archive_step(self, archive_path) \ No newline at end of file diff --git a/experiments/issue1072/landmark_parser.py b/experiments/issue1072/landmark_parser.py new file mode 100755 index 0000000000..d32ec08819 --- /dev/null +++ b/experiments/issue1072/landmark_parser.py @@ -0,0 +1,29 @@ +#! /usr/bin/env python + +import re + +from lab.parser import Parser + +parser = Parser() +parser.add_pattern( + "lmgraph_generation_time", + r"Landmark graph generation time: (.+)s", + type=float) +parser.add_pattern( + "landmarks", + r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_disjunctive", + r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_conjunctive", + r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", + type=int) +parser.add_pattern( + "orderings", + r"Landmark graph contains (\d+) orderings.", + type=int) + +parser.parse() diff --git a/experiments/issue1072/v1-anytime.py b/experiments/issue1072/v1-anytime.py new file mode 100755 index 0000000000..7cde0f3910 --- /dev/null +++ b/experiments/issue1072/v1-anytime.py @@ -0,0 +1,99 @@ +#! 
/usr/bin/env python3 + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment +from lab.reports import Attribute + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +ARCHIVE_PATH = "ai/downward/issue1075" +DIR = os.path.dirname(os.path.abspath(__file__)) +REPO_DIR = os.environ["DOWNWARD_REPO"] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = [ + "issue1072-v1", +] + +CONFIGS = [] + +for use_preferrence_hierarchy in ["true", "false"]: + for use_preferred_operators_conjunctive in ["true", "false"]: + CONFIGS += [ + IssueConfig(f"lama-hm2-pref-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--if-unit-cost", + "--evaluator", + f"hlm=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff=ff()", + "--search", """iterated([ + lazy_greedy([hff,hlm],preferred=[hff,hlm]), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) + ],repeat_last=true,continue_on_fail=true)""", + "--if-non-unit-cost", + "--evaluator", + f"hlm1=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff1=ff(transform=adapt_costs(one))", + "--evaluator", + f"hlm2=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(plusone),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", + "--search", """iterated([ + lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], + cost_type=one,reopen_closed=false), + lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], + reopen_closed=false), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) + ],repeat_last=true,continue_on_fail=true)""", + # Append --always to be on the safe side if we want to append + # additional options later. 
+ "--always"]) + ] + + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="simon.dold@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"]) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=3) + +exp = IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time", min_wins=True), +] + +exp.add_step('build', exp.build) +exp.add_step('start', exp.start_runs) +exp.add_fetcher(name='fetch', merge=True) + +exp.add_absolute_report_step(attributes=ATTRIBUTES) +exp.add_scatter_plot_step(relative=True, attributes=["cost"]) +exp.add_scatter_plot_step(relative=False, attributes=["cost"]) + +exp.add_archive_step(ARCHIVE_PATH) + +exp.run_steps() diff --git a/experiments/issue1072/v1-first.py b/experiments/issue1072/v1-first.py new file mode 100755 index 0000000000..d38adafdfd --- /dev/null +++ b/experiments/issue1072/v1-first.py @@ -0,0 +1,81 @@ +#! /usr/bin/env python3 + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment +from lab.reports import Attribute + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +ARCHIVE_PATH = "ai/downward/issue1075" +DIR = os.path.dirname(os.path.abspath(__file__)) +REPO_DIR = os.environ["DOWNWARD_REPO"] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = [ + "issue1072-v1", +] + +CONFIGS = [] + +for use_preferrence_hierarchy in ["true", "false"]: + for use_preferred_operators_conjunctive in ["true", "false"]: + CONFIGS += [ + IssueConfig( + f"lama-hm2-first-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--evaluator", + "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" + f"lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff=ff(transform=adapt_costs(one))", + "--search", + "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,reopen_closed=false)"]), + IssueConfig( + f"lm_hm2-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--evaluator", + f"hlm=lmcount(lm_factory=lm_hm(m=2),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--search", + "eager_greedy([hlm],preferred=[hlm])"]), + ] + + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="simon.dold@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"]) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=3) + +exp = IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) 
+exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time", min_wins=True), +] + +exp.add_step('build', exp.build) +exp.add_step('start', exp.start_runs) +exp.add_fetcher(name='fetch', merge=True) + +exp.add_absolute_report_step(attributes=ATTRIBUTES) +exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"]) +exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"]) + +exp.add_archive_step(ARCHIVE_PATH) + +exp.run_steps() diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc index 75d4aacf77..38a65d75c4 100644 --- a/src/search/landmarks/landmark_count_heuristic.cc +++ b/src/search/landmarks/landmark_count_heuristic.cc @@ -30,6 +30,8 @@ namespace landmarks { LandmarkCountHeuristic::LandmarkCountHeuristic(const plugins::Options &opts) : Heuristic(opts), use_preferred_operators(opts.get("pref")), + use_preferred_operators_conjunctive(opts.get("use_preferred_operators_conjunctive")), + use_preferrence_hierarchy(opts.get("use_preferrence_hierarchy")), conditional_effects_supported( opts.get>("lm_factory")->supports_conditional_effects()), admissible(opts.get("admissible")), @@ -222,6 +224,7 @@ void LandmarkCountHeuristic::generate_preferred_operators( assert(successor_generator); vector applicable_operators; successor_generator->generate_applicable_ops(state, applicable_operators); + vector preferred_operators_conjunctive; vector preferred_operators_simple; vector preferred_operators_disjunctive; @@ -242,25 +245,52 @@ void LandmarkCountHeuristic::generate_preferred_operators( FactProxy fact_proxy = effect.get_fact(); LandmarkNode *lm_node = lgraph->get_node(fact_proxy.get_pair()); if (lm_node && landmark_is_interesting( - state, reached, *lm_node, all_landmarks_reached)) { + state, reached, *lm_node, all_landmarks_reached)) { if (lm_node->get_landmark().disjunctive) { preferred_operators_disjunctive.push_back(op_id); } else { preferred_operators_simple.push_back(op_id); } } + if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) { + std::vector conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair()); + for( auto conj_lm : conjunctive_landmarks){ + if (landmark_is_interesting( + state, reached, *conj_lm, all_landmarks_reached) ){ + preferred_operators_conjunctive.push_back(op_id); + break; + } + } + + } } } OperatorsProxy operators = task_proxy.get_operators(); - if (preferred_operators_simple.empty()) { - for (OperatorID op_id : preferred_operators_disjunctive) { - set_preferred(operators[op_id]); + if (use_preferrence_hierarchy){ + if (!preferred_operators_conjunctive.empty()) { + for (OperatorID op_id : preferred_operators_conjunctive) { + set_preferred(operators[op_id]); + } + } else if (!preferred_operators_simple.empty()) { + for (OperatorID op_id : preferred_operators_simple) { + set_preferred(operators[op_id]); + } + } else { + for (OperatorID op_id : preferred_operators_disjunctive) { + set_preferred(operators[op_id]); + } } } else { + for (OperatorID op_id: preferred_operators_conjunctive) { + set_preferred(operators[op_id]); + } for (OperatorID op_id : preferred_operators_simple) { set_preferred(operators[op_id]); } + for (OperatorID op_id : 
preferred_operators_disjunctive) { + set_preferred(operators[op_id]); + } } } @@ -364,6 +394,20 @@ class LandmarkCountHeuristicFeature : public plugins::TypedFeature( + "use_preferred_operators_conjunctive", + "If true, conjunctive landmarks are considered for the generation " + "of preferred operators. Otherwise, only simple and disjunctive " + "landmarks are considered.", + "false"); + add_option( + "use_preferrence_hierarchy", + "If true, only applicable operators leading to one kind of " + "interesting landmarks are preferred operators, namely those of the highest " + "hierarchy (conjunctive > simple > disjunctive)." + "Otherwise, all applicable operators leading to any interesting landmark " + "are preferred operators.", + "true"); add_option("alm", "use action landmarks", "true"); lp::add_lp_solver_option_to_feature(*this); Heuristic::add_options_to_feature(*this); diff --git a/src/search/landmarks/landmark_count_heuristic.h b/src/search/landmarks/landmark_count_heuristic.h index 2ae5aa33c3..18b9a9cc22 100644 --- a/src/search/landmarks/landmark_count_heuristic.h +++ b/src/search/landmarks/landmark_count_heuristic.h @@ -18,6 +18,8 @@ class LandmarkStatusManager; class LandmarkCountHeuristic : public Heuristic { std::shared_ptr lgraph; const bool use_preferred_operators; + const bool use_preferred_operators_conjunctive; + const bool use_preferrence_hierarchy; const bool conditional_effects_supported; const bool admissible; const bool dead_ends_reliable; diff --git a/src/search/landmarks/landmark_factory_h_m.cc b/src/search/landmarks/landmark_factory_h_m.cc index faa744c697..e736e1376f 100644 --- a/src/search/landmarks/landmark_factory_h_m.cc +++ b/src/search/landmarks/landmark_factory_h_m.cc @@ -619,7 +619,7 @@ void LandmarkFactoryHM::discard_conjunctive_landmarks() { << " conjunctive landmarks" << endl; } lm_graph->remove_node_if( - [](const LandmarkNode &node) {return node.get_landmark().conjunctive;}); + [](const LandmarkNode &node) {return node.get_landmark().conjunctive;}, conjunctive_landmarks); } } @@ -931,7 +931,7 @@ void LandmarkFactoryHM::add_lm_node(int set_index, bool goal) { landmark.first_achievers.insert( hm_entry.first_achievers.begin(), hm_entry.first_achievers.end()); - lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark)); + lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark), conjunctive_landmarks); } } diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc index 812147d6b9..3b5e5f80ef 100644 --- a/src/search/landmarks/landmark_graph.cc +++ b/src/search/landmarks/landmark_graph.cc @@ -57,6 +57,10 @@ LandmarkNode &LandmarkGraph::get_disjunctive_landmark(const FactPair &fact) cons return *(disjunctive_landmarks_to_nodes.find(fact)->second); } +const vector &LandmarkGraph::get_conjunctive_landmarks(const FactPair &fact) const { + assert(contains_conjunctive_landmark(fact)); + return conjunctive_landmarks_to_nodes.find(fact)->second; +} bool LandmarkGraph::contains_simple_landmark(const FactPair &lm) const { return simple_landmarks_to_nodes.count(lm) != 0; @@ -66,6 +70,10 @@ bool LandmarkGraph::contains_disjunctive_landmark(const FactPair &lm) const { return disjunctive_landmarks_to_nodes.count(lm) != 0; } +bool LandmarkGraph::contains_conjunctive_landmark(const FactPair &lm) const { + return conjunctive_landmarks_to_nodes.count(lm) != 0; +} + bool LandmarkGraph::contains_overlapping_disjunctive_landmark( const set &lm) const { // Test whether ONE of the facts is present in some disjunctive landmark. 
@@ -101,7 +109,7 @@ bool LandmarkGraph::contains_landmark(const FactPair &lm) const { return contains_simple_landmark(lm) || contains_disjunctive_landmark(lm); } -LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) { +LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjunctive) { assert(all_of(landmark.facts.begin(), landmark.facts.end(), [&](const FactPair &lm_fact) { return !contains_landmark(lm_fact); })); @@ -116,6 +124,16 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) { } ++num_disjunctive_landmarks; } else if (lm.conjunctive) { + if (store_conjunctive) { + for (const FactPair &lm_fact: lm.facts) { + auto it = conjunctive_landmarks_to_nodes.find(lm_fact); + if (it == conjunctive_landmarks_to_nodes.end()) { + conjunctive_landmarks_to_nodes.emplace(lm_fact, std::vector{new_node_p}); + } else { + (it->second).push_back(new_node_p); + } + } + } ++num_conjunctive_landmarks; } else { simple_landmarks_to_nodes.emplace(lm.facts.front(), new_node_p); @@ -123,7 +141,7 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) { return *new_node_p; } -void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) { +void LandmarkGraph::remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive) { for (const auto &parent : node->parents) { LandmarkNode &parent_node = *(parent.first); parent_node.children.erase(node); @@ -142,13 +160,23 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) { } } else if (landmark.conjunctive) { --num_conjunctive_landmarks; + if (erase_conjunctive) { + for (const FactPair &lm_fact: landmark.facts) { + std::vector *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find( + lm_fact)->second); + auto it = std::find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node); + if (it != conjunctive_landmarks_vector->end()) { + conjunctive_landmarks_vector->erase(it); + } + } + } } else { simple_landmarks_to_nodes.erase(landmark.facts[0]); } } -void LandmarkGraph::remove_node(LandmarkNode *node) { - remove_node_occurrences(node); +void LandmarkGraph::remove_node(LandmarkNode *node, bool erase_conjunctive) { + remove_node_occurrences(node, erase_conjunctive); auto it = find_if(nodes.begin(), nodes.end(), [&node](unique_ptr &n) { return n.get() == node; @@ -158,10 +186,10 @@ void LandmarkGraph::remove_node(LandmarkNode *node) { } void LandmarkGraph::remove_node_if( - const function &remove_node_condition) { + const function &remove_node_condition, bool erase_conjunctive) { for (auto &node : nodes) { if (remove_node_condition(*node)) { - remove_node_occurrences(node.get()); + remove_node_occurrences(node.get(), erase_conjunctive); } } nodes.erase(remove_if(nodes.begin(), nodes.end(), @@ -177,4 +205,6 @@ void LandmarkGraph::set_landmark_ids() { ++id; } } + + } diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h index 072522c638..d0f1308a59 100644 --- a/src/search/landmarks/landmark_graph.h +++ b/src/search/landmarks/landmark_graph.h @@ -78,9 +78,10 @@ class LandmarkGraph { utils::HashMap simple_landmarks_to_nodes; utils::HashMap disjunctive_landmarks_to_nodes; + utils::HashMap> conjunctive_landmarks_to_nodes; Nodes nodes; - void remove_node_occurrences(LandmarkNode *node); + void remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive); public: /* This is needed only by landmark graph factories and will disappear @@ -119,6 +120,7 @@ class LandmarkGraph { /* This is needed only by landmark graph factories 
and will disappear when moving landmark graph creation there. */ LandmarkNode &get_disjunctive_landmark(const FactPair &fact) const; + const std::vector &get_conjunctive_landmarks(const FactPair &fact) const; /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. It is not needed by @@ -126,6 +128,7 @@ class LandmarkGraph { bool contains_simple_landmark(const FactPair &lm) const; /* Only used internally. */ bool contains_disjunctive_landmark(const FactPair &lm) const; + bool contains_conjunctive_landmark(const FactPair &lm) const; /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. It is not needed by HMLandmarkFactory*/ @@ -140,12 +143,12 @@ class LandmarkGraph { /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ - LandmarkNode &add_landmark(Landmark &&landmark); + LandmarkNode &add_landmark(Landmark &&landmark, bool store_conjunctive = false); /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ - void remove_node(LandmarkNode *node); + void remove_node(LandmarkNode *node, bool erase_conjunctive = false); void remove_node_if( - const std::function &remove_node_condition); + const std::function &remove_node_condition, bool erase_conjunctive = false); /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ From 82e644249ab1eedba380a302a1a4386c3176face Mon Sep 17 00:00:00 2001 From: Simon Dold Date: Mon, 6 Feb 2023 08:29:52 +0100 Subject: [PATCH 2/7] [issue1072] make conjunctive landmarks usable for preferred operators --- experiments/issue1072/archive.py | 107 +++++ experiments/issue1072/common_setup.py | 396 ++++++++++++++++++ experiments/issue1072/landmark_parser.py | 29 ++ experiments/issue1072/v1-anytime.py | 99 +++++ experiments/issue1072/v1-first.py | 81 ++++ .../landmarks/landmark_count_heuristic.cc | 52 ++- .../landmarks/landmark_count_heuristic.h | 2 + src/search/landmarks/landmark_factory_h_m.cc | 4 +- src/search/landmarks/landmark_graph.cc | 42 +- src/search/landmarks/landmark_graph.h | 11 +- 10 files changed, 807 insertions(+), 16 deletions(-) create mode 100644 experiments/issue1072/archive.py create mode 100644 experiments/issue1072/common_setup.py create mode 100755 experiments/issue1072/landmark_parser.py create mode 100755 experiments/issue1072/v1-anytime.py create mode 100755 experiments/issue1072/v1-first.py diff --git a/experiments/issue1072/archive.py b/experiments/issue1072/archive.py new file mode 100644 index 0000000000..bd76273619 --- /dev/null +++ b/experiments/issue1072/archive.py @@ -0,0 +1,107 @@ +from pathlib import Path +import subprocess +import tarfile +from tempfile import TemporaryDirectory + +ARCHIVE_HOST = "aifiles" +ARCHIVE_LOCATION = Path("experiments") + +def add_archive_step(exp, path): + """ + Adds a step to the given experiment that will archive it to the + archive location specified in ARCHIVE_LOCATION und the given path. 
+ We archive the following files: + - everything in the same directory as the main experiment script + (except for 'data', '.venv', and '__pycache__') + - all generated reports + - the combined properties file + - all run and error logs + - the source code stored in the experiment data directory + - any files added as resources to the experiment + + The first two items in the above list will be stored unpacked for easier + access while all otherdata will be packed. + """ + def archive(): + archive_path = ARCHIVE_LOCATION / path + _archive_script_dir(exp, ARCHIVE_HOST, archive_path) + _archive_data_dir(exp, ARCHIVE_HOST, archive_path) + _archive_eval_dir(exp, ARCHIVE_HOST, archive_path) + + exp.add_step("archive", archive) + + +def _archive_script_dir(exp, host, archive_path): + """ + Archives everything except 'data', '.venv', and '__pycache__' from the + same directory as the experiment script at host:archive_path/scripts. + """ + script_dir = Path(exp._script).parent + target_path = archive_path / "scripts" + + script_files = [f for f in script_dir.glob("*") + if f.name not in ["data", ".venv", "venv", "__pycache__"]] + _rsync(script_files, host, target_path) + + +def _archive_data_dir(exp, host, archive_path): + """ + Packs all files we want to archive from the experiment's data directory and + then archives the packed data at host:archive_path/data. Specifically, the + archived files are: + - all files directly in the data dir (added resources such as parsers) + - all directories starting with "code_" (source code of all revisions and + the compilied binaries) + - All *.log and *.err files from the run directories + """ + data_dir = Path(exp.path) + target_path = archive_path / "data" + + data_files = [f for f in data_dir.glob("*") if f.is_file()] + data_files.extend([d for d in data_dir.glob("code-*") if d.is_dir()]) + data_files.extend(data_dir.glob("runs*/*/*.log")) + data_files.extend(data_dir.glob("runs*/*/*.err")) + with TemporaryDirectory() as tmpdirname: + packed_filename = Path(tmpdirname) / (exp.name + ".tar.xz") + _pack(data_files, packed_filename, Path(exp.path).parent) + _rsync([packed_filename], host, target_path) + + +def _archive_eval_dir(exp, host, archive_path): + """ + Archives all files in the experiment's eval dir. + If there is a properties file, it will be packed and only the + packed version will be included in the resulting list. + """ + eval_dir = Path(exp.eval_dir) + target_path = archive_path / "data" / eval_dir.name + + filenames = list(eval_dir.glob("*")) + properties = eval_dir / "properties" + if properties.exists(): + filenames.remove(properties) + with TemporaryDirectory() as tmpdirname: + packed_properties = Path(tmpdirname) / "properties.tar.xz" + _pack([properties], packed_properties, eval_dir) + _rsync([packed_properties], host, target_path) + _rsync(filenames, host, target_path) + + +def _pack(filenames, archive_filename, path_prefix): + """ + Packs all files given in filenames into an archive (.tar.xz) located at + archive_filename. The path_prefix is removed in the archive, i.e., + if the filename is '/path/to/file' and the prefix is '/path', the location + inside the archive will be 'to/file'. + """ + with tarfile.open(archive_filename, "w|xz") as f: + for name in filenames: + f.add(name, name.relative_to(path_prefix)) + +def _rsync(filenames, host, target_path): + # Before copying files we have to create the target path on host. 
+ # We could use the rsync option --mkpath but it is only available in newer + # rsync versions (and not in the one running on the grid) + # https://stackoverflow.com/questions/1636889 + subprocess.run(["ssh", host, "mkdir", "-p", target_path]) + subprocess.run(["rsync", "-avz"] + [str(f) for f in filenames] + [f"{host}:{target_path}"]) diff --git a/experiments/issue1072/common_setup.py b/experiments/issue1072/common_setup.py new file mode 100644 index 0000000000..d0b72fe313 --- /dev/null +++ b/experiments/issue1072/common_setup.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- + +import itertools +import os +import platform +import subprocess +import sys + +from lab.experiment import ARGPARSER +from lab import tools + +from downward.experiment import FastDownwardExperiment +from downward.reports.absolute import AbsoluteReport +from downward.reports.compare import ComparativeReport +from downward.reports.scatter import ScatterPlotReport + +import archive + +def parse_args(): + ARGPARSER.add_argument( + "--test", + choices=["yes", "no", "auto"], + default="auto", + dest="test_run", + help="test experiment locally on a small suite if --test=yes or " + "--test=auto and we are not on a cluster") + return ARGPARSER.parse_args() + +ARGS = parse_args() + + +DEFAULT_OPTIMAL_SUITE = [ + 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', + 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', + 'data-network-opt18-strips', 'depot', 'driverlog', + 'elevators-opt08-strips', 'elevators-opt11-strips', + 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', + 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', + 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', + 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', + 'openstacks-opt11-strips', 'openstacks-opt14-strips', + 'openstacks-strips', 'organic-synthesis-opt18-strips', + 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', + 'parcprinter-opt11-strips', 'parking-opt11-strips', + 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', + 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', + 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', + 'snake-opt18-strips', 'sokoban-opt08-strips', + 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', + 'termes-opt18-strips', 'tetris-opt14-strips', + 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', + 'transport-opt08-strips', 'transport-opt11-strips', + 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', + 'visitall-opt14-strips', 'woodworking-opt08-strips', + 'woodworking-opt11-strips', 'zenotravel'] + +DEFAULT_SATISFICING_SUITE = [ + 'agricola-sat18-strips', 'airport', 'assembly', + 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', + 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', + 'childsnack-sat14-strips', 'citycar-sat14-adl', + 'data-network-sat18-strips', 'depot', 'driverlog', + 'elevators-sat08-strips', 'elevators-sat11-strips', + 'flashfill-sat18-adl', 'floortile-sat11-strips', + 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', + 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', + 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', + 'miconic-simpleadl', 'movie', 'mprime', 'mystery', + 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', + 'openstacks-sat08-adl', 'openstacks-sat08-strips', + 'openstacks-sat11-strips', 'openstacks-sat14-strips', + 
'openstacks-strips', 'optical-telegraphs', + 'organic-synthesis-sat18-strips', + 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', + 'parcprinter-sat11-strips', 'parking-sat11-strips', + 'parking-sat14-strips', 'pathways', + 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', + 'psr-middle', 'psr-small', 'rovers', 'satellite', + 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', + 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', + 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', + 'termes-sat18-strips', 'tetris-sat14-strips', + 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', + 'transport-sat08-strips', 'transport-sat11-strips', + 'transport-sat14-strips', 'trucks', 'trucks-strips', + 'visitall-sat11-strips', 'visitall-sat14-strips', + 'woodworking-sat08-strips', 'woodworking-sat11-strips', + 'zenotravel'] + + +def get_script(): + """Get file name of main script.""" + return tools.get_script_path() + + +def get_script_dir(): + """Get directory of main script. + + Usually a relative directory (depends on how it was called by the user.)""" + return os.path.dirname(get_script()) + + +def get_experiment_name(): + """Get name for experiment. + + Derived from the absolute filename of the main script, e.g. + "/ham/spam/eggs.py" => "spam-eggs".""" + script = os.path.abspath(get_script()) + script_dir = os.path.basename(os.path.dirname(script)) + script_base = os.path.splitext(os.path.basename(script))[0] + return "%s-%s" % (script_dir, script_base) + + +def get_data_dir(): + """Get data dir for the experiment. + + This is the subdirectory "data" of the directory containing + the main script.""" + return os.path.join(get_script_dir(), "data", get_experiment_name()) + + +def get_repo_base(): + """Get base directory of the repository, as an absolute path. + + Search upwards in the directory tree from the main script until a + directory with a subdirectory named ".git" is found. + + Abort if the repo base cannot be found.""" + path = os.path.abspath(get_script_dir()) + while os.path.dirname(path) != path: + if os.path.exists(os.path.join(path, ".git")): + return path + path = os.path.dirname(path) + sys.exit("repo base could not be found") + + +def is_running_on_cluster(): + node = platform.node() + return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") + + +def is_test_run(): + return ARGS.test_run == "yes" or ( + ARGS.test_run == "auto" and not is_running_on_cluster()) + + +def get_algo_nick(revision, config_nick): + return "{revision}-{config_nick}".format(**locals()) + + +class IssueConfig(object): + """Hold information about a planner configuration. + + See FastDownwardExperiment.add_algorithm() for documentation of the + constructor's options. 
+ + """ + def __init__(self, nick, component_options, + build_options=None, driver_options=None): + self.nick = nick + self.component_options = component_options + self.build_options = build_options + self.driver_options = driver_options + + +class IssueExperiment(FastDownwardExperiment): + """Subclass of FastDownwardExperiment with some convenience features.""" + + DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] + + DEFAULT_TABLE_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "generated", + "memory", + "planner_memory", + "planner_time", + "quality", + "run_dir", + "score_evaluations", + "score_expansions", + "score_generated", + "score_memory", + "score_search_time", + "score_total_time", + "search_time", + "total_time", + ] + + DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "memory", + "search_time", + "total_time", + ] + + PORTFOLIO_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "plan_length", + "run_dir", + ] + + def __init__(self, revisions=None, configs=None, path=None, **kwargs): + """ + + You can either specify both *revisions* and *configs* or none + of them. If they are omitted, you will need to call + exp.add_algorithm() manually. + + If *revisions* is given, it must be a non-empty list of + revision identifiers, which specify which planner versions to + use in the experiment. The same versions are used for + translator, preprocessor and search. :: + + IssueExperiment(revisions=["issue123", "4b3d581643"], ...) + + If *configs* is given, it must be a non-empty list of + IssueConfig objects. :: + + IssueExperiment(..., configs=[ + IssueConfig("ff", ["--search", "eager_greedy(ff())"]), + IssueConfig( + "lama", [], + driver_options=["--alias", "seq-sat-lama-2011"]), + ]) + + If *path* is specified, it must be the path to where the + experiment should be built (e.g. + /home/john/experiments/issue123/exp01/). If omitted, the + experiment path is derived automatically from the main + script's filename. Example:: + + script = experiments/issue123/exp01.py --> + path = experiments/issue123/data/issue123-exp01/ + + """ + + path = path or get_data_dir() + + FastDownwardExperiment.__init__(self, path=path, **kwargs) + + if (revisions and not configs) or (not revisions and configs): + raise ValueError( + "please provide either both or none of revisions and configs") + + for rev in revisions: + for config in configs: + self.add_algorithm( + get_algo_nick(rev, config.nick), + get_repo_base(), + rev, + config.component_options, + build_options=config.build_options, + driver_options=config.driver_options) + + self._revisions = revisions + self._configs = configs + + @classmethod + def _is_portfolio(cls, config_nick): + return "fdss" in config_nick + + @classmethod + def get_supported_attributes(cls, config_nick, attributes): + if cls._is_portfolio(config_nick): + return [attr for attr in attributes + if attr in cls.PORTFOLIO_ATTRIBUTES] + return attributes + + def add_absolute_report_step(self, **kwargs): + """Add step that makes an absolute report. + + Absolute reports are useful for experiments that don't compare + revisions. + + The report is written to the experiment evaluation directory. + + All *kwargs* will be passed to the AbsoluteReport class. If the + keyword argument *attributes* is not specified, a default list + of attributes is used. 
:: + + exp.add_absolute_report_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + report = AbsoluteReport(**kwargs) + outfile = os.path.join( + self.eval_dir, + get_experiment_name() + "." + report.output_format) + self.add_report(report, outfile=outfile) + + def add_comparison_table_step(self, **kwargs): + """Add a step that makes pairwise revision comparisons. + + Create comparative reports for all pairs of Fast Downward + revisions. Each report pairs up the runs of the same config and + lists the two absolute attribute values and their difference + for all attributes in kwargs["attributes"]. + + All *kwargs* will be passed to the CompareConfigsReport class. + If the keyword argument *attributes* is not specified, a + default list of attributes is used. :: + + exp.add_comparison_table_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + + def make_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + compared_configs = [] + for config in self._configs: + config_nick = config.nick + compared_configs.append( + ("%s-%s" % (rev1, config_nick), + "%s-%s" % (rev2, config_nick), + "Diff (%s)" % config_nick)) + report = ComparativeReport(compared_configs, **kwargs) + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.%s" % ( + self.name, rev1, rev2, report.output_format)) + report(self.eval_dir, outfile) + + self.add_step("make-comparison-tables", make_comparison_tables) + + def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): + """Add step creating (relative) scatter plots for all revision pairs. + + Create a scatter plot for each combination of attribute, + configuration and revisions pair. If *attributes* is not + specified, a list of common scatter plot attributes is used. + For portfolios all attributes except "cost", "coverage" and + "plan_length" will be ignored. 
:: + + exp.add_scatter_plot_step(attributes=["expansions"]) + + """ + if relative: + scatter_dir = os.path.join(self.eval_dir, "scatter-relative") + step_name = "make-relative-scatter-plots" + else: + scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") + step_name = "make-absolute-scatter-plots" + if attributes is None: + attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES + + def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): + name = "-".join([self.name, rev1, rev2, attribute, config_nick]) + if config_nick2 is not None: + name += "-" + config_nick2 + print("Make scatter plot for", name) + algo1 = get_algo_nick(rev1, config_nick) + algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) + if attribute == "cost": + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + scale="log", + get_category=lambda run1, run2: run1["domain"]) + else: + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + get_category=lambda run1, run2: run1["domain"]) + report( + self.eval_dir, + os.path.join(scatter_dir, rev1 + "-" + rev2, name)) + + def make_scatter_plots(): + for config in self._configs: + for rev1, rev2 in itertools.combinations(self._revisions, 2): + for attribute in self.get_supported_attributes( + config.nick, attributes): + make_scatter_plot(config.nick, rev1, rev2, attribute) + for nick1, nick2, rev1, rev2, attribute in additional: + make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) + + self.add_step(step_name, make_scatter_plots) + + def add_archive_step(self, archive_path): + archive.add_archive_step(self, archive_path) \ No newline at end of file diff --git a/experiments/issue1072/landmark_parser.py b/experiments/issue1072/landmark_parser.py new file mode 100755 index 0000000000..d32ec08819 --- /dev/null +++ b/experiments/issue1072/landmark_parser.py @@ -0,0 +1,29 @@ +#! /usr/bin/env python + +import re + +from lab.parser import Parser + +parser = Parser() +parser.add_pattern( + "lmgraph_generation_time", + r"Landmark graph generation time: (.+)s", + type=float) +parser.add_pattern( + "landmarks", + r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_disjunctive", + r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_conjunctive", + r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", + type=int) +parser.add_pattern( + "orderings", + r"Landmark graph contains (\d+) orderings.", + type=int) + +parser.parse() diff --git a/experiments/issue1072/v1-anytime.py b/experiments/issue1072/v1-anytime.py new file mode 100755 index 0000000000..7cde0f3910 --- /dev/null +++ b/experiments/issue1072/v1-anytime.py @@ -0,0 +1,99 @@ +#! 
+
+import os
+
+from lab.environments import LocalEnvironment, BaselSlurmEnvironment
+from lab.reports import Attribute
+
+import common_setup
+from common_setup import IssueConfig, IssueExperiment
+
+ARCHIVE_PATH = "ai/downward/issue1072"
+DIR = os.path.dirname(os.path.abspath(__file__))
+REPO_DIR = os.environ["DOWNWARD_REPO"]
+BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
+REVISIONS = [
+    "issue1072-v1",
+]
+
+CONFIGS = []
+
+for use_preferrence_hierarchy in ["true", "false"]:
+    for use_preferred_operators_conjunctive in ["true", "false"]:
+        CONFIGS += [
+            IssueConfig(f"lama-hm2-pref-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}",
+                        ["--if-unit-cost",
+                         "--evaluator",
+                         f"hlm=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})",
+                         "--evaluator", "hff=ff()",
+                         "--search", """iterated([
+                             lazy_greedy([hff,hlm],preferred=[hff,hlm]),
+                             lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
+                             lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
+                             lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
+                             lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
+                             ],repeat_last=true,continue_on_fail=true)""",
+                         "--if-non-unit-cost",
+                         "--evaluator",
+                         f"hlm1=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})",
+                         "--evaluator", "hff1=ff(transform=adapt_costs(one))",
+                         "--evaluator",
+                         f"hlm2=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(plusone),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})",
+                         "--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
+                         "--search", """iterated([
+                             lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
+                                         cost_type=one,reopen_closed=false),
+                             lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
+                                         reopen_closed=false),
+                             lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
+                             lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
+                             lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
+                             lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
+                             ],repeat_last=true,continue_on_fail=true)""",
+                         # Append --always to be on the safe side if we want to append
+                         # additional options later.
+                         "--always"])
+        ]
+
+
+SUITE = common_setup.DEFAULT_SATISFICING_SUITE
+ENVIRONMENT = BaselSlurmEnvironment(
+    partition="infai_2",
+    email="simon.dold@unibas.ch",
+    export=["PATH", "DOWNWARD_BENCHMARKS"])
+
+if common_setup.is_test_run():
+    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
+    ENVIRONMENT = LocalEnvironment(processes=3)
+
+exp = IssueExperiment(
+    revisions=REVISIONS,
+    configs=CONFIGS,
+    environment=ENVIRONMENT,
+)
+exp.add_suite(BENCHMARKS_DIR, SUITE)
+
+exp.add_parser(exp.EXITCODE_PARSER)
+exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
+exp.add_parser(exp.PLANNER_PARSER)
+exp.add_parser("landmark_parser.py")
+
+ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
+    Attribute("landmarks", min_wins=False),
+    Attribute("landmarks_disjunctive", min_wins=False),
+    Attribute("landmarks_conjunctive", min_wins=False),
+    Attribute("orderings", min_wins=False),
+    Attribute("lmgraph_generation_time", min_wins=True),
+]
+
+exp.add_step('build', exp.build)
+exp.add_step('start', exp.start_runs)
+exp.add_fetcher(name='fetch', merge=True)
+
+exp.add_absolute_report_step(attributes=ATTRIBUTES)
+exp.add_scatter_plot_step(relative=True, attributes=["cost"])
+exp.add_scatter_plot_step(relative=False, attributes=["cost"])
+
+exp.add_archive_step(ARCHIVE_PATH)
+
+exp.run_steps()
diff --git a/experiments/issue1072/v1-first.py b/experiments/issue1072/v1-first.py
new file mode 100755
index 0000000000..d38adafdfd
--- /dev/null
+++ b/experiments/issue1072/v1-first.py
@@ -0,0 +1,81 @@
+#! /usr/bin/env python3
+
+import os
+
+from lab.environments import LocalEnvironment, BaselSlurmEnvironment
+from lab.reports import Attribute
+
+import common_setup
+from common_setup import IssueConfig, IssueExperiment
+
+ARCHIVE_PATH = "ai/downward/issue1072"
+DIR = os.path.dirname(os.path.abspath(__file__))
+REPO_DIR = os.environ["DOWNWARD_REPO"]
+BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
+REVISIONS = [
+    "issue1072-v1",
+]
+
+CONFIGS = []
+
+for use_preferrence_hierarchy in ["true", "false"]:
+    for use_preferred_operators_conjunctive in ["true", "false"]:
+        CONFIGS += [
+            IssueConfig(
+                f"lama-hm2-first-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}",
+                ["--evaluator",
+                 "hlm=lmcount(lm_factory=lm_reasonable_orders_hps("
+                 f"lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})",
+                 "--evaluator", "hff=ff(transform=adapt_costs(one))",
+                 "--search",
+                 "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,reopen_closed=false)"]),
+            IssueConfig(
+                f"lm_hm2-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}",
+                ["--evaluator",
+                 f"hlm=lmcount(lm_factory=lm_hm(m=2),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})",
+                 "--search",
+                 "eager_greedy([hlm],preferred=[hlm])"]),
+        ]
+
+
+SUITE = common_setup.DEFAULT_SATISFICING_SUITE
+ENVIRONMENT = BaselSlurmEnvironment(
+    partition="infai_2",
+    email="simon.dold@unibas.ch",
+    export=["PATH", "DOWNWARD_BENCHMARKS"])
+
+if common_setup.is_test_run():
+    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
+    ENVIRONMENT = LocalEnvironment(processes=3)
+
+exp = IssueExperiment(
+    revisions=REVISIONS,
+    configs=CONFIGS,
+    environment=ENVIRONMENT,
+)
+exp.add_suite(BENCHMARKS_DIR, SUITE)
+
+exp.add_parser(exp.EXITCODE_PARSER)
+exp.add_parser(exp.SINGLE_SEARCH_PARSER)
+exp.add_parser(exp.PLANNER_PARSER)
+exp.add_parser("landmark_parser.py")
+
+ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
+    Attribute("landmarks", min_wins=False),
+    Attribute("landmarks_disjunctive", min_wins=False),
+    Attribute("landmarks_conjunctive", min_wins=False),
+    Attribute("orderings", min_wins=False),
+    Attribute("lmgraph_generation_time", min_wins=True),
+]
+
+exp.add_step('build', exp.build)
+exp.add_step('start', exp.start_runs)
+exp.add_fetcher(name='fetch', merge=True)
+
+exp.add_absolute_report_step(attributes=ATTRIBUTES)
+exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"])
+exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"])
+
+exp.add_archive_step(ARCHIVE_PATH)
+
+exp.run_steps()
diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc
index 75d4aacf77..38a65d75c4 100644
--- a/src/search/landmarks/landmark_count_heuristic.cc
+++ b/src/search/landmarks/landmark_count_heuristic.cc
@@ -30,6 +30,8 @@ namespace landmarks {
 LandmarkCountHeuristic::LandmarkCountHeuristic(const plugins::Options &opts)
     : Heuristic(opts),
       use_preferred_operators(opts.get<bool>("pref")),
+      use_preferred_operators_conjunctive(opts.get<bool>("use_preferred_operators_conjunctive")),
+      use_preferrence_hierarchy(opts.get<bool>("use_preferrence_hierarchy")),
       conditional_effects_supported(
           opts.get<shared_ptr<LandmarkFactory>>("lm_factory")->supports_conditional_effects()),
       admissible(opts.get<bool>("admissible")),
@@ -222,6 +224,7 @@ void LandmarkCountHeuristic::generate_preferred_operators(
     assert(successor_generator);
     vector<OperatorID> applicable_operators;
     successor_generator->generate_applicable_ops(state, applicable_operators);
+    vector<OperatorID> preferred_operators_conjunctive;
     vector<OperatorID> preferred_operators_simple;
     vector<OperatorID> preferred_operators_disjunctive;
 
@@ -242,25 +245,52 @@ void LandmarkCountHeuristic::generate_preferred_operators(
             FactProxy fact_proxy = effect.get_fact();
             LandmarkNode *lm_node = lgraph->get_node(fact_proxy.get_pair());
             if (lm_node && landmark_is_interesting(
-                    state, reached, *lm_node, all_landmarks_reached)) {
+                state, reached, *lm_node, all_landmarks_reached)) {
                 if (lm_node->get_landmark().disjunctive) {
                     preferred_operators_disjunctive.push_back(op_id);
                 } else {
                     preferred_operators_simple.push_back(op_id);
                 }
             }
+            if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) {
+                std::vector<LandmarkNode *> conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair());
+                for( auto conj_lm : conjunctive_landmarks){
+                    if (landmark_is_interesting(
+                            state, reached, *conj_lm, all_landmarks_reached) ){
+                        preferred_operators_conjunctive.push_back(op_id);
+                        break;
+                    }
+                }
+
+            }
         }
     }
 
     OperatorsProxy operators = task_proxy.get_operators();
-    if (preferred_operators_simple.empty()) {
-        for (OperatorID op_id : preferred_operators_disjunctive) {
-            set_preferred(operators[op_id]);
+    if (use_preferrence_hierarchy){
+        if (!preferred_operators_conjunctive.empty()) {
+            for (OperatorID op_id : preferred_operators_conjunctive) {
+                set_preferred(operators[op_id]);
+            }
+        } else if (!preferred_operators_simple.empty()) {
+            for (OperatorID op_id : preferred_operators_simple) {
+                set_preferred(operators[op_id]);
+            }
+        } else {
+            for (OperatorID op_id : preferred_operators_disjunctive) {
+                set_preferred(operators[op_id]);
+            }
         }
     } else {
+        for (OperatorID op_id: preferred_operators_conjunctive) {
+            set_preferred(operators[op_id]);
+        }
         for (OperatorID op_id : preferred_operators_simple) {
             set_preferred(operators[op_id]);
         }
+        for (OperatorID op_id : preferred_operators_disjunctive) {
+            set_preferred(operators[op_id]);
+        }
     }
 }
 
@@ -364,6 +394,20 @@ class LandmarkCountHeuristicFeature : public plugins::TypedFeature<Evaluator, LandmarkCountHeuristic> {
+        add_option<bool>(
+            "use_preferred_operators_conjunctive",
+            "If true, conjunctive landmarks are considered for the generation "
+            "of preferred operators. Otherwise, only simple and disjunctive "
+            "landmarks are considered.",
+            "false");
+        add_option<bool>(
+            "use_preferrence_hierarchy",
+            "If true, only applicable operators leading to one kind of "
+            "interesting landmarks are preferred operators, namely those of the highest "
+            "hierarchy (conjunctive > simple > disjunctive). "
+            "Otherwise, all applicable operators leading to any interesting landmark "
+            "are preferred operators.",
+            "true");
         add_option<bool>("alm", "use action landmarks", "true");
         lp::add_lp_solver_option_to_feature(*this);
         Heuristic::add_options_to_feature(*this);
diff --git a/src/search/landmarks/landmark_count_heuristic.h b/src/search/landmarks/landmark_count_heuristic.h
index 2ae5aa33c3..18b9a9cc22 100644
--- a/src/search/landmarks/landmark_count_heuristic.h
+++ b/src/search/landmarks/landmark_count_heuristic.h
@@ -18,6 +18,8 @@ class LandmarkStatusManager;
 class LandmarkCountHeuristic : public Heuristic {
     std::shared_ptr<LandmarkGraph> lgraph;
     const bool use_preferred_operators;
+    const bool use_preferred_operators_conjunctive;
+    const bool use_preferrence_hierarchy;
     const bool conditional_effects_supported;
     const bool admissible;
     const bool dead_ends_reliable;
diff --git a/src/search/landmarks/landmark_factory_h_m.cc b/src/search/landmarks/landmark_factory_h_m.cc
index faa744c697..e736e1376f 100644
--- a/src/search/landmarks/landmark_factory_h_m.cc
+++ b/src/search/landmarks/landmark_factory_h_m.cc
@@ -619,7 +619,7 @@ void LandmarkFactoryHM::discard_conjunctive_landmarks() {
                 << " conjunctive landmarks" << endl;
     }
     lm_graph->remove_node_if(
-        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;});
+        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;}, conjunctive_landmarks);
 }
 }
@@ -931,7 +931,7 @@ void LandmarkFactoryHM::add_lm_node(int set_index, bool goal) {
         landmark.first_achievers.insert(
             hm_entry.first_achievers.begin(), hm_entry.first_achievers.end());
-        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark));
+        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark), conjunctive_landmarks);
     }
 }
diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc
index 812147d6b9..3b5e5f80ef 100644
--- a/src/search/landmarks/landmark_graph.cc
+++ b/src/search/landmarks/landmark_graph.cc
@@ -57,6 +57,10 @@ LandmarkNode &LandmarkGraph::get_disjunctive_landmark(const FactPair &fact) cons
     return *(disjunctive_landmarks_to_nodes.find(fact)->second);
 }
 
+const vector<LandmarkNode *> &LandmarkGraph::get_conjunctive_landmarks(const FactPair &fact) const {
+    assert(contains_conjunctive_landmark(fact));
+    return conjunctive_landmarks_to_nodes.find(fact)->second;
+}
 
 bool LandmarkGraph::contains_simple_landmark(const FactPair &lm) const {
     return simple_landmarks_to_nodes.count(lm) != 0;
@@ -66,6 +70,10 @@ bool LandmarkGraph::contains_disjunctive_landmark(const FactPair &lm) const {
     return disjunctive_landmarks_to_nodes.count(lm) != 0;
 }
 
+bool LandmarkGraph::contains_conjunctive_landmark(const FactPair &lm) const {
+    return conjunctive_landmarks_to_nodes.count(lm) != 0;
+}
+
 bool LandmarkGraph::contains_overlapping_disjunctive_landmark(
     const set<FactPair> &lm) const {
     // Test whether ONE of the facts is present in some disjunctive landmark.
@@ -101,7 +109,7 @@ bool LandmarkGraph::contains_landmark(const FactPair &lm) const {
     return contains_simple_landmark(lm) || contains_disjunctive_landmark(lm);
 }
 
-LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) {
+LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjunctive) {
     assert(all_of(landmark.facts.begin(), landmark.facts.end(),
                   [&](const FactPair &lm_fact) {
                       return !contains_landmark(lm_fact);
                   }));
@@ -116,6 +124,16 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) {
         }
         ++num_disjunctive_landmarks;
     } else if (lm.conjunctive) {
+        if (store_conjunctive) {
+            for (const FactPair &lm_fact: lm.facts) {
+                auto it = conjunctive_landmarks_to_nodes.find(lm_fact);
+                if (it == conjunctive_landmarks_to_nodes.end()) {
+                    conjunctive_landmarks_to_nodes.emplace(lm_fact, std::vector<LandmarkNode *>{new_node_p});
+                } else {
+                    (it->second).push_back(new_node_p);
+                }
+            }
+        }
         ++num_conjunctive_landmarks;
     } else {
         simple_landmarks_to_nodes.emplace(lm.facts.front(), new_node_p);
@@ -123,7 +141,7 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjun
     }
     return *new_node_p;
 }
 
-void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) {
+void LandmarkGraph::remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive) {
     for (const auto &parent : node->parents) {
         LandmarkNode &parent_node = *(parent.first);
         parent_node.children.erase(node);
@@ -142,13 +160,23 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node, bool erase_conju
         }
     } else if (landmark.conjunctive) {
         --num_conjunctive_landmarks;
+        if (erase_conjunctive) {
+            for (const FactPair &lm_fact: landmark.facts) {
+                std::vector<LandmarkNode *> *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find(
+                    lm_fact)->second);
+                auto it = std::find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node);
+                if (it != conjunctive_landmarks_vector->end()) {
+                    conjunctive_landmarks_vector->erase(it);
+                }
+            }
+        }
     } else {
         simple_landmarks_to_nodes.erase(landmark.facts[0]);
     }
 }
 
-void LandmarkGraph::remove_node(LandmarkNode *node) {
-    remove_node_occurrences(node);
+void LandmarkGraph::remove_node(LandmarkNode *node, bool erase_conjunctive) {
+    remove_node_occurrences(node, erase_conjunctive);
     auto it = find_if(nodes.begin(), nodes.end(),
                       [&node](unique_ptr<LandmarkNode> &n) {
                           return n.get() == node;
@@ -158,10 +186,10 @@ void LandmarkGraph::remove_node(LandmarkNode *node, bool erase_conjunctive) {
 }
 
 void LandmarkGraph::remove_node_if(
-    const function<bool (const LandmarkNode &)> &remove_node_condition) {
+    const function<bool (const LandmarkNode &)> &remove_node_condition, bool erase_conjunctive) {
     for (auto &node : nodes) {
         if (remove_node_condition(*node)) {
-            remove_node_occurrences(node.get());
+            remove_node_occurrences(node.get(), erase_conjunctive);
         }
     }
     nodes.erase(remove_if(nodes.begin(), nodes.end(),
@@ -177,4 +205,6 @@ void LandmarkGraph::set_landmark_ids() {
         ++id;
     }
 }
+
+
 }
diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h
index 072522c638..d0f1308a59 100644
--- a/src/search/landmarks/landmark_graph.h
+++ b/src/search/landmarks/landmark_graph.h
@@ -78,9 +78,10 @@ class LandmarkGraph {
 
     utils::HashMap<FactPair, LandmarkNode *> simple_landmarks_to_nodes;
     utils::HashMap<FactPair, LandmarkNode *> disjunctive_landmarks_to_nodes;
+    utils::HashMap<FactPair, std::vector<LandmarkNode *>> conjunctive_landmarks_to_nodes;
     Nodes nodes;
 
-    void remove_node_occurrences(LandmarkNode *node);
+    void remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive);
 
 public:
     /* This is needed only by landmark graph factories and will disappear
@@ -119,6 +120,7 @@ class LandmarkGraph {
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
     LandmarkNode &get_disjunctive_landmark(const FactPair &fact) const;
+    const std::vector<LandmarkNode *> &get_conjunctive_landmarks(const FactPair &fact) const;
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. It is not needed by
@@ -126,6 +128,7 @@ class LandmarkGraph {
     bool contains_simple_landmark(const FactPair &lm) const;
     /* Only used internally. */
     bool contains_disjunctive_landmark(const FactPair &lm) const;
+    bool contains_conjunctive_landmark(const FactPair &lm) const;
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. It is not needed by
        HMLandmarkFactory*/
@@ -140,12 +143,12 @@ class LandmarkGraph {
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
-    LandmarkNode &add_landmark(Landmark &&landmark);
+    LandmarkNode &add_landmark(Landmark &&landmark, bool store_conjunctive = false);
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
-    void remove_node(LandmarkNode *node);
+    void remove_node(LandmarkNode *node, bool erase_conjunctive = false);
     void remove_node_if(
-        const std::function<bool (const LandmarkNode &)> &remove_node_condition);
+        const std::function<bool (const LandmarkNode &)> &remove_node_condition, bool erase_conjunctive = false);
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */

From 02898b3a777e5c05bd1b6660e2bc0e3b06bcff07 Mon Sep 17 00:00:00 2001
From: Simon Dold
Date: Mon, 13 Feb 2023 14:46:24 +0100
Subject: [PATCH 3/7] [issue1072] remove experiment options

---
 .../landmarks/landmark_count_heuristic.cc     | 55 ++++---------------
 .../landmarks/landmark_count_heuristic.h      |  2 -
 src/search/landmarks/landmark_factory_h_m.cc  |  4 +-
 src/search/landmarks/landmark_graph.cc        | 43 ++++++---------
 src/search/landmarks/landmark_graph.h         |  8 +--
 5 files changed, 35 insertions(+), 77 deletions(-)

diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc
index 38a65d75c4..c59f1cd33a 100644
--- a/src/search/landmarks/landmark_count_heuristic.cc
+++ b/src/search/landmarks/landmark_count_heuristic.cc
@@ -30,8 +30,6 @@ namespace landmarks {
 LandmarkCountHeuristic::LandmarkCountHeuristic(const plugins::Options &opts)
     : Heuristic(opts),
       use_preferred_operators(opts.get<bool>("pref")),
-      use_preferred_operators_conjunctive(opts.get<bool>("use_preferred_operators_conjunctive")),
-      use_preferrence_hierarchy(opts.get<bool>("use_preferrence_hierarchy")),
       conditional_effects_supported(
           opts.get<shared_ptr<LandmarkFactory>>("lm_factory")->supports_conditional_effects()),
       admissible(opts.get<bool>("admissible")),
@@ -245,7 +243,7 @@ void LandmarkCountHeuristic::generate_preferred_operators(
             FactProxy fact_proxy = effect.get_fact();
             LandmarkNode *lm_node = lgraph->get_node(fact_proxy.get_pair());
             if (lm_node && landmark_is_interesting(
-                state, reached, *lm_node, all_landmarks_reached)) {
+                    state, reached, *lm_node, all_landmarks_reached)) {
                 if (lm_node->get_landmark().disjunctive) {
                     preferred_operators_disjunctive.push_back(op_id);
                 } else {
@@ -254,43 +252,26 @@ void LandmarkCountHeuristic::generate_preferred_operators(
             }
             if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) {
                 std::vector<LandmarkNode *> conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair());
-                for( auto conj_lm : conjunctive_landmarks){
+                for (auto conj_lm : conjunctive_landmarks) {
                     if (landmark_is_interesting(
-                            state, reached, *conj_lm, all_landmarks_reached) ){
+                            state, reached, *conj_lm, all_landmarks_reached)) {
                         preferred_operators_conjunctive.push_back(op_id);
                         break;
                     }
                 }
-
             }
         }
     }
 
     OperatorsProxy operators = task_proxy.get_operators();
-    if (use_preferrence_hierarchy){
-        if (!preferred_operators_conjunctive.empty()) {
-            for (OperatorID op_id : preferred_operators_conjunctive) {
-                set_preferred(operators[op_id]);
-            }
-        } else if (!preferred_operators_simple.empty()) {
-            for (OperatorID op_id : preferred_operators_simple) {
-                set_preferred(operators[op_id]);
-            }
-        } else {
-            for (OperatorID op_id : preferred_operators_disjunctive) {
-                set_preferred(operators[op_id]);
-            }
-        }
-    } else {
-        for (OperatorID op_id: preferred_operators_conjunctive) {
-            set_preferred(operators[op_id]);
-        }
-        for (OperatorID op_id : preferred_operators_simple) {
-            set_preferred(operators[op_id]);
-        }
-        for (OperatorID op_id : preferred_operators_disjunctive) {
-            set_preferred(operators[op_id]);
-        }
+    for (OperatorID op_id: preferred_operators_conjunctive) {
+        set_preferred(operators[op_id]);
+    }
+    for (OperatorID op_id : preferred_operators_simple) {
+        set_preferred(operators[op_id]);
+    }
+    for (OperatorID op_id : preferred_operators_disjunctive) {
+        set_preferred(operators[op_id]);
     }
 }
 
@@ -394,20 +375,6 @@ class LandmarkCountHeuristicFeature : public plugins::TypedFeature<Evaluator, LandmarkCountHeuristic> {
-        add_option<bool>(
-            "use_preferred_operators_conjunctive",
-            "If true, conjunctive landmarks are considered for the generation "
-            "of preferred operators. Otherwise, only simple and disjunctive "
-            "landmarks are considered.",
-            "false");
-        add_option<bool>(
-            "use_preferrence_hierarchy",
-            "If true, only applicable operators leading to one kind of "
-            "interesting landmarks are preferred operators, namely those of the highest "
-            "hierarchy (conjunctive > simple > disjunctive). "
-            "Otherwise, all applicable operators leading to any interesting landmark "
-            "are preferred operators.",
-            "true");
         add_option<bool>("alm", "use action landmarks", "true");
         lp::add_lp_solver_option_to_feature(*this);
         Heuristic::add_options_to_feature(*this);
diff --git a/src/search/landmarks/landmark_count_heuristic.h b/src/search/landmarks/landmark_count_heuristic.h
index 18b9a9cc22..2ae5aa33c3 100644
--- a/src/search/landmarks/landmark_count_heuristic.h
+++ b/src/search/landmarks/landmark_count_heuristic.h
@@ -18,8 +18,6 @@ class LandmarkStatusManager;
 class LandmarkCountHeuristic : public Heuristic {
     std::shared_ptr<LandmarkGraph> lgraph;
     const bool use_preferred_operators;
-    const bool use_preferred_operators_conjunctive;
-    const bool use_preferrence_hierarchy;
     const bool conditional_effects_supported;
     const bool admissible;
     const bool dead_ends_reliable;
diff --git a/src/search/landmarks/landmark_factory_h_m.cc b/src/search/landmarks/landmark_factory_h_m.cc
index e736e1376f..faa744c697 100644
--- a/src/search/landmarks/landmark_factory_h_m.cc
+++ b/src/search/landmarks/landmark_factory_h_m.cc
@@ -619,7 +619,7 @@ void LandmarkFactoryHM::discard_conjunctive_landmarks() {
                 << " conjunctive landmarks" << endl;
     }
     lm_graph->remove_node_if(
-        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;}, conjunctive_landmarks);
+        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;});
 }
 }
@@ -931,7 +931,7 @@ void LandmarkFactoryHM::add_lm_node(int set_index, bool goal) {
         landmark.first_achievers.insert(
             hm_entry.first_achievers.begin(), hm_entry.first_achievers.end());
-        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark), conjunctive_landmarks);
+        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark));
     }
 }
diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc
index 3b5e5f80ef..4809411cc3 100644
--- a/src/search/landmarks/landmark_graph.cc
+++ b/src/search/landmarks/landmark_graph.cc
@@ -109,7 +109,7 @@ bool LandmarkGraph::contains_landmark(const FactPair &lm) const {
     return contains_simple_landmark(lm) || contains_disjunctive_landmark(lm);
 }
 
-LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjunctive) {
+LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) {
     assert(all_of(landmark.facts.begin(), landmark.facts.end(),
                   [&](const FactPair &lm_fact) {
                       return !contains_landmark(lm_fact);
                   }));
@@ -124,14 +124,12 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjun
         }
         ++num_disjunctive_landmarks;
     } else if (lm.conjunctive) {
-        if (store_conjunctive) {
-            for (const FactPair &lm_fact: lm.facts) {
-                auto it = conjunctive_landmarks_to_nodes.find(lm_fact);
-                if (it == conjunctive_landmarks_to_nodes.end()) {
-                    conjunctive_landmarks_to_nodes.emplace(lm_fact, std::vector<LandmarkNode *>{new_node_p});
-                } else {
-                    (it->second).push_back(new_node_p);
-                }
+        for (const FactPair &lm_fact: lm.facts) {
+            auto it = conjunctive_landmarks_to_nodes.find(lm_fact);
+            if (it == conjunctive_landmarks_to_nodes.end()) {
+                conjunctive_landmarks_to_nodes.emplace(lm_fact, std::vector<LandmarkNode *>{new_node_p});
+            } else {
+                (it->second).push_back(new_node_p);
             }
         }
         ++num_conjunctive_landmarks;
@@ -141,7 +139,7 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark, bool store_conjun
     return *new_node_p;
 }
 
-void LandmarkGraph::remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive) {
+void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) {
     for (const auto &parent : node->parents) {
         LandmarkNode &parent_node = *(parent.first);
         parent_node.children.erase(node);
@@ -160,23 +158,20 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node, bool erase_conju
         }
     } else if (landmark.conjunctive) {
         --num_conjunctive_landmarks;
-        if (erase_conjunctive) {
-            for (const FactPair &lm_fact: landmark.facts) {
-                std::vector<LandmarkNode *> *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find(
-                    lm_fact)->second);
-                auto it = std::find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node);
-                if (it != conjunctive_landmarks_vector->end()) {
-                    conjunctive_landmarks_vector->erase(it);
-                }
-            }
+        for (const FactPair &lm_fact: landmark.facts) {
+            std::vector<LandmarkNode *> *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find(
+                lm_fact)->second);
+            auto it = std::find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node);
+            assert(it != conjunctive_landmarks_vector->end());
+            conjunctive_landmarks_vector->erase(it);
         }
     } else {
         simple_landmarks_to_nodes.erase(landmark.facts[0]);
     }
 }
 
-void LandmarkGraph::remove_node(LandmarkNode *node, bool erase_conjunctive) {
-    remove_node_occurrences(node, erase_conjunctive);
+void LandmarkGraph::remove_node(LandmarkNode *node) {
+    remove_node_occurrences(node);
     auto it = find_if(nodes.begin(), nodes.end(),
                       [&node](unique_ptr<LandmarkNode> &n) {
                           return n.get() == node;
@@ -186,10 +181,10 @@ void LandmarkGraph::remove_node(LandmarkNode *node, bool erase_conjunctive) {
 }
 
 void LandmarkGraph::remove_node_if(
-    const function<bool (const LandmarkNode &)> &remove_node_condition, bool erase_conjunctive) {
+    const function<bool (const LandmarkNode &)> &remove_node_condition) {
     for (auto &node : nodes) {
         if (remove_node_condition(*node)) {
-            remove_node_occurrences(node.get(), erase_conjunctive);
+            remove_node_occurrences(node.get());
         }
     }
     nodes.erase(remove_if(nodes.begin(), nodes.end(),
@@ -205,6 +200,4 @@ void LandmarkGraph::set_landmark_ids() {
         ++id;
     }
 }
-
-
 }
diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h
index d0f1308a59..870b119cff 100644
--- a/src/search/landmarks/landmark_graph.h
+++ b/src/search/landmarks/landmark_graph.h
@@ -81,7 +81,7 @@ class LandmarkGraph {
     utils::HashMap<FactPair, std::vector<LandmarkNode *>> conjunctive_landmarks_to_nodes;
     Nodes nodes;
 
-    void remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive);
+    void remove_node_occurrences(LandmarkNode *node);
 
 public:
     /* This is needed only by landmark graph factories and will disappear
@@ -143,12 +143,12 @@ class LandmarkGraph {
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
-    LandmarkNode &add_landmark(Landmark &&landmark, bool store_conjunctive = false);
+    LandmarkNode &add_landmark(Landmark &&landmark);
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
-    void remove_node(LandmarkNode *node, bool erase_conjunctive = false);
+    void remove_node(LandmarkNode *node);
     void remove_node_if(
-        const std::function<bool (const LandmarkNode &)> &remove_node_condition, bool erase_conjunctive = false);
+        const std::function<bool (const LandmarkNode &)> &remove_node_condition);
     /* This is needed only by landmark graph factories and will disappear
       when moving landmark graph creation there. */
From 4d2ad222a5b78c100efb9029b367152818c823b0 Mon Sep 17 00:00:00 2001
From: Simon Dold
Date: Mon, 13 Feb 2023 15:46:30 +0100
Subject: [PATCH 4/7] [issue1072] remove intermediate op-lists

---
 .../landmarks/landmark_count_heuristic.cc     | 22 ++-----------------
 1 file changed, 2 insertions(+), 20 deletions(-)

diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc
index c59f1cd33a..b93485865f 100644
--- a/src/search/landmarks/landmark_count_heuristic.cc
+++ b/src/search/landmarks/landmark_count_heuristic.cc
@@ -222,9 +222,6 @@ void LandmarkCountHeuristic::generate_preferred_operators(
     assert(successor_generator);
     vector<OperatorID> applicable_operators;
     successor_generator->generate_applicable_ops(state, applicable_operators);
-    vector<OperatorID> preferred_operators_conjunctive;
-    vector<OperatorID> preferred_operators_simple;
-    vector<OperatorID> preferred_operators_disjunctive;
 
     bool all_landmarks_reached = true;
     for (int i = 0; i < reached.size(); ++i) {
@@ -244,35 +241,20 @@ void LandmarkCountHeuristic::generate_preferred_operators(
             LandmarkNode *lm_node = lgraph->get_node(fact_proxy.get_pair());
             if (lm_node && landmark_is_interesting(
                     state, reached, *lm_node, all_landmarks_reached)) {
-                if (lm_node->get_landmark().disjunctive) {
-                    preferred_operators_disjunctive.push_back(op_id);
-                } else {
-                    preferred_operators_simple.push_back(op_id);
-                }
+                set_preferred(op);
             }
             if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) {
                 std::vector<LandmarkNode *> conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair());
                 for (auto conj_lm : conjunctive_landmarks) {
                     if (landmark_is_interesting(
                             state, reached, *conj_lm, all_landmarks_reached)) {
-                        preferred_operators_conjunctive.push_back(op_id);
+                        set_preferred(op);
                         break;
                     }
                 }
             }
         }
     }
-
-    OperatorsProxy operators = task_proxy.get_operators();
-    for (OperatorID op_id: preferred_operators_conjunctive) {
-        set_preferred(operators[op_id]);
-    }
-    for (OperatorID op_id : preferred_operators_simple) {
-        set_preferred(operators[op_id]);
-    }
-    for (OperatorID op_id : preferred_operators_disjunctive) {
-        set_preferred(operators[op_id]);
-    }
 }
 
 bool LandmarkCountHeuristic::landmark_is_interesting(

From 4794d4951dc2a924a4f3fc100495ccfc2d30b17f Mon Sep 17 00:00:00 2001
From: Simon Dold
Date: Mon, 13 Feb 2023 16:05:15 +0100
Subject: [PATCH 5/7] [issue1072] remove experiment options

---
 src/search/landmarks/landmark_count_heuristic.h | 2 --
 src/search/landmarks/landmark_factory_h_m.cc    | 4 ++--
 src/search/landmarks/landmark_graph.h           | 8 ++++----
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/search/landmarks/landmark_count_heuristic.h b/src/search/landmarks/landmark_count_heuristic.h
index 18b9a9cc22..2ae5aa33c3 100644
--- a/src/search/landmarks/landmark_count_heuristic.h
+++ b/src/search/landmarks/landmark_count_heuristic.h
@@ -18,8 +18,6 @@ class LandmarkStatusManager;
 class LandmarkCountHeuristic : public Heuristic {
     std::shared_ptr<LandmarkGraph> lgraph;
     const bool use_preferred_operators;
-    const bool use_preferred_operators_conjunctive;
-    const bool use_preferrence_hierarchy;
     const bool conditional_effects_supported;
     const bool admissible;
     const bool dead_ends_reliable;
diff --git a/src/search/landmarks/landmark_factory_h_m.cc b/src/search/landmarks/landmark_factory_h_m.cc
index e736e1376f..faa744c697 100644
--- a/src/search/landmarks/landmark_factory_h_m.cc
+++ b/src/search/landmarks/landmark_factory_h_m.cc
@@ -619,7 +619,7 @@ void LandmarkFactoryHM::discard_conjunctive_landmarks() {
                 << " conjunctive landmarks" << endl;
     }
     lm_graph->remove_node_if(
-        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;}, conjunctive_landmarks);
+        [](const LandmarkNode &node) {return node.get_landmark().conjunctive;});
 }
 }
@@ -931,7 +931,7 @@ void LandmarkFactoryHM::add_lm_node(int set_index, bool goal) {
         landmark.first_achievers.insert(
             hm_entry.first_achievers.begin(), hm_entry.first_achievers.end());
-        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark), conjunctive_landmarks);
+        lm_node_table_[set_index] = &lm_graph->add_landmark(move(landmark));
     }
 }
diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h
index d0f1308a59..870b119cff 100644
--- a/src/search/landmarks/landmark_graph.h
+++ b/src/search/landmarks/landmark_graph.h
@@ -81,7 +81,7 @@ class LandmarkGraph {
     utils::HashMap<FactPair, std::vector<LandmarkNode *>> conjunctive_landmarks_to_nodes;
     Nodes nodes;
 
-    void remove_node_occurrences(LandmarkNode *node, bool erase_conjunctive);
+    void remove_node_occurrences(LandmarkNode *node);
 
 public:
     /* This is needed only by landmark graph factories and will disappear
@@ -143,12 +143,12 @@ class LandmarkGraph {
     /* This is needed only by landmark graph factories and will disappear
        when moving landmark graph creation there. */
-    LandmarkNode &add_landmark(Landmark &&landmark, bool store_conjunctive = false);
+    LandmarkNode &add_landmark(Landmark &&landmark);
    /* This is needed only by landmark graph factories and will disappear
       when moving landmark graph creation there. */
-    void remove_node(LandmarkNode *node, bool erase_conjunctive = false);
+    void remove_node(LandmarkNode *node);
     void remove_node_if(
-        const std::function<bool (const LandmarkNode &)> &remove_node_condition, bool erase_conjunctive = false);
+        const std::function<bool (const LandmarkNode &)> &remove_node_condition);
     /* This is needed only by landmark graph factories and will disappear
       when moving landmark graph creation there. */

From 46d5ecb4272040498c6a2650b7ed121a1bfcc7c0 Mon Sep 17 00:00:00 2001
From: Simon Dold
Date: Mon, 13 Feb 2023 16:26:01 +0100
Subject: [PATCH 6/7] add missing function

---
 src/search/landmarks/landmark_graph.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc
index a4f8f250fc..4809411cc3 100644
--- a/src/search/landmarks/landmark_graph.cc
+++ b/src/search/landmarks/landmark_graph.cc
@@ -57,6 +57,10 @@ LandmarkNode &LandmarkGraph::get_disjunctive_landmark(const FactPair &fact) cons
     return *(disjunctive_landmarks_to_nodes.find(fact)->second);
 }
 
+const vector<LandmarkNode *> &LandmarkGraph::get_conjunctive_landmarks(const FactPair &fact) const {
+    assert(contains_conjunctive_landmark(fact));
+    return conjunctive_landmarks_to_nodes.find(fact)->second;
+}
 
 bool LandmarkGraph::contains_simple_landmark(const FactPair &lm) const {
     return simple_landmarks_to_nodes.count(lm) != 0;

From 96f5f4abe12e72d7e736a28b70102c33cdc1387c Mon Sep 17 00:00:00 2001
From: Simon Dold
Date: Mon, 13 Feb 2023 16:42:08 +0100
Subject: [PATCH 7/7] fix style

---
 src/search/landmarks/landmark_count_heuristic.cc | 2 +-
 src/search/landmarks/landmark_graph.cc           | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc
index b93485865f..9ef42de154 100644
--- a/src/search/landmarks/landmark_count_heuristic.cc
+++ b/src/search/landmarks/landmark_count_heuristic.cc
@@ -244,7 +244,7 @@ void LandmarkCountHeuristic::generate_preferred_operators(
                 set_preferred(op);
             }
             if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) {
-                std::vector<LandmarkNode *> conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair());
+                vector<LandmarkNode *> conjunctive_landmarks = lgraph->get_conjunctive_landmarks(fact_proxy.get_pair());
                 for (auto conj_lm : conjunctive_landmarks) {
                     if (landmark_is_interesting(
diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc
index 4809411cc3..9fbc94c7ad 100644
--- a/src/search/landmarks/landmark_graph.cc
+++ b/src/search/landmarks/landmark_graph.cc
@@ -127,7 +127,7 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) {
         for (const FactPair &lm_fact: lm.facts) {
             auto it = conjunctive_landmarks_to_nodes.find(lm_fact);
             if (it == conjunctive_landmarks_to_nodes.end()) {
-                conjunctive_landmarks_to_nodes.emplace(lm_fact, std::vector<LandmarkNode *>{new_node_p});
+                conjunctive_landmarks_to_nodes.emplace(lm_fact, vector<LandmarkNode *>{new_node_p});
             } else {
                 (it->second).push_back(new_node_p);
             }
@@ -159,9 +159,9 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) {
     } else if (landmark.conjunctive) {
         --num_conjunctive_landmarks;
         for (const FactPair &lm_fact: landmark.facts) {
-            std::vector<LandmarkNode *> *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find(
-                lm_fact)->second);
-            auto it = std::find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node);
+            vector<LandmarkNode *> *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find(
+                    lm_fact)->second);
+            auto it = find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node);
             assert(it != conjunctive_landmarks_vector->end());
             conjunctive_landmarks_vector->erase(it);
         }
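
Addendum: the data structure the series converges on is easier to see outside the planner. The sketch below is a minimal, self-contained C++ illustration of the same bookkeeping idea: a map from each fact to the conjunctive landmarks containing it, kept in sync when landmarks are added or removed, then used to decide whether an achiever of a fact should become a preferred operator. All names and types here (Fact, Landmark, LandmarkIndex, the reached flag) are simplified stand-ins invented for this example, not the planner's real classes.

// Sketch of the fact -> conjunctive-landmark index and its use for
// preferred operators. Simplified stand-ins only; the real code works on
// FactPair and LandmarkNode and queries a landmark status manager.
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#include <vector>

using Fact = std::pair<int, int>;  // (variable, value), like FactPair

struct Landmark {
    std::vector<Fact> facts;
    bool conjunctive = false;
    bool reached = false;  // stand-in for the landmark status manager
};

class LandmarkIndex {
    std::vector<std::unique_ptr<Landmark>> landmarks;
    // One fact can occur in several conjunctive landmarks, so each fact
    // maps to a vector of nodes (unlike the simple/disjunctive maps,
    // which map each fact to a single node).
    std::map<Fact, std::vector<Landmark *>> conjunctive_by_fact;

public:
    Landmark &add(Landmark &&lm) {
        landmarks.push_back(std::make_unique<Landmark>(std::move(lm)));
        Landmark *node = landmarks.back().get();
        if (node->conjunctive) {
            // Register the node under every fact of the conjunction.
            for (const Fact &fact : node->facts)
                conjunctive_by_fact[fact].push_back(node);
        }
        return *node;
    }

    void remove(Landmark *node) {
        if (node->conjunctive) {
            // Mirror image of add(): unregister the node everywhere.
            for (const Fact &fact : node->facts) {
                auto &vec = conjunctive_by_fact[fact];
                vec.erase(std::find(vec.begin(), vec.end(), node));
            }
        }
        landmarks.erase(std::find_if(
            landmarks.begin(), landmarks.end(),
            [node](const std::unique_ptr<Landmark> &p) {
                return p.get() == node;
            }));
    }

    // Rough analogue of contains_conjunctive_landmark plus
    // get_conjunctive_landmarks, collapsed into one lookup.
    const std::vector<Landmark *> *containing(const Fact &fact) const {
        auto it = conjunctive_by_fact.find(fact);
        return it == conjunctive_by_fact.end() ? nullptr : &it->second;
    }
};

// Mimics the shape of generate_preferred_operators() after patch 4: an
// operator effect that is part of a not-yet-reached conjunctive landmark
// makes the operator preferred.
bool achieves_interesting_conjunctive(const LandmarkIndex &index,
                                      const Fact &effect) {
    if (const auto *lms = index.containing(effect)) {
        for (const Landmark *lm : *lms)
            if (!lm->reached)
                return true;
    }
    return false;
}

int main() {
    LandmarkIndex index;
    // A conjunctive landmark over two facts.
    index.add(Landmark{{{0, 1}, {1, 0}}, /*conjunctive=*/true});
    std::cout << std::boolalpha
              << achieves_interesting_conjunctive(index, {0, 1}) << "\n"   // true
              << achieves_interesting_conjunctive(index, {2, 0}) << "\n";  // false
}

The vector-of-pointers value type is the reason the patches remove entries with a linear find-and-erase rather than a plain map erase: deleting one conjunctive landmark must leave the other landmarks registered under the same fact untouched.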