diff --git a/experiments/issue1072/archive.py b/experiments/issue1072/archive.py new file mode 100644 index 0000000000..bd76273619 --- /dev/null +++ b/experiments/issue1072/archive.py @@ -0,0 +1,107 @@ +from pathlib import Path +import subprocess +import tarfile +from tempfile import TemporaryDirectory + +ARCHIVE_HOST = "aifiles" +ARCHIVE_LOCATION = Path("experiments") + +def add_archive_step(exp, path): + """ + Adds a step to the given experiment that will archive it to the + archive location specified in ARCHIVE_LOCATION under the given path. + We archive the following files: + - everything in the same directory as the main experiment script + (except for 'data', '.venv', 'venv', and '__pycache__') + - all generated reports + - the combined properties file + - all run and error logs + - the source code stored in the experiment data directory + - any files added as resources to the experiment + + The first two items in the above list will be stored unpacked for easier + access while all other data will be packed. + """ + def archive(): + archive_path = ARCHIVE_LOCATION / path + _archive_script_dir(exp, ARCHIVE_HOST, archive_path) + _archive_data_dir(exp, ARCHIVE_HOST, archive_path) + _archive_eval_dir(exp, ARCHIVE_HOST, archive_path) + + exp.add_step("archive", archive) + + +def _archive_script_dir(exp, host, archive_path): + """ + Archives everything except 'data', '.venv', 'venv', and '__pycache__' from the + same directory as the experiment script at host:archive_path/scripts. + """ + script_dir = Path(exp._script).parent + target_path = archive_path / "scripts" + + script_files = [f for f in script_dir.glob("*") + if f.name not in ["data", ".venv", "venv", "__pycache__"]] + _rsync(script_files, host, target_path) + + +def _archive_data_dir(exp, host, archive_path): + """ + Packs all files we want to archive from the experiment's data directory and + then archives the packed data at host:archive_path/data. 
Specifically, the + archived files are: + - all files directly in the data dir (added resources such as parsers) + - all directories starting with "code-" (source code of all revisions and + the compiled binaries) + - all *.log and *.err files from the run directories + """ + data_dir = Path(exp.path) + target_path = archive_path / "data" + + data_files = [f for f in data_dir.glob("*") if f.is_file()] + data_files.extend([d for d in data_dir.glob("code-*") if d.is_dir()]) + data_files.extend(data_dir.glob("runs*/*/*.log")) + data_files.extend(data_dir.glob("runs*/*/*.err")) + with TemporaryDirectory() as tmpdirname: + packed_filename = Path(tmpdirname) / (exp.name + ".tar.xz") + _pack(data_files, packed_filename, Path(exp.path).parent) + _rsync([packed_filename], host, target_path) + + +def _archive_eval_dir(exp, host, archive_path): + """ + Archives all files in the experiment's eval dir. + If there is a properties file, it will be packed and only the + packed version will be included in the resulting list. + """ + eval_dir = Path(exp.eval_dir) + target_path = archive_path / "data" / eval_dir.name + + filenames = list(eval_dir.glob("*")) + properties = eval_dir / "properties" + if properties.exists(): + filenames.remove(properties) + with TemporaryDirectory() as tmpdirname: + packed_properties = Path(tmpdirname) / "properties.tar.xz" + _pack([properties], packed_properties, eval_dir) + _rsync([packed_properties], host, target_path) + _rsync(filenames, host, target_path) + + +def _pack(filenames, archive_filename, path_prefix): + """ + Packs all files given in filenames into an archive (.tar.xz) located at + archive_filename. The path_prefix is removed in the archive, i.e., + if the filename is '/path/to/file' and the prefix is '/path', the location + inside the archive will be 'to/file'. 
+ """ + with tarfile.open(archive_filename, "w|xz") as f: + for name in filenames: + f.add(name, name.relative_to(path_prefix)) + +def _rsync(filenames, host, target_path): + # Before copying files we have to create the target path on host. + # We could use the rsync option --mkpath but it is only available in newer + # rsync versions (and not in the one running on the grid) + # https://stackoverflow.com/questions/1636889 + subprocess.run(["ssh", host, "mkdir", "-p", target_path]) + subprocess.run(["rsync", "-avz"] + [str(f) for f in filenames] + [f"{host}:{target_path}"]) diff --git a/experiments/issue1072/common_setup.py b/experiments/issue1072/common_setup.py new file mode 100644 index 0000000000..d0b72fe313 --- /dev/null +++ b/experiments/issue1072/common_setup.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- + +import itertools +import os +import platform +import subprocess +import sys + +from lab.experiment import ARGPARSER +from lab import tools + +from downward.experiment import FastDownwardExperiment +from downward.reports.absolute import AbsoluteReport +from downward.reports.compare import ComparativeReport +from downward.reports.scatter import ScatterPlotReport + +import archive + +def parse_args(): + ARGPARSER.add_argument( + "--test", + choices=["yes", "no", "auto"], + default="auto", + dest="test_run", + help="test experiment locally on a small suite if --test=yes or " + "--test=auto and we are not on a cluster") + return ARGPARSER.parse_args() + +ARGS = parse_args() + + +DEFAULT_OPTIMAL_SUITE = [ + 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', + 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', + 'data-network-opt18-strips', 'depot', 'driverlog', + 'elevators-opt08-strips', 'elevators-opt11-strips', + 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', + 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', + 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', + 'mystery', 'nomystery-opt11-strips', 
'openstacks-opt08-strips', + 'openstacks-opt11-strips', 'openstacks-opt14-strips', + 'openstacks-strips', 'organic-synthesis-opt18-strips', + 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', + 'parcprinter-opt11-strips', 'parking-opt11-strips', + 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', + 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', + 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', + 'snake-opt18-strips', 'sokoban-opt08-strips', + 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', + 'termes-opt18-strips', 'tetris-opt14-strips', + 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', + 'transport-opt08-strips', 'transport-opt11-strips', + 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', + 'visitall-opt14-strips', 'woodworking-opt08-strips', + 'woodworking-opt11-strips', 'zenotravel'] + +DEFAULT_SATISFICING_SUITE = [ + 'agricola-sat18-strips', 'airport', 'assembly', + 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', + 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', + 'childsnack-sat14-strips', 'citycar-sat14-adl', + 'data-network-sat18-strips', 'depot', 'driverlog', + 'elevators-sat08-strips', 'elevators-sat11-strips', + 'flashfill-sat18-adl', 'floortile-sat11-strips', + 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', + 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', + 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', + 'miconic-simpleadl', 'movie', 'mprime', 'mystery', + 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', + 'openstacks-sat08-adl', 'openstacks-sat08-strips', + 'openstacks-sat11-strips', 'openstacks-sat14-strips', + 'openstacks-strips', 'optical-telegraphs', + 'organic-synthesis-sat18-strips', + 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', + 'parcprinter-sat11-strips', 'parking-sat11-strips', + 
'parking-sat14-strips', 'pathways', + 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', + 'psr-middle', 'psr-small', 'rovers', 'satellite', + 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', + 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', + 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', + 'termes-sat18-strips', 'tetris-sat14-strips', + 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', + 'transport-sat08-strips', 'transport-sat11-strips', + 'transport-sat14-strips', 'trucks', 'trucks-strips', + 'visitall-sat11-strips', 'visitall-sat14-strips', + 'woodworking-sat08-strips', 'woodworking-sat11-strips', + 'zenotravel'] + + +def get_script(): + """Get file name of main script.""" + return tools.get_script_path() + + +def get_script_dir(): + """Get directory of main script. + + Usually a relative directory (depends on how it was called by the user.)""" + return os.path.dirname(get_script()) + + +def get_experiment_name(): + """Get name for experiment. + + Derived from the absolute filename of the main script, e.g. + "/ham/spam/eggs.py" => "spam-eggs".""" + script = os.path.abspath(get_script()) + script_dir = os.path.basename(os.path.dirname(script)) + script_base = os.path.splitext(os.path.basename(script))[0] + return "%s-%s" % (script_dir, script_base) + + +def get_data_dir(): + """Get data dir for the experiment. + + This is the subdirectory "data" of the directory containing + the main script.""" + return os.path.join(get_script_dir(), "data", get_experiment_name()) + + +def get_repo_base(): + """Get base directory of the repository, as an absolute path. + + Search upwards in the directory tree from the main script until a + directory with a subdirectory named ".git" is found. 
+ + Abort if the repo base cannot be found.""" + path = os.path.abspath(get_script_dir()) + while os.path.dirname(path) != path: + if os.path.exists(os.path.join(path, ".git")): + return path + path = os.path.dirname(path) + sys.exit("repo base could not be found") + + +def is_running_on_cluster(): + node = platform.node() + return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") + + +def is_test_run(): + return ARGS.test_run == "yes" or ( + ARGS.test_run == "auto" and not is_running_on_cluster()) + + +def get_algo_nick(revision, config_nick): + return "{revision}-{config_nick}".format(**locals()) + + +class IssueConfig(object): + """Hold information about a planner configuration. + + See FastDownwardExperiment.add_algorithm() for documentation of the + constructor's options. + + """ + def __init__(self, nick, component_options, + build_options=None, driver_options=None): + self.nick = nick + self.component_options = component_options + self.build_options = build_options + self.driver_options = driver_options + + +class IssueExperiment(FastDownwardExperiment): + """Subclass of FastDownwardExperiment with some convenience features.""" + + DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] + + DEFAULT_TABLE_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "generated", + "memory", + "planner_memory", + "planner_time", + "quality", + "run_dir", + "score_evaluations", + "score_expansions", + "score_generated", + "score_memory", + "score_search_time", + "score_total_time", + "search_time", + "total_time", + ] + + DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "memory", + "search_time", + "total_time", + ] + + PORTFOLIO_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "plan_length", + "run_dir", + ] + + def __init__(self, revisions=None, configs=None, path=None, 
**kwargs): + """ + + You can either specify both *revisions* and *configs* or none + of them. If they are omitted, you will need to call + exp.add_algorithm() manually. + + If *revisions* is given, it must be a non-empty list of + revision identifiers, which specify which planner versions to + use in the experiment. The same versions are used for + translator, preprocessor and search. :: + + IssueExperiment(revisions=["issue123", "4b3d581643"], ...) + + If *configs* is given, it must be a non-empty list of + IssueConfig objects. :: + + IssueExperiment(..., configs=[ + IssueConfig("ff", ["--search", "eager_greedy(ff())"]), + IssueConfig( + "lama", [], + driver_options=["--alias", "seq-sat-lama-2011"]), + ]) + + If *path* is specified, it must be the path to where the + experiment should be built (e.g. + /home/john/experiments/issue123/exp01/). If omitted, the + experiment path is derived automatically from the main + script's filename. Example:: + + script = experiments/issue123/exp01.py --> + path = experiments/issue123/data/issue123-exp01/ + + """ + + path = path or get_data_dir() + + FastDownwardExperiment.__init__(self, path=path, **kwargs) + + if (revisions and not configs) or (not revisions and configs): + raise ValueError( + "please provide either both or none of revisions and configs") + + for rev in revisions: + for config in configs: + self.add_algorithm( + get_algo_nick(rev, config.nick), + get_repo_base(), + rev, + config.component_options, + build_options=config.build_options, + driver_options=config.driver_options) + + self._revisions = revisions + self._configs = configs + + @classmethod + def _is_portfolio(cls, config_nick): + return "fdss" in config_nick + + @classmethod + def get_supported_attributes(cls, config_nick, attributes): + if cls._is_portfolio(config_nick): + return [attr for attr in attributes + if attr in cls.PORTFOLIO_ATTRIBUTES] + return attributes + + def add_absolute_report_step(self, **kwargs): + """Add step that makes an absolute 
report. + + Absolute reports are useful for experiments that don't compare + revisions. + + The report is written to the experiment evaluation directory. + + All *kwargs* will be passed to the AbsoluteReport class. If the + keyword argument *attributes* is not specified, a default list + of attributes is used. :: + + exp.add_absolute_report_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + report = AbsoluteReport(**kwargs) + outfile = os.path.join( + self.eval_dir, + get_experiment_name() + "." + report.output_format) + self.add_report(report, outfile=outfile) + + def add_comparison_table_step(self, **kwargs): + """Add a step that makes pairwise revision comparisons. + + Create comparative reports for all pairs of Fast Downward + revisions. Each report pairs up the runs of the same config and + lists the two absolute attribute values and their difference + for all attributes in kwargs["attributes"]. + + All *kwargs* will be passed to the CompareConfigsReport class. + If the keyword argument *attributes* is not specified, a + default list of attributes is used. 
:: + + exp.add_comparison_table_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + + def make_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + compared_configs = [] + for config in self._configs: + config_nick = config.nick + compared_configs.append( + ("%s-%s" % (rev1, config_nick), + "%s-%s" % (rev2, config_nick), + "Diff (%s)" % config_nick)) + report = ComparativeReport(compared_configs, **kwargs) + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.%s" % ( + self.name, rev1, rev2, report.output_format)) + report(self.eval_dir, outfile) + + self.add_step("make-comparison-tables", make_comparison_tables) + + def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): + """Add step creating (relative) scatter plots for all revision pairs. + + Create a scatter plot for each combination of attribute, + configuration and revisions pair. If *attributes* is not + specified, a list of common scatter plot attributes is used. + For portfolios all attributes except "cost", "coverage" and + "plan_length" will be ignored. 
:: + + exp.add_scatter_plot_step(attributes=["expansions"]) + + """ + if relative: + scatter_dir = os.path.join(self.eval_dir, "scatter-relative") + step_name = "make-relative-scatter-plots" + else: + scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") + step_name = "make-absolute-scatter-plots" + if attributes is None: + attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES + + def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): + name = "-".join([self.name, rev1, rev2, attribute, config_nick]) + if config_nick2 is not None: + name += "-" + config_nick2 + print("Make scatter plot for", name) + algo1 = get_algo_nick(rev1, config_nick) + algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) + if attribute == "cost": + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + scale="log", + get_category=lambda run1, run2: run1["domain"]) + else: + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + get_category=lambda run1, run2: run1["domain"]) + report( + self.eval_dir, + os.path.join(scatter_dir, rev1 + "-" + rev2, name)) + + def make_scatter_plots(): + for config in self._configs: + for rev1, rev2 in itertools.combinations(self._revisions, 2): + for attribute in self.get_supported_attributes( + config.nick, attributes): + make_scatter_plot(config.nick, rev1, rev2, attribute) + for nick1, nick2, rev1, rev2, attribute in additional: + make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) + + self.add_step(step_name, make_scatter_plots) + + def add_archive_step(self, archive_path): + archive.add_archive_step(self, archive_path) \ No newline at end of file diff --git a/experiments/issue1072/landmark_parser.py b/experiments/issue1072/landmark_parser.py new file mode 100755 index 0000000000..d32ec08819 --- /dev/null +++ b/experiments/issue1072/landmark_parser.py @@ -0,0 +1,29 @@ +#! 
/usr/bin/env python + +import re + +from lab.parser import Parser + +parser = Parser() +parser.add_pattern( + "lmgraph_generation_time", + r"Landmark graph generation time: (.+)s", + type=float) +parser.add_pattern( + "landmarks", + r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_disjunctive", + r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", + type=int) +parser.add_pattern( + "landmarks_conjunctive", + r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", + type=int) +parser.add_pattern( + "orderings", + r"Landmark graph contains (\d+) orderings.", + type=int) + +parser.parse() diff --git a/experiments/issue1072/v1-anytime.py b/experiments/issue1072/v1-anytime.py new file mode 100755 index 0000000000..7cde0f3910 --- /dev/null +++ b/experiments/issue1072/v1-anytime.py @@ -0,0 +1,99 @@ +#! /usr/bin/env python3 + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment +from lab.reports import Attribute + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +ARCHIVE_PATH = "ai/downward/issue1075" +DIR = os.path.dirname(os.path.abspath(__file__)) +REPO_DIR = os.environ["DOWNWARD_REPO"] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = [ + "issue1072-v1", +] + +CONFIGS = [] + +for use_preferrence_hierarchy in ["true", "false"]: + for use_preferred_operators_conjunctive in ["true", "false"]: + CONFIGS += [ + IssueConfig(f"lama-hm2-pref-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--if-unit-cost", + "--evaluator", + f"hlm=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff=ff()", + "--search", """iterated([ + 
lazy_greedy([hff,hlm],preferred=[hff,hlm]), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), + lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) + ],repeat_last=true,continue_on_fail=true)""", + "--if-non-unit-cost", + "--evaluator", + f"hlm1=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff1=ff(transform=adapt_costs(one))", + "--evaluator", + f"hlm2=lmcount(lm_reasonable_orders_hps(lm_hm(m=2)),transform=adapt_costs(plusone),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", + "--search", """iterated([ + lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], + cost_type=one,reopen_closed=false), + lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], + reopen_closed=false), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), + lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) + ],repeat_last=true,continue_on_fail=true)""", + # Append --always to be on the safe side if we want to append + # additional options later. 
+ "--always"]) + ] + + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="simon.dold@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"]) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=3) + +exp = IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time", min_wins=True), +] + +exp.add_step('build', exp.build) +exp.add_step('start', exp.start_runs) +exp.add_fetcher(name='fetch', merge=True) + +exp.add_absolute_report_step(attributes=ATTRIBUTES) +exp.add_scatter_plot_step(relative=True, attributes=["cost"]) +exp.add_scatter_plot_step(relative=False, attributes=["cost"]) + +exp.add_archive_step(ARCHIVE_PATH) + +exp.run_steps() diff --git a/experiments/issue1072/v1-first.py b/experiments/issue1072/v1-first.py new file mode 100755 index 0000000000..d38adafdfd --- /dev/null +++ b/experiments/issue1072/v1-first.py @@ -0,0 +1,81 @@ +#! 
/usr/bin/env python3 + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment +from lab.reports import Attribute + +import common_setup +from common_setup import IssueConfig, IssueExperiment + +ARCHIVE_PATH = "ai/downward/issue1075" +DIR = os.path.dirname(os.path.abspath(__file__)) +REPO_DIR = os.environ["DOWNWARD_REPO"] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = [ + "issue1072-v1", +] + +CONFIGS = [] + +for use_preferrence_hierarchy in ["true", "false"]: + for use_preferred_operators_conjunctive in ["true", "false"]: + CONFIGS += [ + IssueConfig( + f"lama-hm2-first-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--evaluator", + "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" + f"lm_hm(m=2)),transform=adapt_costs(one),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--evaluator", "hff=ff(transform=adapt_costs(one))", + "--search", + "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one,reopen_closed=false)"]), + IssueConfig( + f"lm_hm2-hierarchy={use_preferrence_hierarchy}-conjunctive={use_preferred_operators_conjunctive}", + ["--evaluator", + f"hlm=lmcount(lm_factory=lm_hm(m=2),pref=true, use_preferrence_hierarchy={use_preferrence_hierarchy}, use_preferred_operators_conjunctive={use_preferred_operators_conjunctive})", + "--search", + "eager_greedy([hlm],preferred=[hlm])"]), + ] + + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="simon.dold@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"]) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=3) + +exp = IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) 
+exp.add_parser(exp.SINGLE_SEARCH_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser("landmark_parser.py") + +ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ + Attribute("landmarks", min_wins=False), + Attribute("landmarks_disjunctive", min_wins=False), + Attribute("landmarks_conjunctive", min_wins=False), + Attribute("orderings", min_wins=False), + Attribute("lmgraph_generation_time", min_wins=True), +] + +exp.add_step('build', exp.build) +exp.add_step('start', exp.start_runs) +exp.add_fetcher(name='fetch', merge=True) + +exp.add_absolute_report_step(attributes=ATTRIBUTES) +exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"]) +exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"]) + +exp.add_archive_step(ARCHIVE_PATH) + +exp.run_steps() diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc index 75d4aacf77..9ef42de154 100644 --- a/src/search/landmarks/landmark_count_heuristic.cc +++ b/src/search/landmarks/landmark_count_heuristic.cc @@ -222,8 +222,6 @@ void LandmarkCountHeuristic::generate_preferred_operators( assert(successor_generator); vector applicable_operators; successor_generator->generate_applicable_ops(state, applicable_operators); - vector preferred_operators_simple; - vector preferred_operators_disjunctive; bool all_landmarks_reached = true; for (int i = 0; i < reached.size(); ++i) { @@ -243,25 +241,20 @@ void LandmarkCountHeuristic::generate_preferred_operators( LandmarkNode *lm_node = lgraph->get_node(fact_proxy.get_pair()); if (lm_node && landmark_is_interesting( state, reached, *lm_node, all_landmarks_reached)) { - if (lm_node->get_landmark().disjunctive) { - preferred_operators_disjunctive.push_back(op_id); - } else { - preferred_operators_simple.push_back(op_id); + set_preferred(op); + } + if (lgraph->contains_conjunctive_landmark(fact_proxy.get_pair())) { + vector conjunctive_landmarks = 
lgraph->get_conjunctive_landmarks(fact_proxy.get_pair()); + for (auto conj_lm : conjunctive_landmarks) { + if (landmark_is_interesting( + state, reached, *conj_lm, all_landmarks_reached)) { + set_preferred(op); + break; + } } } } } - - OperatorsProxy operators = task_proxy.get_operators(); - if (preferred_operators_simple.empty()) { - for (OperatorID op_id : preferred_operators_disjunctive) { - set_preferred(operators[op_id]); - } - } else { - for (OperatorID op_id : preferred_operators_simple) { - set_preferred(operators[op_id]); - } - } } bool LandmarkCountHeuristic::landmark_is_interesting( diff --git a/src/search/landmarks/landmark_graph.cc b/src/search/landmarks/landmark_graph.cc index 812147d6b9..9fbc94c7ad 100644 --- a/src/search/landmarks/landmark_graph.cc +++ b/src/search/landmarks/landmark_graph.cc @@ -57,6 +57,10 @@ LandmarkNode &LandmarkGraph::get_disjunctive_landmark(const FactPair &fact) cons return *(disjunctive_landmarks_to_nodes.find(fact)->second); } +const vector &LandmarkGraph::get_conjunctive_landmarks(const FactPair &fact) const { + assert(contains_conjunctive_landmark(fact)); + return conjunctive_landmarks_to_nodes.find(fact)->second; +} bool LandmarkGraph::contains_simple_landmark(const FactPair &lm) const { return simple_landmarks_to_nodes.count(lm) != 0; @@ -66,6 +70,10 @@ bool LandmarkGraph::contains_disjunctive_landmark(const FactPair &lm) const { return disjunctive_landmarks_to_nodes.count(lm) != 0; } +bool LandmarkGraph::contains_conjunctive_landmark(const FactPair &lm) const { + return conjunctive_landmarks_to_nodes.count(lm) != 0; +} + bool LandmarkGraph::contains_overlapping_disjunctive_landmark( const set &lm) const { // Test whether ONE of the facts is present in some disjunctive landmark. 
@@ -116,6 +124,14 @@ LandmarkNode &LandmarkGraph::add_landmark(Landmark &&landmark) { } ++num_disjunctive_landmarks; } else if (lm.conjunctive) { + for (const FactPair &lm_fact: lm.facts) { + auto it = conjunctive_landmarks_to_nodes.find(lm_fact); + if (it == conjunctive_landmarks_to_nodes.end()) { + conjunctive_landmarks_to_nodes.emplace(lm_fact, vector{new_node_p}); + } else { + (it->second).push_back(new_node_p); + } + } ++num_conjunctive_landmarks; } else { simple_landmarks_to_nodes.emplace(lm.facts.front(), new_node_p); @@ -142,6 +158,13 @@ void LandmarkGraph::remove_node_occurrences(LandmarkNode *node) { } } else if (landmark.conjunctive) { --num_conjunctive_landmarks; + for (const FactPair &lm_fact: landmark.facts) { + vector *conjunctive_landmarks_vector = &(conjunctive_landmarks_to_nodes.find( + lm_fact)->second); + auto it = find(conjunctive_landmarks_vector->begin(), conjunctive_landmarks_vector->end(), node); + assert(it != conjunctive_landmarks_vector->end()); + conjunctive_landmarks_vector->erase(it); + } } else { simple_landmarks_to_nodes.erase(landmark.facts[0]); } diff --git a/src/search/landmarks/landmark_graph.h b/src/search/landmarks/landmark_graph.h index 072522c638..870b119cff 100644 --- a/src/search/landmarks/landmark_graph.h +++ b/src/search/landmarks/landmark_graph.h @@ -78,6 +78,7 @@ class LandmarkGraph { utils::HashMap simple_landmarks_to_nodes; utils::HashMap disjunctive_landmarks_to_nodes; + utils::HashMap> conjunctive_landmarks_to_nodes; Nodes nodes; void remove_node_occurrences(LandmarkNode *node); @@ -119,6 +120,7 @@ class LandmarkGraph { /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. */ LandmarkNode &get_disjunctive_landmark(const FactPair &fact) const; + const std::vector &get_conjunctive_landmarks(const FactPair &fact) const; /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. 
It is not needed by @@ -126,6 +128,7 @@ class LandmarkGraph { bool contains_simple_landmark(const FactPair &lm) const; /* Only used internally. */ bool contains_disjunctive_landmark(const FactPair &lm) const; + bool contains_conjunctive_landmark(const FactPair &lm) const; /* This is needed only by landmark graph factories and will disappear when moving landmark graph creation there. It is not needed by HMLandmarkFactory*/