diff --git a/.github/workflows/lha_bot.yml b/.github/workflows/lha_bot.yml
new file mode 100644
index 000000000..d70c4a948
--- /dev/null
+++ b/.github/workflows/lha_bot.yml
@@ -0,0 +1,44 @@
+# A single CI script with a GitHub workflow.
+name: LHA Benchmarks
+
+on:
+  push:
+    branches-ignore:
+      - "*"
+    tags:
+  pull_request:
+    types:
+      - closed
+      - ready_for_review
+      - review_requested
+  workflow_dispatch:
+
+jobs:
+  lhabench:
+    name: LHA paper Benchmarks
+    runs-on: ubuntu-latest
+    container:
+      image: ghcr.io/nnpdf/bench-evol:v2
+      credentials:
+        username: ${{ github.repository_owner }}
+        password: ${{ secrets.GITHUB_TOKEN }}
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          # tags needed for dynamic versioning
+          fetch-depth: 0
+      - name: Install and configure Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: false
+          installer-parallel: true
+      - name: Install project
+        run: |
+          poetry install --no-interaction --with test -E mark -E box
+      - name: Install task runner
+        run: pip install poethepoet
+      - name: Run benchmark
+        run: |
+          poe lha -m "nnlo and sv"
+          poe lha -m "ffns_pol and sv"
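
Note: the two `poe lha -m ...` steps above select benchmarks through pytest marker expressions (the markers are registered in pyproject.toml further down). A minimal local equivalent that bypasses poe — a sketch, assuming pytest and the benchmark extras are installed:

# Sketch: run the same selection as the CI job, without the poe task runner.
# "-m" takes a boolean marker expression; "-s" disables output capture so the
# eko log stream (see runner.py below) reaches the terminal.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["benchmarks/lha_paper_bench.py", "-s", "-m", "nnlo and sv"]))
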
diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py
index 821908689..e5826c946 100644
--- a/benchmarks/lha_paper_bench.py
+++ b/benchmarks/lha_paper_bench.py
@@ -1,9 +1,12 @@
 """
 Benchmark to :cite:`Giele:2002hx` (LO + NLO) and :cite:`Dittmar:2005ed` (NNLO).
 """
+import argparse
+import os
 from math import nan

 import numpy as np
+import pytest
 from banana import register

 from eko.interpolation import lambertgrid
@@ -36,7 +39,7 @@
 # ffns_skip_pdfs.extend([-5, 5, "T24"])


-class LHABenchmark(Runner):
+class LHA(Runner):
     """Globally set the external program to LHA."""

     def __init__(self):
@@ -121,16 +124,52 @@ def run_lha(self, theory_updates):
             ["ToyLH"],
         )

-    def benchmark_plain(self, pto):
+    def run_plain(self, pto):
         """Run plain configuration."""
         self.run_lha(self.plain_theory(pto))

-    def benchmark_sv(self, pto):
+    def run_sv(self, pto):
         """Run scale variations."""
         self.run_lha(self.sv_theories(pto))


-class BenchmarkVFNS(LHABenchmark):
+class BaseBenchmark:
+    """Abstract common benchmark tasks."""
+
+    def runner(self) -> LHA:
+        """Runner to run."""
+        raise NotImplementedError("runner method has to be overwritten!")
+
+    def transformed_runner(self):
+        """Prepare runner for benchmark setup."""
+        r = self.runner()
+        r.log_to_stdout = os.environ.get("EKO_LOG_STDOUT", False)
+        return r
+
+    @pytest.mark.lo
+    def benchmark_plain_lo(self):
+        self.transformed_runner().run_plain(0)
+
+    @pytest.mark.nlo
+    def benchmark_plain_nlo(self):
+        self.transformed_runner().run_plain(1)
+
+    @pytest.mark.nnlo
+    def benchmark_plain_nnlo(self):
+        self.transformed_runner().run_plain(2)
+
+    @pytest.mark.nlo
+    @pytest.mark.sv
+    def benchmark_sv_nlo(self):
+        self.transformed_runner().run_sv(1)
+
+    @pytest.mark.nnlo
+    @pytest.mark.sv
+    def benchmark_sv_nnlo(self):
+        self.transformed_runner().run_sv(2)
+
+
+class VFNS(LHA):
     """Provide |VFNS| settings."""

     def __init__(self):
@@ -148,7 +187,13 @@ def __init__(self):
         )


-class BenchmarkFFNS(LHABenchmark):
+@pytest.mark.vfns
+class BenchmarkVFNS(BaseBenchmark):
+    def runner(self):
+        return VFNS()
+
+
+class FFNS(LHA):
     """Provide |FFNS| settings."""

     def __init__(self):
@@ -188,7 +233,51 @@ def skip_pdfs(theory):
         return ffns_skip_pdfs


-class BenchmarkRunner(BenchmarkVFNS):
+@pytest.mark.ffns
+class BenchmarkFFNS(BaseBenchmark):
+    def runner(self):
+        return FFNS()
+
+
+class FFNS_polarized(FFNS):
+    def run_lha(self, theory_updates):
+        """Enforce operator grid and PDF.
+
+        Parameters
+        ----------
+        theory_updates : list(dict)
+            theory updates
+        """
+        self.run(
+            theory_updates,
+            [
+                {
+                    "Q2grid": [1e4],
+                    "ev_op_iterations": 10,
+                    "interpolation_xgrid": lambertgrid(60).tolist(),
+                    "polarized": True,
+                }
+            ],
+            ["ToyLH_polarized"],
+        )
+
+
+@pytest.mark.ffns_pol
+class BenchmarkFFNS_polarized(BaseBenchmark):
+    def runner(self):
+        return FFNS_polarized()
+
+    @pytest.mark.nnlo
+    def benchmark_plain_nnlo(self):
+        pass
+
+    @pytest.mark.nnlo
+    @pytest.mark.sv
+    def benchmark_sv_nnlo(self):
+        pass
+
+
+class CommonRunner(VFNS):
     """Generic benchmark runner using the LHA |VFNS| settings."""

     def __init__(self, external):
@@ -218,42 +307,3 @@ def benchmark_sv(self, pto):
         high["XIR"] = np.sqrt(0.5)

         self.run_lha([low, high])
-
-
-class BenchmarkFFNS_polarized(BenchmarkFFNS):
-    def run_lha(self, theory_updates):
-        """Enforce operator grid and PDF.
-
-        Parameters
-        ----------
-        theory_updates : list(dict)
-            theory updates
-        """
-        self.run(
-            theory_updates,
-            [
-                {
-                    "mugrid": [100],
-                    "ev_op_iterations": 10,
-                    "interpolation_xgrid": lambertgrid(60).tolist(),
-                    "polarized": True,
-                }
-            ],
-            ["ToyLH_polarized"],
-        )
-
-
-if __name__ == "__main__":
-    # Benchmark to LHA
-    # obj = BenchmarkFFNS_polarized()
-    # obj = BenchmarkFFNS()
-    obj = BenchmarkVFNS()
-    # obj.benchmark_plain(1)
-    obj.benchmark_sv(2)
-
-    # # VFNS benchmarks with LHA settings
-    # programs = ["LHA", "pegasus", "apfel"]
-    # for p in programs:
-    #     obj = BenchmarkRunner(p)
-    #     # obj.benchmark_plain(2)
-    #     obj.benchmark_sv(2)
diff --git a/pyproject.toml b/pyproject.toml
index a385d9f79..043e32d4b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -115,7 +115,8 @@ bench-run.env.NUMBA_DISABLE_JIT.default = "0"
 lint = "pylint src/**/*.py -E"
 lint-warnings = "pylint src/**/*.py --exit-zero"
 sandbox = "python benchmarks/sandbox.py"
-lha = "python benchmarks/lha_paper_bench.py"
+lha.cmd = "pytest benchmarks/lha_paper_bench.py -s"
+lha.env.NUMBA_DISABLE_JIT.default = "0"
 nav = "ekonav --config benchmarks/banana.yaml"
 navigator = "ekonav --config benchmarks/banana.yaml"
 docs = { "shell" = "cd doc; make html" }
@@ -145,7 +146,16 @@ addopts = [
   '--strict-markers',
 ]
 env = ["D:NUMBA_DISABLE_JIT=1"]
-markers = ["isolated: marks benchmarks as isolated"]
+markers = [
+  "isolated: marks benchmarks as isolated",
+  "ffns: Fixed flavor configuration",
+  "ffns_pol: Polarized fixed flavor configuration",
+  "vfns: Variable flavor configuration",
+  "lo: Leading order",
+  "nlo: Next-to-leading order",
+  "nnlo: Next-to-next-to-leading order",
+  "sv: Scale variations",
+]

 [tool.pylint.master]
 # extensions not to check
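
Because `--strict-markers` is already part of `addopts`, every marker used by the benchmark classes must appear in the `markers` list above, or collection fails. A sketch of the effect, assuming the repository's existing collection rules that pick up `benchmark_*` functions; the `n3lo` marker is hypothetical and deliberately unregistered:

import pytest

@pytest.mark.nnlo  # registered above: collected and selectable via -m "nnlo"
def benchmark_example_nnlo():
    ...

@pytest.mark.n3lo  # hypothetical, NOT registered: error under --strict-markers
def benchmark_example_n3lo():
    ...
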
diff --git a/src/ekomark/benchmark/runner.py b/src/ekomark/benchmark/runner.py
index 8292f8460..5a59f80cf 100644
--- a/src/ekomark/benchmark/runner.py
+++ b/src/ekomark/benchmark/runner.py
@@ -27,6 +27,7 @@ class Runner(BenchmarkRunner):
     rotate_to_evolution_basis = False
     sandbox = False
     plot_operator = False
+    log_to_stdout = True

     def __init__(self):
         self.banana_cfg = banana_cfg.cfg
@@ -74,12 +75,13 @@ def run_me(self, theory, ocard, _pdf):
             DGLAP result
         """
         # activate logging
-        logStdout = logging.StreamHandler(sys.stdout)
-        logStdout.setLevel(logging.INFO)
-        logStdout.setFormatter(logging.Formatter("%(message)s"))
-        logging.getLogger("eko").handlers = []
-        logging.getLogger("eko").addHandler(logStdout)
-        logging.getLogger("eko").setLevel(logging.INFO)
+        if self.log_to_stdout:
+            logStdout = logging.StreamHandler(sys.stdout)
+            logStdout.setLevel(logging.INFO)
+            logStdout.setFormatter(logging.Formatter("%(message)s"))
+            logging.getLogger("eko").handlers = []
+            logging.getLogger("eko").addHandler(logStdout)
+            logging.getLogger("eko").setLevel(logging.INFO)

         ops_id = f"o{ocard['hash'][:6]}_t{theory['hash'][:6]}"
         root = banana_cfg.cfg["paths"]["database"].parents[0]
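
For completeness, a minimal sketch of how the new `log_to_stdout` flag interacts with the `EKO_LOG_STDOUT` variable read in `BaseBenchmark.transformed_runner`; since `os.environ.get` returns a string, any non-empty value enables the logging branch above. The import path is hypothetical and depends on how the benchmarks directory ends up on `sys.path`:

import os

from lha_paper_bench import BenchmarkVFNS  # hypothetical import path

os.environ["EKO_LOG_STDOUT"] = "1"  # truthy string -> handlers are attached
BenchmarkVFNS().benchmark_plain_lo()  # LO run with eko logs on stdout
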