diff --git a/.gitignore b/.gitignore index e376a263e..23745b641 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ docs/src_examples/*/*.png # Tests .pytest_cache tests/.ipynb_checkpoints +.coverage* # IDE-specific .idea diff --git a/CHANGELOG.md b/CHANGELOG.md index 89bd0c698..d9fe6909d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,12 +2,22 @@ - Documented uses of `Model` class in general contexts (previously only cosmo) - `Model` methods to compute log-probabilities and derived parameters now have an `as_dict` keyword (default `False`), for more informative return value. +- Added ``--minimize`` flag to ``cobaya-run`` for quick minimization (replaces sampler, uses previous output). ### Cosmological likelihoods and theory codes +- `Pk_interpolator`: added extrapolation up to `extrap_kmin` and improved robustness + #### CAMB - Removed problematic `zrei: zre` alias (fixes #199, thanks @pcampeti) +- Added `Omega_b|cdm|nu_massive(z)` and `angular_diameter_distance_2` +- Returned values for `get_sigma_R` changed from `R, z, sigma(z, R)` to `z, R, sigma(z, R)`. + +#### CLASS + +- Updated to v3.1.2 +- Added `Omega_b|cdm|nu_massive(z)`, `angular_diameter_distance_2`, `sigmaR(z)`, `sigma8(z)`, `fsgima8(z)` and Weyl potential power spectrum. #### BAO diff --git a/cobaya/cosmo_input/input_database.py b/cobaya/cosmo_input/input_database.py index b2216c75c..56e452f82 100644 --- a/cobaya/cosmo_input/input_database.py +++ b/cobaya/cosmo_input/input_database.py @@ -399,7 +399,7 @@ # EXPERIMENTS ############################################################################ base_precision: InfoDict = {"camb": {"halofit_version": "mead"}, - "classy": {"non linear": "hmcode", "hmcode_min_k_max": 20}} + "classy": {"non linear": "hmcode", "nonlinear_min_k_max": 20}} cmb_precision = deepcopy(base_precision) cmb_precision["camb"].update({"bbn_predictor": "PArthENoPE_880.2_standard.dat", "lens_potential_accuracy": 1}) diff --git a/cobaya/input.py b/cobaya/input.py index 2dc76cbf6..9e4c18af5 100644 --- a/cobaya/input.py +++ b/cobaya/input.py @@ -168,10 +168,10 @@ def get_info_path(folder, prefix, infix=None, kind="updated", ext=Extension.yaml def get_used_components(*infos, return_infos=False): """ - Returns all requested components as an dict ``{kind: set([components])}``. + Returns all requested components as a dict ``{kind: set([components])}``. Priors are not included. - If ``return_infos=True`` (default: ``False``), returns too a dictionary of inputs per + If ``return_infos=True`` (default: ``False``), also returns a dictionary of inputs per component, updated in the order in which the info arguments are given. Components which are just renames of others (i.e. defined with `class_name`) return @@ -640,7 +640,7 @@ def get_class_path(cls) -> str: def get_file_base_name(cls) -> str: """ Gets the string used as the name for .yaml, .bib files, typically the - class name or a un-CamelCased class name + class name or an un-CamelCased class name """ return cls.__dict__.get('file_base_name') or cls.__name__ diff --git a/cobaya/likelihoods/base_classes/des.py b/cobaya/likelihoods/base_classes/des.py index 040cee2df..30676fecd 100644 --- a/cobaya/likelihoods/base_classes/des.py +++ b/cobaya/likelihoods/base_classes/des.py @@ -567,10 +567,10 @@ def chi_squared(self, theory, return_theory_vector=False): def logp(self, **params_values): PKdelta = self.provider.get_Pk_interpolator(("delta_tot", "delta_tot"), - extrap_kmax=500 * self.acc) + extrap_kmax=3000 * self.acc) if self.use_Weyl: PKWeyl = self.provider.get_Pk_interpolator(("Weyl", "Weyl"), - extrap_kmax=500 * self.acc) + extrap_kmax=3000 * self.acc) else: PKWeyl = None diff --git a/cobaya/likelihoods/base_classes/sn.py b/cobaya/likelihoods/base_classes/sn.py index a5e126728..fa75646b7 100644 --- a/cobaya/likelihoods/base_classes/sn.py +++ b/cobaya/likelihoods/base_classes/sn.py @@ -19,13 +19,13 @@ .. note:: - If you use ``sn.pantheon``, please cite:|br| - Scolnic, D. M. et al, + Scolnic, D. M. et al., `The Complete Light-curve Sample of Spectroscopically Confirmed Type Ia Supernovae from Pan-STARRS1 and Cosmological Constraints from The Combined Pantheon Sample` `(arXiv:1710.00845) `_ - If you use ``sn.jla`` or ``sn.jla_lite``, please cite:|br| - Betoule, M. et al, + Betoule, M. et al., `Improved cosmological constraints from a joint analysis of the SDSS-II and SNLS supernova samples` `(arXiv:1401.4064) `_ diff --git a/cobaya/log.py b/cobaya/log.py index bde3b660b..9f12b6f5e 100644 --- a/cobaya/log.py +++ b/cobaya/log.py @@ -136,7 +136,7 @@ def logger_setup(debug=None, debug_file=None): """ Configuring the root logger, for its children to inherit level, format and handlers. - Level: if debug=True, take DEBUG. If numerical, use "logging"'s corresponding level. + Level: if debug=True, take DEBUG. If numerical, use ""logging""'s corresponding level. Default: INFO """ if debug is True or os.getenv('COBAYA_DEBUG'): diff --git a/cobaya/model.py b/cobaya/model.py index 0161126ea..1453ab71b 100644 --- a/cobaya/model.py +++ b/cobaya/model.py @@ -586,7 +586,7 @@ def get_valid_point(self, max_tries: int, ignore_fixed_ref: bool = False, ) -> Union[Tuple[np.ndarray, LogPosterior], Tuple[np.ndarray, dict]]: """ - Finds a point with finite posterior, sampled from from the reference pdf. + Finds a point with finite posterior, sampled from the reference pdf. It will fail if no valid point is found after `max_tries`. diff --git a/cobaya/mpi.py b/cobaya/mpi.py index 8602ce0f7..62cdc186c 100644 --- a/cobaya/mpi.py +++ b/cobaya/mpi.py @@ -173,7 +173,7 @@ def allgather(data) -> list: def zip_gather(list_of_data, root=0) -> Iterable[tuple]: """ - Takes a list of items and returns a iterable of lists of items from each process + Takes a list of items and returns an iterable of lists of items from each process e.g. for root node [(a_1, a_2),(b_1,b_2),...] = zip_gather([a,b,...]) """ diff --git a/cobaya/output.py b/cobaya/output.py index 37c582ffc..69edf4b76 100644 --- a/cobaya/output.py +++ b/cobaya/output.py @@ -369,7 +369,7 @@ def check_and_dump_info(self, input_info, updated_info, check_compatible=True, "%s:%s, but you are trying to resume a " "run that used a newer version: %r.", new_version, k, c, old_version) - # If resuming, we don't want to to *partial* dumps + # If resuming, we don't want to do *partial* dumps if ignore_blocks and self.is_resuming(): return # Work on a copy of the input info, since we are updating the prefix diff --git a/cobaya/parameterization.py b/cobaya/parameterization.py index 61caab939..3caaef9d8 100644 --- a/cobaya/parameterization.py +++ b/cobaya/parameterization.py @@ -44,7 +44,7 @@ def is_derived_param(info_param: ParamInput) -> bool: def expand_info_param(info_param: ParamInput, default_derived=True) -> ParamDict: """ - Expands the info of a parameter, from the user friendly, shorter format + Expands the info of a parameter, from the user-friendly, shorter format to a more unambiguous one. """ info_param = deepcopy_where_possible(info_param) diff --git a/cobaya/prior.py b/cobaya/prior.py index 097248ce9..a15fb18f8 100644 --- a/cobaya/prior.py +++ b/cobaya/prior.py @@ -50,7 +50,7 @@ as they are understood for each particular pdf in :class:`scipy.stats`; e.g. for a ``uniform`` pdf, ``loc`` is the lower bound and ``scale`` is the length of the domain, whereas in a gaussian (``norm``) ``loc`` is the mean and ``scale`` is the standard - deviation). + deviation. + Additional specific parameters of the distribution, e.g. ``a`` and ``b`` as the powers of a Beta pdf. @@ -112,8 +112,8 @@ custom priors "`external` priors". Inside the ``prior`` block, list a pair of priors as ``[name]: [function]``, where the -functions must return **log**-priors. This priors will be multiplied by the -one-dimensional ones defined above. Even if you define an prior for some parameters +functions must return **log**-priors. These priors will be multiplied by the +one-dimensional ones defined above. Even if you define a prior for some parameters in the ``prior`` block, you still have to specify their bounds in the ``params`` block. A prior function can be specified in two different ways: @@ -144,7 +144,7 @@ External priors can only be functions **sampled** and **fixed** and **derived** parameters that are dynamically defined in terms of other inputs. - Derived parameters computed by the theory code cannot be used in in a prior, since + Derived parameters computed by the theory code cannot be used in a prior, since otherwise the full prior could not be computed **before** the likelihood, preventing us from avoiding computing the likelihood when the prior is null, or forcing a *post-call* to the prior. @@ -183,7 +183,7 @@ Defining parameters dynamically ------------------------------- -We may want to sample in a parameter space different than the one understood by the +We may want to sample in a parameter space different from the one understood by the likelihood, e.g. because we expect the posterior to be simpler on the alternative parameters. diff --git a/cobaya/run.py b/cobaya/run.py index cd0f9e76b..a26fd3cb5 100644 --- a/cobaya/run.py +++ b/cobaya/run.py @@ -36,6 +36,7 @@ def run(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], debug: Union[bool, int, None] = None, stop_at_error: Optional[bool] = None, resume: bool = False, force: bool = False, + minimize: Optional[bool] = None, no_mpi: bool = False, test: bool = False, override: Optional[InputDict] = None, ) -> Union[InfoSamplerTuple, PostTuple]: @@ -50,6 +51,7 @@ def run(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], :param stop_at_error: stop if an error is raised :param resume: continue an existing run :param force: overwrite existing output if it exists + :param minimize: if true, ignores the sampler and runs default minimizer :param no_mpi: run without MPI :param test: only test initialization rather than actually running :param override: option dictionary to merge into the input one, overriding settings @@ -59,7 +61,7 @@ def run(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], """ # This function reproduces the model-->output-->sampler pipeline one would follow - # when instantiating by hand, but alters the order to performs checks and dump info + # when instantiating by hand, but alters the order to perform checks and dump info # as early as possible, e.g. to check if resuming possible or `force` needed. if no_mpi or test: mpi.set_mpi_disabled() @@ -77,6 +79,9 @@ def run(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], info["resume"] = bool(resume) info["force"] = bool(force) if info.get("post"): + if minimize: + raise ValueError( + "``minimize`` option is incompatible with post-processing.") if isinstance(output, str) or output is False: info["post"]["output"] = output or None return post(info) @@ -94,6 +99,11 @@ def run(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], # GetDist needs to know the original sampler, so don't overwrite if minimizer try: which_sampler = list(info["sampler"])[0] + if minimize: + # Preserve options if "minimize" was already the sampler + if which_sampler.lower() != "minimize": + info["sampler"] = {"minimize": None} + which_sampler = "minimize" except (KeyError, TypeError): raise LoggedError( logger_run, "You need to specify a sampler using the 'sampler' key " @@ -192,6 +202,9 @@ def run_script(args=None): "(use with care!)") parser.add_argument("--%s" % "test", action="store_true", help="Initialize model and sampler, and exit.") + parser.add_argument("--minimize", action="store_true", + help=("Replaces the sampler in the input and runs a minimization " + "process (incompatible with post-processing).")) parser.add_argument("--version", action="version", version=get_version()) parser.add_argument("--no-mpi", action='store_true', help="disable MPI when mpi4py installed but MPI does " diff --git a/cobaya/samplers/mcmc/proposal.py b/cobaya/samplers/mcmc/proposal.py index 61742919f..8124df8bc 100644 --- a/cobaya/samplers/mcmc/proposal.py +++ b/cobaya/samplers/mcmc/proposal.py @@ -255,7 +255,7 @@ def set_covariance(self, propose_matrix): """ Take covariance of sampled parameters (propose_matrix), and construct orthonormal parameters where orthonormal parameters are grouped in blocks by speed, so changes - in slowest block changes slow and fast parameters, but changes in the fastest + in the slowest block changes slow and fast parameters, but changes in the fastest block only changes fast parameters :param propose_matrix: covariance matrix for the sampled parameters. diff --git a/cobaya/samplers/minimize/minimize.py b/cobaya/samplers/minimize/minimize.py index ffce91754..84fa66a6d 100644 --- a/cobaya/samplers/minimize/minimize.py +++ b/cobaya/samplers/minimize/minimize.py @@ -6,12 +6,10 @@ This is a **maximizer** for posteriors or likelihoods, based on `scipy.optimize.Minimize `_ -and `Py-BOBYQA `_ -(added in 2.0). +and `Py-BOBYQA `_. -.. note:: - - BOBYQA tends to work better on Cosmological problems with the default settings. +The default is BOBYQA, which tends to work better on Cosmological problems with default +settings. .. |br| raw:: html @@ -36,13 +34,23 @@ **If you use scipy**, you can find `the appropriate references here `_. -It works more effectively when run on top of a Monte Carlo sample: just change the sampler -for ``Minimize`` with the desired options, and it will use as a starting point the -*maximum a posteriori* (MAP) or best fit (maximum likelihood, or minimal :math:`\chi^2`) -found so far, as well as the covariance matrix of the sample for rescaling of the -parameter jumps. +It works more effectively when run on top of a Monte Carlo sample: it will use the maximum +a posteriori as a starting point (or the best fit, depending on whether the prior is +ignored, :ref:`see below `), and the recovered covariance matrix of the +posterior to rescale the variables. + +To take advantage of a previous run with a Monte Carlo sampler, either: + +- change the ``sampler`` to ``minimize`` in the input file, + +- or, if running from the shell, repeat the ``cobaya-run`` command used for the original + run, adding the ``--minimize`` flag. -As text output, it produces two different files: +When called from a Python script, Cobaya's ``run`` function returns the updated info +and the products described below in the method +:func:`samplers.minimize.Minimize.products` (see below). + +If text output is requested, it produces two different files: - ``[output prefix].minimum.txt``, in :ref:`the same format as Cobaya samples `, @@ -50,10 +58,6 @@ - ``[output prefix].minimum``, the equivalent **GetDist-formatted** file. -If ``ignore_prior: True``, those files are named ``.bestfit[.txt]`` instead of ``minimum``, -and contain the best-fit (maximum of the likelihood) instead of the MAP -(maximum of the posterior). - .. warning:: For historical reasons, in the first two lines of the GetDist-formatted output file @@ -62,10 +66,6 @@ :math:`-2` times the sum of the individual :math:`\chi^2` (``chi2__``, with double underscore) in the table that follows these first lines. -When called from a Python script, Cobaya's ``run`` function returns the updated info -and the products described below in the method -:func:`products `. - It is recommended to run a couple of parallel MPI processes: it will finally pick the best among the results. @@ -77,6 +77,18 @@ want to refine the convergence parameters (see ``override`` options in the ``yaml`` below). + +.. _minimize_like: + +Maximizing the likelihood instead of the posterior +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To maximize the likelihood, add ``ignore_prior: True`` in the ``minimize`` input block. + +When producing text output, the generated files are named ``.bestfit[.txt]`` instead of +``minimum``, and contain the best-fit (maximum of the likelihood) instead of the MAP +(maximum of the posterior). + """ # Global diff --git a/cobaya/samplers/polychord/polychord.py b/cobaya/samplers/polychord/polychord.py index 442cfd19b..4b06c603f 100644 --- a/cobaya/samplers/polychord/polychord.py +++ b/cobaya/samplers/polychord/polychord.py @@ -52,6 +52,8 @@ class polychord(Sampler): oversample_power: float nlive: NumberWithUnits path: str + logzero: float + max_ndead: int def initialize(self): """Imports the PolyChord sampler and prepares its arguments.""" diff --git a/cobaya/theories/camb/camb.py b/cobaya/theories/camb/camb.py index 7fe2b8372..2b91cf06f 100644 --- a/cobaya/theories/camb/camb.py +++ b/cobaya/theories/camb/camb.py @@ -118,7 +118,7 @@ * [**Recommended for staying up-to-date**] To install CAMB locally and keep it up-to-date, clone the - `CAMB repository in Github `_ + `CAMB repository in GitHub `_ in some folder of your choice, say ``/path/to/theories/CAMB``: .. code:: bash @@ -138,7 +138,7 @@ $ python -m pip install -e /path/to/CAMB * [**Recommended for modifying CAMB**] - First, `fork the CAMB repository in Github `_ + First, `fork the CAMB repository in GitHub `_ (follow `these instructions `_) and then follow the same steps as above, substituting the second one with: @@ -147,8 +147,7 @@ $ git clone --recursive https://[YourGithubUser]@github.com/[YourGithubUser]/CAMB.git * To use your own version, assuming it's placed under ``/path/to/theories/CAMB``, - just make sure it is compiled (and that the version on top of which you based your - modifications is old enough to have the Python interface implemented. + just make sure it is compiled. In the cases above, you **must** specify the path to your CAMB installation in the input block for CAMB (otherwise a system-wide CAMB may be used instead): @@ -161,7 +160,7 @@ .. note:: - In any of these methods, if you intent to switch between different versions or + In any of these methods, if you intend to switch between different versions or modifications of CAMB you should not install CAMB as python package using ``python setup.py install``, as the official instructions suggest. """ @@ -180,9 +179,9 @@ from cobaya.log import LoggedError, get_logger from cobaya.install import download_github_release, check_gcc_version, NotInstalledError from cobaya.tools import getfullargspec, get_class_methods, get_properties, load_module, \ - VersionCheckError, str_to_list + VersionCheckError, str_to_list, Pool1D, Pool2D, PoolND from cobaya.theory import HelperTheory -from cobaya.typing import InfoDict +from cobaya.typing import InfoDict, empty_dict # Result collector @@ -190,6 +189,8 @@ class Collector(NamedTuple): method: Callable args: list = [] kwargs: dict = {} + z_pool: Optional[PoolND] = None + post: Optional[Callable] = None class CAMBOutputs(NamedTuple): @@ -202,6 +203,7 @@ class CAMB(BoltzmannBase): r""" CAMB cosmological Boltzmann code \cite{Lewis:1999bs,Howlett:2012mh}. """ + # Name of the Class repo/folder and version to download _camb_repo_name = "cmbant/CAMB" _camb_repo_version = os.environ.get("CAMB_REPO_VERSION", "master") @@ -343,24 +345,30 @@ def must_provide(self, **requirements): method=CAMBdata.get_cmb_power_spectra, kwargs={"spectra": ["unlensed_total"], "raw_cl": False}) elif k == "Hubble": - self.collectors[k] = Collector( - method=CAMBdata.h_of_z, - kwargs={"z": self._combine_z(k, v)}) + self.set_collector_with_z_pool(k, v["z"], CAMBdata.h_of_z) + elif k in ["Omega_b", "Omega_cdm", "Omega_nu_massive"]: + varnames = { + "Omega_b": "baryon", "Omega_cdm": "cdm", "Omega_nu_massive": "nu"} + self.set_collector_with_z_pool( + k, v["z"], CAMBdata.get_Omega, kwargs={"var": varnames[k]}) elif k in ("angular_diameter_distance", "comoving_radial_distance"): - self.collectors[k] = Collector( - method=getattr(CAMBdata, k), - kwargs={"z": self._combine_z(k, v)}) + self.set_collector_with_z_pool(k, v["z"], getattr(CAMBdata, k)) + elif k == "angular_diameter_distance_2": + self.set_collector_with_z_pool( + k, v["z_pairs"], CAMBdata.angular_diameter_distance2, d=2) elif k == "sigma8_z": self.add_to_redshifts(v["z"]) self.collectors[k] = Collector( method=CAMBdata.get_sigma8, - kwargs={}) + kwargs={}, + post=(lambda *x: x[::-1])) # returned in inverse order self.needs_perts = True elif k == "fsigma8": self.add_to_redshifts(v["z"]) self.collectors[k] = Collector( method=CAMBdata.get_fsigma8, - kwargs={}) + kwargs={}, + post=(lambda* x: x[::-1])) # returned in inverse order self.needs_perts = True elif isinstance(k, tuple) and k[0] == "sigma_R": kwargs = v.copy() @@ -386,8 +394,9 @@ def get_sigmaR(results, **tmp): "in computed P_K array %s", z) _indices = np.array(z_indices, dtype=np.int32) self._sigmaR_z_indices[var_pair] = _indices - return results.get_sigmaR(hubble_units=False, return_R_z=True, - z_indices=_indices, **tmp) + R, z, sigma = results.get_sigmaR(hubble_units=False, return_R_z=True, + z_indices=_indices, **tmp) + return z, R, sigma kwargs.update(dict(zip(["var1", "var2"], var_pair))) self.collectors[k] = Collector(method=get_sigmaR, kwargs=kwargs) @@ -470,16 +479,33 @@ def get_sigmaR(results, **tmp): return must_provide def add_to_redshifts(self, z): - self.extra_args["redshifts"] = np.sort(np.unique(np.concatenate( - (np.atleast_1d(z), self.extra_args.get("redshifts", [])))))[::-1] - - def _combine_z(self, k, v): - c = self.collectors.get(k, None) - if c: - return np.sort( - np.unique(np.concatenate((c.kwargs['z'], np.atleast_1d(v['z']))))) + """ + Adds redshifts to the list of them for which CAMB computes perturbations. + """ + if not hasattr(self, "z_pool_for_perturbations"): + self.z_pool_for_perturbations = Pool1D(z) + else: + self.z_pool_for_perturbations.update(z) + self.extra_args["redshifts"] = np.flip(self.z_pool_for_perturbations.values) + + def set_collector_with_z_pool(self, k, zs, method, args=(), kwargs=empty_dict, d=1): + """ + Creates a collector for a z-dependent quantity, keeping track of the pool of z's. + """ + if k in self.collectors: + z_pool = self.collectors[k].z_pool + z_pool.update(zs) else: - return np.sort(np.atleast_1d(v['z'])) + Pool = {1: Pool1D, 2: Pool2D}[d] + z_pool = Pool(zs) + if d == 1: + kwargs_with_z = {"z": z_pool.values} + else: + kwargs_with_z = {"z1": np.array(z_pool.values[:, 0]), + "z2": np.array(z_pool.values[:, 1])} + kwargs_with_z.update(kwargs) + self.collectors[k] = Collector( + method=method, z_pool=z_pool, kwargs=kwargs_with_z, args=args) def calculate(self, state, want_derived=True, **params_values_dict): try: @@ -522,6 +548,8 @@ def calculate(self, state, want_derived=True, **params_values_dict): if collector: state[product] = \ collector.method(results, *collector.args, **collector.kwargs) + if collector.post: + state[product] = collector.post(*state[product]) else: state[product] = results except self.camb.baseconfig.CAMBError as e: @@ -630,20 +658,11 @@ def get_unlensed_Cl(self, ell_factor=False, units="FIRASmuK2"): return self._get_Cl(ell_factor=ell_factor, units=units, lensed=False) def _get_z_dependent(self, quantity, z): + # Partially reimplemented because of sigma8_z, etc, use different pool + pool = None if quantity in ["sigma8_z", "fsigma8"]: - computed_redshifts = self.extra_args["redshifts"] - i_kwarg_z = np.concatenate( - [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)]) - else: - computed_redshifts = self.collectors[quantity].kwargs["z"] - i_kwarg_z = np.searchsorted(computed_redshifts, np.atleast_1d(z)) - return np.array(self.current_state[quantity], copy=True)[i_kwarg_z] - - def get_sigma8_z(self, z): - return self._get_z_dependent("sigma8_z", z) - - def get_fsigma8(self, z): - return self._get_z_dependent("fsigma8", z) + pool = self.z_pool_for_perturbations + return super()._get_z_dependent(quantity, z, pool=pool) def get_source_Cl(self): # get C_l^XX from the cosmological code @@ -717,9 +736,9 @@ def set(self, params_values_dict, state): for fixed_param in getfullargspec( getattr(self.camb.CAMBparams, non_param_func)).args[1:]: if fixed_param in args: - raise LoggedError(self.log, - "Trying to sample fixed theory parameter %s", - fixed_param) + raise LoggedError( + self.log, "Trying to sample fixed theory parameter %s", + fixed_param) self._reduced_extra_args.pop(fixed_param, None) if self.extra_attrs: self.log.debug("Setting attributes of CAMBparams: %r", diff --git a/cobaya/theories/classy/classy.py b/cobaya/theories/classy/classy.py index 890054cd0..7e90489ad 100644 --- a/cobaya/theories/classy/classy.py +++ b/cobaya/theories/classy/classy.py @@ -144,16 +144,21 @@ from cobaya.log import LoggedError, get_logger from cobaya.install import download_github_release, pip_install, NotInstalledError, \ check_gcc_version -from cobaya.tools import load_module, VersionCheckError +from cobaya.tools import load_module, VersionCheckError, Pool1D, Pool2D, PoolND, \ + combine_1d +from cobaya.typing import empty_dict # Result collector +# NB: cannot use kwargs for the args, because the CLASS Python interface +# is C-based, so args without default values are not named. class Collector(NamedTuple): method: str args: Sequence = [] args_names: Sequence = [] kwargs: dict = {} arg_array: Union[int, Sequence, None] = None + z_pool: Optional[PoolND] = None post: Optional[Callable] = None @@ -165,9 +170,10 @@ class classy(BoltzmannBase): r""" CLASS cosmological Boltzmann code \cite{Blas:2011rf}. """ + # Name of the Class repo/folder and version to download _classy_repo_name = "lesgourg/class_public" - _min_classy_version = "v2.9.3" + _min_classy_version = "v3.1.2" _classy_min_gcc_version = "6.4" # Lower ones are possible atm, but leak memory! _classy_repo_version = os.environ.get('CLASSY_REPO_VERSION', _min_classy_version) @@ -226,22 +232,24 @@ def must_provide(self, **requirements): self.collectors[k] = Collector( method="raw_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]}) elif k == "Hubble": - self.collectors[k] = Collector( - method="Hubble", - args=[np.atleast_1d(v["z"])], - args_names=["z"], - arg_array=0) + self.set_collector_with_z_pool( + k, v["z"], "Hubble", args_names=["z"], arg_array=0) + elif k in ["Omega_b", "Omega_cdm", "Omega_nu_massive"]: + func_name = {"Omega_b": "Om_b", "Omega_cdm": "Om_cdm", + "Omega_nu_massive": "Om_ncdm"}[k] + self.set_collector_with_z_pool( + k, v["z"], func_name, args_names=["z"], arg_array=0) elif k == "angular_diameter_distance": - self.collectors[k] = Collector( - method="angular_distance", - args=[np.atleast_1d(v["z"])], - args_names=["z"], - arg_array=0) + self.set_collector_with_z_pool( + k, v["z"], "angular_distance", args_names=["z"], arg_array=0) elif k == "comoving_radial_distance": - self.collectors[k] = Collector( - method="z_of_r", - args_names=["z"], - args=[np.atleast_1d(v["z"])]) + self.set_collector_with_z_pool(k, v["z"], "z_of_r", args_names=["z"], + # returns r and dzdr! + post=(lambda r, dzdr: r)) + elif k == "angular_diameter_distance_2": + self.set_collector_with_z_pool( + k, v["z_pairs"], "angular_distance_from_to", + args_names=["z1", "z2"], arg_array=[0, 1], d=2) elif isinstance(k, tuple) and k[0] == "Pk_grid": self.extra_args["output"] += " mPk" v = deepcopy(v) @@ -251,23 +259,53 @@ def must_provide(self, **requirements): # (default: 0.1). But let's leave it like this in case this changes # in the future. self.add_z_for_matter_power(v.pop("z")) - if v["nonlinear"] and "non linear" not in self.extra_args: self.extra_args["non linear"] = non_linear_default_code pair = k[2:] if pair == ("delta_tot", "delta_tot"): v["only_clustering_species"] = False + self.collectors[k] = Collector( + method="get_pk_and_k_and_z", + kwargs=v, + post=(lambda P, kk, z: (kk, z, np.array(P).T))) elif pair == ("delta_nonu", "delta_nonu"): v["only_clustering_species"] = True + self.collectors[k] = Collector( + method="get_pk_and_k_and_z", kwargs=v, + post=(lambda P, kk, z: (kk, z, np.array(P).T))) + elif pair == ("Weyl", "Weyl"): + self.extra_args["output"] += " mTk" + self.collectors[k] = Collector( + method="get_Weyl_pk_and_k_and_z", kwargs=v, + post=(lambda P, kk, z: (kk, z, np.array(P).T))) else: raise LoggedError(self.log, "NotImplemented in CLASS: %r", pair) - self.collectors[k] = Collector( - method="get_pk_and_k_and_z", - kwargs=v, - post=(lambda P, kk, z: (kk, z, np.array(P).T))) + elif k == "sigma8_z": + self.add_z_for_matter_power(v["z"]) + self.set_collector_with_z_pool( + k, v["z"], "sigma", args=[8], args_names=["R", "z"], + kwargs={"h_units": True}, arg_array=1) + elif k == "fsigma8": + self.add_z_for_matter_power(v["z"]) + z_step = 0.1 # left to CLASS default; increasing does not appear to help + self.set_collector_with_z_pool( + k, v["z"], "effective_f_sigma8", args=[z_step], + args_names=["z", "z_step"], arg_array=0) elif isinstance(k, tuple) and k[0] == "sigma_R": - raise LoggedError( - self.log, "Classy sigma_R not implemented as yet - use CAMB only") + self.extra_args["output"] += " mPk" + self.add_P_k_max(v.pop("k_max"), units="1/Mpc") + # NB: See note about redshifts in Pk_grid + self.add_z_for_matter_power(v["z"]) + pair = k[1:] + try: + method = {("delta_tot", "delta_tot"): "sigma", + ("delta_nonu", "delta_nonu"): "sigma_cb"}[pair] + except KeyError: + raise LoggedError(self.log, f"sigma(R,z) not implemented for {pair}") + self.collectors[k] = Collector( + method=method, kwargs={"h_units": False}, args=[v["R"], v["z"]], + args_names=["R", "z"], arg_array=[[0], [1]], + post=(lambda R, z, sigma: (z, R, sigma.T))) elif v is None: k_translated = self.translate_param(k) if k_translated not in self.derived_extra: @@ -275,7 +313,7 @@ def must_provide(self, **requirements): else: raise LoggedError(self.log, "Requested product not known: %r", {k: v}) # Derived parameters (if some need some additional computations) - if any(("sigma8" in s) for s in self.output_params or requirements): + if any(("sigma8" in s) for s in set(self.output_params).union(requirements)): self.extra_args["output"] += " mPk" self.add_P_k_max(1, units="1/Mpc") # Adding tensor modes if requested @@ -298,14 +336,52 @@ def must_provide(self, **requirements): def add_z_for_matter_power(self, z): if getattr(self, "z_for_matter_power", None) is None: self.z_for_matter_power = np.empty(0) - self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate( - [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0) + self.z_for_matter_power = np.flip(combine_1d(z, self.z_for_matter_power)) self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power]) + def set_collector_with_z_pool(self, k, zs, method, args=(), args_names=(), + kwargs=empty_dict, arg_array=None, post=None, d=1): + """ + Creates a collector for a z-dependent quantity, keeping track of the pool of z's. + + If ``z`` is an arg, i.e. it is in ``args_names``, then omit it in the ``args``, + e.g. ``args_names=["a", "z", "b"]`` should be passed together with + ``args=[a_value, b_value]``. + """ + if k in self.collectors: + z_pool = self.collectors[k].z_pool + z_pool.update(zs) + else: + Pool = {1: Pool1D, 2: Pool2D}[d] + z_pool = Pool(zs) + # Insert z as arg or kwarg + if d == 1 and "z" in kwargs: + kwargs = deepcopy(kwargs) + kwargs["z"] = z_pool.values + elif d == 1 and "z" in args_names: + args = deepcopy(args) + i_z = args_names.index("z") + args = list(args[:i_z]) + [z_pool.values] + list(args[i_z:]) + elif d == 2 and "z1" in args_names and "z2" in args_names: + # z1 assumed appearing before z2! + args = deepcopy(args) + i_z1 = args_names.index("z1") + i_z2 = args_names.index("z2") + args = (list(args[:i_z1]) + [z_pool.values[:, 0]] + list(args[i_z1:i_z2]) + + [z_pool.values[:, 1]] + list(args[i_z2:])) + else: + raise LoggedError( + self.log, + f"I do not know how to insert the redshift for collector method {method} " + f"of requisite {k}") + self.collectors[k] = Collector( + method=method, z_pool=z_pool, args=args, args_names=args_names, kwargs=kwargs, + arg_array=arg_array, post=post) + def add_P_k_max(self, k_max, units): r""" Unifies treatment of :math:`k_\mathrm{max}` for matter power spectrum: - ``P_k_max_[1|h]/Mpc]``. + ``P_k_max_[1|h]/Mpc``. Make ``units="1/Mpc"|"h/Mpc"``. """ @@ -366,17 +442,44 @@ def calculate(self, state, want_derived=True, **params_values_dict): self.collectors["sigma8"].args[0] = 8 / self.classy.h() method = getattr(self.classy, collector.method) arg_array = self.collectors[product].arg_array + if isinstance(arg_array, int): + arg_array = np.atleast_1d(arg_array) if arg_array is None: state[product] = method( *self.collectors[product].args, **self.collectors[product].kwargs) - elif isinstance(arg_array, int): - state[product] = np.zeros( - len(self.collectors[product].args[arg_array])) - for i, v in enumerate(self.collectors[product].args[arg_array]): - args = (list(self.collectors[product].args[:arg_array]) + [v] + - list(self.collectors[product].args[arg_array + 1:])) - state[product][i] = method( - *args, **self.collectors[product].kwargs) + elif isinstance(arg_array, Sequence) or isinstance(arg_array, np.ndarray): + arg_array = np.array(arg_array) + if len(arg_array.shape) == 1: + # if more than one vectorised arg, assume all vectorised in parallel + n_values = len(self.collectors[product].args[arg_array[0]]) + state[product] = np.zeros(n_values) + args = deepcopy(list(self.collectors[product].args)) + for i in range(n_values): + for arg_arr_index in arg_array: + args[arg_arr_index] = \ + self.collectors[product].args[arg_arr_index][i] + state[product][i] = method( + *args, **self.collectors[product].kwargs) + elif len(arg_array.shape) == 2: + if len(arg_array) > 2: + raise NotImplementedError("Only 2 array expanded vars so far.") + # Create outer combinations + x_and_y = np.array(np.meshgrid( + self.collectors[product].args[arg_array[0, 0]], + self.collectors[product].args[arg_array[1, 0]])).T + args = deepcopy(list(self.collectors[product].args)) + result = np.empty(shape=x_and_y.shape[:2]) + for i, row in enumerate(x_and_y): + for j, column_element in enumerate(x_and_y[i]): + args[arg_array[0, 0]] = column_element[0] + args[arg_array[1, 0]] = column_element[1] + result[i, j] = method( + *args, **self.collectors[product].kwargs) + state[product] = ( + self.collectors[product].args[arg_array[0, 0]], + self.collectors[product].args[arg_array[1, 0]], result) + else: + raise ValueError("arg_array not correctly formatted.") elif arg_array in self.collectors[product].kwargs: value = np.atleast_1d(self.collectors[product].kwargs[arg_array]) state[product] = np.zeros(value.shape) @@ -385,6 +488,9 @@ def calculate(self, state, want_derived=True, **params_values_dict): kwargs[arg_array] = v state[product][i] = method( *self.collectors[product].args, **kwargs) + else: + raise LoggedError(self.log, "Variable over which to do an array call " + f"not known: arg_array={arg_array}") if collector.post: state[product] = collector.post(*state[product]) # Prepare derived parameters @@ -440,8 +546,8 @@ def _get_Cl(self, ell_factor=False, units="FIRASmuK2", lensed=True): raise LoggedError(self.log, "No %s Cl's were computed. Are you sure that you " "have requested them?", which_error) # unit conversion and ell_factor - ells_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[ - 2:] if ell_factor else 1 + ells_factor = \ + ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[2:] if ell_factor else 1 units_factor = self._cmb_unit_factor( units, self.current_state['derived_extra']['T_cmb']) for cl in cls: @@ -457,21 +563,6 @@ def get_Cl(self, ell_factor=False, units="FIRASmuK2"): def get_unlensed_Cl(self, ell_factor=False, units="FIRASmuK2"): return self._get_Cl(ell_factor=ell_factor, units=units, lensed=False) - def _get_z_dependent(self, quantity, z): - try: - z_name = next(k for k in ["redshifts", "z"] - if k in self.collectors[quantity].kwargs) - computed_redshifts = self.collectors[quantity].kwargs[z_name] - except StopIteration: - computed_redshifts = self.collectors[quantity].args[ - self.collectors[quantity].args_names.index("z")] - i_kwarg_z = np.concatenate( - [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)]) - values = np.array(deepcopy(self.current_state[quantity])) - if quantity == "comoving_radial_distance": - values = values[0] - return values[i_kwarg_z] - def close(self): self.classy.empty() diff --git a/cobaya/theories/cosmo/boltzmannbase.py b/cobaya/theories/cosmo/boltzmannbase.py index cf83ae190..b0bbbc7c0 100644 --- a/cobaya/theories/cosmo/boltzmannbase.py +++ b/cobaya/theories/cosmo/boltzmannbase.py @@ -8,12 +8,12 @@ """ import numpy as np from scipy.interpolate import RectBivariateSpline -from typing import Mapping, Iterable, Callable +from typing import Mapping, Iterable # Local from cobaya.theory import Theory -from cobaya.tools import deepcopy_where_possible -from cobaya.log import LoggedError, abstract +from cobaya.tools import deepcopy_where_possible, combine_1d, combine_2d, check_2d +from cobaya.log import LoggedError, abstract, get_logger from cobaya.conventions import Const from cobaya.typing import empty_dict, InfoDict @@ -22,7 +22,6 @@ class BoltzmannBase(Theory): _is_abstract = True - _get_z_dependent: Callable # defined by inheriting classes renames: Mapping[str, str] = empty_dict extra_args: InfoDict _must_provide: dict @@ -95,32 +94,53 @@ def must_provide(self, **requirements): ``source_name: {"function": "spline"|"gaussian", [source_args]``; for now, ``[source_args]`` follows the notation of `CAMBSources `_. - If can also take ``"lmax": [int]``, ``"limber": True`` if Limber approximation + It can also take ``"lmax": [int]``, ``"limber": True`` if Limber approximation desired, and ``"non_linear": True`` if non-linear contributions requested. Get with :func:`~BoltzmannBase.get_source_Cl`. - ``Pk_interpolator={...}``: Matter power spectrum interpolator in :math:`(z, k)`. Takes ``"z": [list_of_evaluated_redshifts]``, ``"k_max": [k_max]``, - ``"extrap_kmax": [max_k_max_extrapolated]``, ``"nonlinear": [True|False]``, + ``"nonlinear": [True|False]``, ``"vars_pairs": [["delta_tot", "delta_tot"], ["Weyl", "Weyl"], [...]]}``. + Notice that ``k_min`` cannot be specified. To reach a lower one, use + ``extra_args`` in CAMB to increase ``accuracyboost`` and ``TimeStepBoost``, or + in CLASS to decrease ``k_min_tau0``. The method :func:`~get_Pk_interpolator` to + retrieve the interpolator admits extrapolation limits ``extrap_[kmax|kmin]``. + It is recommended to use ``extrap_kmin`` to reach the desired ``k_min``, and + increase precision parameters only as much as necessary to improve over the + interpolation. All :math:`k` values should be in units of + :math:`1/\mathrm{Mpc}`. + Non-linear contributions are included by default. Note that the non-linear setting determines whether non-linear corrections are calculated; the :func:`~get_Pk_interpolator` method also has a non-linear argument to specify if you want the linear or non-linear spectrum returned (to have both linear and non-linear spectra available request a tuple ``(False,True)`` for the non-linear - argument). All :math:`k` values should be in units of :math:`1/\mathrm{Mpc}`. + argument). - ``Pk_grid={...}``: similar to ``Pk_interpolator`` except that rather than returning a bicubic spline object it returns the raw power spectrum grid as a ``(k, z, P(z,k))`` set of arrays. Get with :func:`~BoltzmannBase.get_Pk_grid`. - ``sigma_R={...}``: RMS linear fluctuation in spheres of radius :math:`R` at redshifts :math:`z`. Takes ``"z": [list_of_evaluated_redshifts]``, - ``"k_max": [k_max]``, ``"vars_pairs": [["delta_tot", "delta_tot"], [...]]}``, + ``"k_max": [k_max]``, ``"vars_pairs": [["delta_tot", "delta_tot"], [...]]``, ``"R": [list_of_evaluated_R]``. Note that :math:`R` is in :math:`\mathrm{Mpc}`, not :math:`h^{-1}\,\mathrm{Mpc}`. Get with :func:`~BoltzmannBase.get_sigma_R`. - - ``Hubble={'z': [z_1, ...]}``: Hubble rate at the requested redshifts. + - ``Hubble={'z': [z_1, ...]}``: Hubble rates at the requested redshifts. Get it with :func:`~BoltzmannBase.get_Hubble`. + - ``Omega_b={'z': [z_1, ...]}``: Baryon density parameter at the requested + redshifts. Get it with :func:`~BoltzmannBase.get_Omega_b`. + - ``Omega_cdm={'z': [z_1, ...]}``: Cold dark matter density parameter at the + requested redshifts. Get it with :func:`~BoltzmannBase.get_Omega_cdm`. + - ``Omega_nu_massive={'z': [z_1, ...]}``: Massive neutrinos' density parameter at + the requested redshifts. Get it with + :func:`~BoltzmannBase.get_Omega_nu_massive`. - ``angular_diameter_distance={'z': [z_1, ...]}``: Physical angular diameter distance to the redshifts requested. Get it with :func:`~BoltzmannBase.get_angular_diameter_distance`. + - ``angular_diameter_distance_2={'z_pairs': [(z_1a, z_1b), (z_2a, z_2b)...]}``: + Physical angular diameter distance between the pairs of redshifts requested. + If a 1d array of redshifts is passed as `z_pairs`, all possible combinations + of two are computed and stored (not recommended if only a subset is needed). + Get it with :func:`~BoltzmannBase.get_angular_diameter_distance_2`. - ``comoving_radial_distance={'z': [z_1, ...]}``: Comoving radial distance from us to the redshifts requested. Get it with :func:`~BoltzmannBase.get_comoving_radial_distance`. @@ -130,7 +150,6 @@ def must_provide(self, **requirements): - ``fsigma8={'z': [z_1, ...]}``: Structure growth rate :math:`f\sigma_8` at the redshifts requested. Get it with :func:`~BoltzmannBase.get_fsigma8`. - - ``k_max=[...]``: Fixes the maximum comoving wavenumber considered. - **Other derived parameters** that are not included in the input but whose value the likelihood may need. @@ -158,10 +177,8 @@ def must_provide(self, **requirements): k = ("sigma_R",) + pair current = self._must_provide.get(k, {}) self._must_provide[k] = { - "R": np.sort(np.unique(np.concatenate( - (current.get("R", []), np.atleast_1d(v["R"]))))), - "z": np.unique(np.concatenate( - (current.get("z", []), np.atleast_1d(v["z"])))), + "R": combine_1d(v["R"], current.get("R")), + "z": combine_1d(v["z"], current.get("z")), "k_max": max(current.get("k_max", 0), v.get("k_max", 2 / np.min(v["R"])))} elif k in ("Pk_interpolator", "Pk_grid"): @@ -178,8 +195,7 @@ def must_provide(self, **requirements): current = self._must_provide.get(k, {}) self._must_provide[k] = dict( nonlinear=nonlinear, - z=np.unique(np.concatenate((current.get("z", []), - np.atleast_1d(redshifts)))), + z=combine_1d(redshifts, current.get("z")), k_max=max(current.get("k_max", 0), k_max), **v) elif k == "source_Cl": if k not in self._must_provide: @@ -192,19 +208,33 @@ def must_provide(self, **requirements): # for source, window in v["sources"].items(): # if source in (getattr(self, "sources", {}) or {}): # # TODO: improve this test!!! - # # (e.g. 2 z-vectors that fulfill np.allclose would fail a == test) + # # (2 z-vectors that fulfill np.allclose would fail a == test) # if window != self.sources[source]: # raise LoggedError( # self.log, - # "Source %r requested twice with different specification: " - # "%r vs %r.", window, self.sources[source]) + # "Source %r requested twice with different specification" + # ": %r vs %r.", source, window, self.sources[source]) self._must_provide[k].update(v) - elif k in ["Hubble", "angular_diameter_distance", - "comoving_radial_distance", "sigma8_z", "fsigma8"]: + elif k in ["Hubble", "Omega_b", "Omega_cdm", "Omega_nu_massive", + "angular_diameter_distance", "comoving_radial_distance", + "sigma8_z", "fsigma8"]: + if k not in self._must_provide: + self._must_provide[k] = {} + self._must_provide[k]["z"] = combine_1d( + v["z"], self._must_provide[k].get("z")) + elif k == "angular_diameter_distance_2": if k not in self._must_provide: self._must_provide[k] = {} - self._must_provide[k]["z"] = np.unique(np.concatenate( - (self._must_provide[k].get("z", []), v["z"]))) + zs = v.pop("z_pairs", None) + # Combine pairs with previous ones and uniquify + try: + self._must_provide[k]["z_pairs"] = combine_2d( + zs, self._must_provide[k].get("z_pairs")) + except ValueError: + raise LoggedError( + self.log, "For requisite `angular_diameter_distance2`, `z_pairs` " + "must be a list of pairs (z1, z2), but is " + f"{v['z_pairs']}") # Extra derived parameters and other unknown stuff (keep capitalization) elif v is None: self._must_provide[k] = None @@ -230,6 +260,41 @@ def check_no_repeated_input_extra(self): "as extra arguments: %s. Please, remove one of the definitions " "of each.", common) + def _get_z_dependent(self, quantity, z, pool=None): + if pool is None: + pool = self.collectors[quantity].z_pool + try: + i_kwarg_z = pool.find_indices(z) + except ValueError: + raise LoggedError(self.log, f"{quantity} not computed for all z requested. " + f"Requested z are {z}, but computed ones are " + f"{pool.values}.") + return np.array(self.current_state[quantity], copy=True)[i_kwarg_z] + + def _get_z_pair_dependent(self, quantity, z_pairs, inv_value=0): + """ + ``inv_value`` (default=0) is assigned to pairs for which ``z1 > z2``. + """ + try: + check_2d(z_pairs, allow_1d=False) + except ValueError: + raise LoggedError(self.log, f"z_pairs={z_pairs} not correctly formatted for " + f"{quantity}. It should be a list of pairs.") + # Only recover for correctly sorted pairs + z_pairs_arr = np.array(z_pairs) + i_right = (z_pairs_arr[:, 0] <= z_pairs_arr[:, 1]) + pool = self.collectors[quantity].z_pool + try: + i_z_pair = pool.find_indices(z_pairs_arr[i_right]) + except ValueError: + raise LoggedError( + self.log, f"{quantity} not computed for all z pairs requested. " + f"Requested z are {z_pairs}, but computed ones are " + f"{pool.values}.") + result = np.full(len(z_pairs), inv_value, dtype=float) + result[i_right] = np.array(self.current_state[quantity], copy=True)[i_z_pair] + return result + def _cmb_unit_factor(self, units, T_cmb): units_factors = {"1": 1, "muK2": T_cmb * 1.e6, @@ -288,6 +353,33 @@ def get_Hubble(self, z, units="km/s/Mpc"): self.log, "Units not known for H: '%s'. Try instead one of %r.", units, list(H_units_conv_factor)) + def get_Omega_b(self, z): + r""" + Returns the Baryon density parameter at the given redshift(s) ``z``. + + The redshifts must be a subset of those requested when + :func:`~BoltzmannBase.must_provide` was called. + """ + return self._get_z_dependent("Omega_b", z) + + def get_Omega_cdm(self, z): + r""" + Returns the Cold Dark Matter density parameter at the given redshift(s) ``z``. + + The redshifts must be a subset of those requested when + :func:`~BoltzmannBase.must_provide` was called. + """ + return self._get_z_dependent("Omega_cdm", z) + + def get_Omega_nu_massive(self, z): + r""" + Returns the Massive neutrinos' density parameter at the given redshift(s) ``z``. + + The redshifts must be a subset of those requested when + :func:`~BoltzmannBase.must_provide` was called. + """ + return self._get_z_dependent("Omega_nu_massive", z) + def get_angular_diameter_distance(self, z): r""" Returns the physical angular diameter distance in :math:`\mathrm{Mpc}` to the @@ -298,6 +390,19 @@ def get_angular_diameter_distance(self, z): """ return self._get_z_dependent("angular_diameter_distance", z) + def get_angular_diameter_distance_2(self, z_pairs): + r""" + Returns the physical angular diameter distance between pairs of redshifts + `z_pairs` in :math:`\mathrm{Mpc}`. + + The redshift pairs must be a subset of those requested when + :func:`~BoltzmannBase.must_provide` was called. + + Return zero for pairs in which ``z1 > z2``. + """ + return self._get_z_pair_dependent( + "angular_diameter_distance_2", z_pairs, inv_value=0) + def get_comoving_radial_distance(self, z): r""" Returns the comoving radial distance in :math:`\mathrm{Mpc}` to the given @@ -309,7 +414,7 @@ def get_comoving_radial_distance(self, z): return self._get_z_dependent("comoving_radial_distance", z) def get_Pk_interpolator(self, var_pair=("delta_tot", "delta_tot"), nonlinear=True, - extrap_kmax=None): + extrap_kmin=None, extrap_kmax=None): r""" Get a :math:`P(z,k)` bicubic interpolation object (:class:`PowerSpectrumInterpolator`). @@ -320,12 +425,15 @@ def get_Pk_interpolator(self, var_pair=("delta_tot", "delta_tot"), nonlinear=Tru :param var_pair: variable pair for power spectrum :param nonlinear: non-linear spectrum (default True) + :param extrap_kmin: use log linear extrapolation from ``extrap_kmin`` up to min + :math:`k`. :param extrap_kmax: use log linear extrapolation beyond max :math:`k` computed up to ``extrap_kmax``. :return: :class:`PowerSpectrumInterpolator` instance. """ nonlinear = bool(nonlinear) - key = ("Pk_interpolator", nonlinear, extrap_kmax) + tuple(sorted(var_pair)) + key = (("Pk_interpolator", nonlinear, extrap_kmin, extrap_kmax) + + tuple(sorted(var_pair))) if key in self.current_state: return self.current_state[key] k, z, pk = self.get_Pk_grid(var_pair=var_pair, nonlinear=nonlinear) @@ -336,14 +444,17 @@ def get_Pk_interpolator(self, var_pair=("delta_tot", "delta_tot"), nonlinear=Tru sign = -1 else: log_p = False + extrapolating = ((extrap_kmax and extrap_kmax > k[-1]) or + (extrap_kmin and extrap_kmin < k[0])) if log_p: pk = np.log(sign * pk) - elif extrap_kmax > k[-1]: + elif extrapolating: raise LoggedError(self.log, 'Cannot do log extrapolation with zero-crossing pk ' 'for %s, %s' % var_pair) - result = PowerSpectrumInterpolator(z, k, pk, logP=log_p, logsign=sign, - extrap_kmax=extrap_kmax) + result = PowerSpectrumInterpolator( + z, k, pk, logP=log_p, logsign=sign, + extrap_kmin=extrap_kmin, extrap_kmax=extrap_kmax) self.current_state[key] = result return result @@ -375,7 +486,7 @@ def get_Pk_grid(self, var_pair=("delta_tot", "delta_tot"), nonlinear=True): def get_sigma_R(self, var_pair=("delta_tot", "delta_tot")): r""" - Get :math:`\sigma_R(z)`, the RMS power in an sphere of radius :math:`R` at + Get :math:`\sigma_R(z)`, the RMS power in a sphere of radius :math:`R` at redshift :math:`z`. Note that :math:`R` is in :math:`\mathrm{Mpc}`, not :math:`h^{-1}\,\mathrm{Mpc}`, @@ -385,7 +496,7 @@ def get_sigma_R(self, var_pair=("delta_tot", "delta_tot")): :math:`R` and :math:`z` should be in the returned arrays. :param var_pair: which two fields to use for the RMS power - :return: ``R``, ``z``, ``sigma_R``, where ``R`` and ``z`` are arrays of computed + :return: ``z``, ``R``, ``sigma_R``, where ``z`` and ``R`` are arrays of computed values, and ``sigma_R[i,j]`` is the value :math:`\sigma_R(z)` for ``z[i]``, ``R[j]``. """ @@ -402,7 +513,6 @@ def get_source_Cl(self): multipoles. """ - @abstract def get_sigma8_z(self, z): r""" Present day linear theory root-mean-square amplitude of the matter @@ -411,8 +521,8 @@ def get_sigma8_z(self, z): The redshifts must be a subset of those requested when :func:`~BoltzmannBase.must_provide` was called. """ + return self._get_z_dependent("sigma8_z", z) - @abstract def get_fsigma8(self, z): r""" Structure growth rate :math:`f\sigma_8`, as defined in eq. 33 of @@ -423,6 +533,7 @@ def get_fsigma8(self, z): The redshifts must be a subset of those requested when :func:`~BoltzmannBase.must_provide` was called. """ + return self._get_z_dependent("fsigma8", z) def get_auto_covmat(self, params_info, likes_info, random_state=None): r""" @@ -454,7 +565,8 @@ class PowerSpectrumInterpolator(RectBivariateSpline): extrap_kmax; useful for tails of integrals. """ - def __init__(self, z, k, P_or_logP, extrap_kmax=None, logP=False, logsign=1): + def __init__(self, z, k, P_or_logP, extrap_kmin=None, extrap_kmax=None, logP=False, + logsign=1): self.islog = logP # Check order z, k = (np.atleast_1d(x) for x in [z, k]) @@ -462,19 +574,34 @@ def __init__(self, z, k, P_or_logP, extrap_kmax=None, logP=False, logsign=1): raise ValueError('Require at least four redshifts for Pk interpolation.' 'Consider using Pk_grid if you just need a a small number' 'of specific redshifts (doing 1D splines in k yourself).') + z, k, P_or_logP = np.array(z), np.array(k), np.array(P_or_logP) i_z = np.argsort(z) i_k = np.argsort(k) self.logsign = logsign self.z, self.k, P_or_logP = z[i_z], k[i_k], P_or_logP[i_z, :][:, i_k] self.zmin, self.zmax = self.z[0], self.z[-1] - self.kmin, self.kmax = self.k[0], self.k[-1] + self.extrap_kmin, self.extrap_kmax = extrap_kmin, extrap_kmax logk = np.log(self.k) + # Start from extrap_kmin using a (log,log)-linear extrapolation + if extrap_kmin and extrap_kmin < self.input_kmin: + if not logP: + raise ValueError('extrap_kmin must use logP') + logk = np.hstack( + [np.log(extrap_kmin), + np.log(self.input_kmin) * 0.1 + np.log(extrap_kmin) * 0.9, logk]) + logPnew = np.empty((P_or_logP.shape[0], P_or_logP.shape[1] + 2)) + logPnew[:, 2:] = P_or_logP + diff = (logPnew[:, 3] - logPnew[:, 2]) / (logk[3] - logk[2]) + delta = diff * (logk[2] - logk[0]) + logPnew[:, 0] = logPnew[:, 2] - delta + logPnew[:, 1] = logPnew[:, 2] - delta * 0.9 + P_or_logP = logPnew # Continue until extrap_kmax using a (log,log)-linear extrapolation - if extrap_kmax and extrap_kmax > self.kmax: + if extrap_kmax and extrap_kmax > self.input_kmax: if not logP: raise ValueError('extrap_kmax must use logP') logk = np.hstack( - [logk, np.log(self.kmax) * 0.1 + np.log(extrap_kmax) * 0.9, + [logk, np.log(self.input_kmax) * 0.1 + np.log(extrap_kmax) * 0.9, np.log(extrap_kmax)]) logPnew = np.empty((P_or_logP.shape[0], P_or_logP.shape[1] + 2)) logPnew[:, :-2] = P_or_logP @@ -482,31 +609,85 @@ def __init__(self, z, k, P_or_logP, extrap_kmax=None, logP=False, logsign=1): delta = diff * (logk[-1] - logk[-3]) logPnew[:, -1] = logPnew[:, -3] + delta logPnew[:, -2] = logPnew[:, -3] + delta * 0.9 - self.kmax = extrap_kmax # Added for consistency with CAMB - P_or_logP = logPnew - super().__init__(self.z, logk, P_or_logP) + @property + def input_kmin(self): + """Minimum k for the interpolation (not incl. extrapolation range).""" + return self.k[0] + + @property + def input_kmax(self): + """Maximum k for the interpolation (not incl. extrapolation range).""" + return self.k[-1] + + @property + def kmin(self): + """Minimum k of the interpolator (incl. extrapolation range).""" + if self.extrap_kmin is None: + return self.input_kmin + return self.extrap_kmin + + @property + def kmax(self): + """Maximum k of the interpolator (incl. extrapolation range).""" + if self.extrap_kmax is None: + return self.input_kmax + return self.extrap_kmax + + def check_ranges(self, z, k): + """Checks that we are not trying to extrapolate beyond the interpolator limits.""" + z = np.atleast_1d(z).flatten() + min_z, max_z = min(z), max(z) + if min_z < self.zmin and not np.allclose(min_z, self.zmin): + raise LoggedError(get_logger(self.__class__.__name__), + f"Not possible to extrapolate to z={min(z)} " + f"(minimum z computed is {self.zmin}).") + if max_z > self.zmax and not np.allclose(max_z, self.zmax): + raise LoggedError(get_logger(self.__class__.__name__), + f"Not possible to extrapolate to z={max(z)} " + f"(maximum z computed is {self.zmax}).") + k = np.atleast_1d(k).flatten() + min_k, max_k = min(k), max(k) + if min_k < self.kmin and not np.allclose(min_k, self.kmin): + raise LoggedError(get_logger(self.__class__.__name__), + f"Not possible to extrapolate to k={min(k)} 1/Mpc " + f"(minimum k possible is {self.kmin} 1/Mpc).") + if max_k > self.kmax and not np.allclose(max_k, self.kmax): + raise LoggedError(get_logger(self.__class__.__name__), + f"Not possible to extrapolate to k={max(k)} 1/Mpc " + f"(maximum k possible is {self.kmax} 1/Mpc).") + def P(self, z, k, grid=None): """ Get the power spectrum at (z,k). """ + self.check_ranges(z, k) if grid is None: grid = not np.isscalar(z) and not np.isscalar(k) if self.islog: - return self.logsign * np.exp(self(z, np.log(k), grid=grid)) + return self.logsign * np.exp(self(z, np.log(k), grid=grid, warn=False)) else: - return self(z, np.log(k), grid=grid) + return self(z, np.log(k), grid=grid, warn=False) def logP(self, z, k, grid=None): """ Get the log power spectrum at (z,k). (or minus log power spectrum if islog and logsign=-1) """ + self.check_ranges(z, k) if grid is None: grid = not np.isscalar(z) and not np.isscalar(k) if self.islog: - return self(z, np.log(k), grid=grid) + return self(z, np.log(k), grid=grid, warn=False) else: - return np.log(self(z, np.log(k), grid=grid)) + return np.log(self(z, np.log(k), grid=grid, warn=False)) + + def __call__(self, *args, warn=True, **kwargs): + if warn: + get_logger(self.__class__.__name__).warning( + "Do not call the instance directly. Use instead methods P(z, k) or " + "logP(z, k) to get the (log)power spectrum. (If you know what you are " + "doing, pass warn=False)") + return super().__call__(*args, **kwargs) diff --git a/cobaya/theory.py b/cobaya/theory.py index 11c9e0f36..aa5fd6440 100644 --- a/cobaya/theory.py +++ b/cobaya/theory.py @@ -78,10 +78,10 @@ def get_requirements(self) -> Union[InfoDict, Sequence[str], Sequence[Tuple[str, InfoDict]]]: """ Get a dictionary of requirements (or a list of requirement name, option tuples) - that are always needed (e.g. must be calculated by a another component + that are always needed (e.g. must be calculated by another component or provided as input parameters). - :return: dictionary or list of tuples of of requirement names and options + :return: dictionary or list of tuples of requirement names and options (or iterable of requirement names if no optional parameters are needed) """ return str_to_list(getattr(self, "requires", [])) @@ -292,7 +292,7 @@ def current_derived(self) -> ParamValuesDict: @property def type_list(self) -> List[str]: # list of labels that classify this component - # not usually used for Theory, can used for aggregated chi2 in likelihoods + # not usually used for Theory, can be used for aggregated chi2 in likelihoods return str_to_list(getattr(self, "type", []) or []) # MARKED FOR DEPRECATION IN v3.1 diff --git a/cobaya/tools.py b/cobaya/tools.py index 6340496fc..5d5a43c4b 100644 --- a/cobaya/tools.py +++ b/cobaya/tools.py @@ -26,6 +26,7 @@ from types import ModuleType from inspect import cleandoc, getfullargspec from ast import parse +from abc import ABC, abstractmethod import traceback # Local @@ -457,7 +458,7 @@ class NumberWithUnits: def __init__(self, n_with_unit: Any, unit: str, dtype=float, scale=None): """ Reads number possibly with some `unit`, e.g. 10s, 4d. - Loaded from a a case-insensitive string of a number followed by a unit, + Loaded from a case-insensitive string of a number followed by a unit, or just a number in which case the unit is set to None. :param n_with_unit: number string or number @@ -1069,3 +1070,325 @@ def sort_cosmetic(info): sorted_info[k] = info[k] sorted_info.update({k: v for k, v in info.items() if k not in sorted_info}) return sorted_info + + +def combine_1d(new_list, old_list=None): + """ + Combines+sorts+uniquifies two lists of values. Sorting is in ascending order. + + If `old_list` given, it is assumed to be a sorted and uniquified array (e.g. the + output of this function when passed as first argument). + + Uses `np.unique`, which distinguishes numbers up to machine precision. + """ + new_list = np.atleast_1d(new_list) + if old_list is not None: + new_list = np.concatenate((old_list, new_list)) + return np.unique(new_list) + + +class PoolND(ABC): + r""" + Stores a list of ``N``-tuples ``[x_1, x_2...]`` for later retrieval given some + ``N``-tuple ``x``. + + Tuples are sorted internally, and then by ascending order of their values. + + Tuples are uniquified internally up to machine precision, and an adaptive + tolerance (relative to min absolute and relative differences in the list) is + applied at retrieving. + + Adaptive tolerance is defined between limits ``[atol|rtol]_[min|max]``. + """ + + values: np.ndarray + + def __init__(self, values=[], + rtol_min=1e-5, rtol_max=1e-3, atol_min=1e-8, atol_max=1e-6, logger=None): + assert values is not None and len(values) != 0, \ + "Pool needs to be initialised with at least one value." + assert rtol_min <= rtol_max, \ + f"rtol_min={rtol_min} must be smaller or equal to rtol_max={rtol_max}" + assert atol_min <= atol_max, \ + f"atol_min={atol_min} must be smaller or equal to ato_max={atol_max}" + self.atol_min, self.atol_max = atol_min, atol_max + self.rtol_min, self.rtol_max = rtol_min, rtol_max + if logger is None: + self.log = get_logger(self.__class__.__name__) + else: + self.log = logger + self.update(values) + + @property + def d(self): + return len(self.values.shape) + + def __len__(self): + return len(self.values) + + def __getitem__(self, *args, **kwargs): + return self.values.__getitem__(*args, **kwargs) + + def _update_tolerances(self): + """Adapts tolerance to the differences in the list.""" + if self.d == 1: + # Assumes that the pool is sorted! + values = self.values + else: + values = np.copy(self.values) + values.flatten() + values = combine_1d(values) + if len(values) > 1: + differences = values[1:] - values[:-1] + min_difference = np.min(differences) + self._adapt_atol_min = self.atol_min * min_difference + self._adapt_atol_max = self.atol_max * min_difference + min_rel_difference = np.min(differences / values[1:]) + self._adapt_rtol_min = self.rtol_min * min_rel_difference + self._adapt_rtol_max = self.rtol_max * min_rel_difference + else: # single-element list + self._adapt_atol_min = self.atol_min * values[0] + self._adapt_atol_max = self.atol_max * values[0] + self._adapt_rtol_min = self.rtol_min + self._adapt_rtol_max = self.rtol_max + + def update(self, values): + """Adds a set of values, uniquifies and sorts.""" + values = self._check_values(values) + self._update_values(values) + self._update_tolerances() + + @abstractmethod + def _check_values(self, values): + """ + Checks that the input values are correctly formatted and re-formats them if + necessary. + + Returns a correctly formatted array. + + Internal sorting is enforced, but external is ignored. + """ + + @abstractmethod + def _update_values(self, values): + """Combines given and existing pool. Should assign ``self.values``.""" + + def find_indices(self, values): + """ + Finds the indices of elements in array ``values`` in the pool. + + For ``dim > 1`` it expects internally ascending-sorted pairs. + + Calls ``numpy.isclose`` for robust comparison, using adaptive ``rtol``, ``atol`` + limits. + + Raises ValueError if not all elements were found, each only once. + """ + values = self._check_values(values) + # Fast search first if possible + indices = self._fast_find_indices(values) + i_not_found = np.where(indices == -1)[0] + if len(i_not_found): + # TODO: since the pool is sorted already, we could take advantage of that, + # sort the target values (and save indices to recover original order), + # and iterate only over the part of the pool starting where the last + # value was found. But since running rapid test first, prob not needed. + indices_i_prev_not_found = np.concatenate( + [self._pick_at_most_one(x, None, None) for x in values[i_not_found]]) + if len(indices_i_prev_not_found) < len(i_not_found): + raise ValueError( + f"Could not find some of {list(values)} in pool {list(self.values)}. " + "If there appear to be a values close to the values in the pool," + " increase max tolerances.") + indices[i_not_found] = indices_i_prev_not_found + return indices + + def _fast_find_indices(self, values): + """ + Fast way to find indices, possibly ignoring tolerance, e.g. using np.where(a==b). + + It should check that the right elements have been found, and return an array + of length ``values.shape[0]`` with ``-1`` for elements that where not found. + """ + # if no dimensionality-specific implementation: none found + return np.full(shape=len(values), fill_value=-1) + + def _pick_at_most_one(self, x, pool=None, rtol=None, atol=None): + """ + Iterates over the pool (full pool if ``pool`` is ``None``) to find the index of a + single element ``x``, using the provided tolerances. + + It uses the test function ``self._where_isclose(pool, x, rtol, atol)``, returning + an array of indices of matches. + + Tolerances start at the minimum one, and, until an element is found, are + progressively increased until the maximum tolerance is reached. + """ + if pool is None: + pool = self.values + # Start with min tolerance for safety + if rtol is None: + rtol = self._adapt_rtol_min + if atol is None: + atol = self._adapt_atol_min + i = self._where_isclose(pool, x, rtol=rtol, atol=atol) + if not len(i): # none found + # Increase tolerance (if allowed) until one found + if rtol > self._adapt_rtol_max and atol > self._adapt_atol_max: + # Nothing was found despite high tolerance + return np.empty(shape=0, dtype=int) + if rtol <= self._adapt_rtol_max: + rtol *= 10 + if atol <= self._adapt_atol_max: + atol *= 10 + return self._pick_at_most_one(x, pool, rtol, atol) + elif len(i) > 1: # more than one found + # Decrease tolerance (if allowed!) until only one found + if rtol < self._adapt_rtol_min and atol < self._adapt_atol_min: + # No way to find only one element despite low tolerance + return np.empty(shape=0, dtype=int) + # Factor not a divisor of the one above, to avoid infinite loops + if rtol >= self._adapt_rtol_min: + rtol /= 3 + if atol >= self._adapt_atol_min: + atol /= 3 + return self._pick_at_most_one(x, pool, rtol, atol) + else: + return i + + @abstractmethod + def _where_isclose(self, pool, x, rtol, atol): + """ + Returns an array of indices of matches. + + E.g. in 1D it works as ``np.where(np.isclose(pool, x, rtol=rtol, atol=atol))[0]``. + """ + + +class Pool1D(PoolND): + r""" + Stores a list of values ``[x_1, x_2...]`` for later retrieval given some ``x``. + + ``x`` values are uniquified internally up to machine precision, and an adaptive + tolerance (relative to min absolute and relative differences in the list) is + applied at retrieving. + + Adaptive tolerance is defined between limits ``[atol|rtol]_[min|max]``. + """ + + def _check_values(self, values): + return np.atleast_1d(values) + + def _update_values(self, values): + self.values = combine_1d(values, getattr(self, "values", None)) + + def _fast_find_indices(self, values): + i_insert_left = np.clip( + np.searchsorted(self.values, values), a_min=None, a_max=len(self) - 1) + return np.where( + self._cond_isclose( + self.values[i_insert_left], values, rtol=self._adapt_rtol_min, + atol=self._adapt_atol_min), + i_insert_left, -1) + + def _cond_isclose(self, pool, x, rtol, atol): + return np.isclose(pool, x, rtol=rtol, atol=atol) + + def _where_isclose(self, pool, x, rtol, atol): + return np.where(self._cond_isclose(pool, x, rtol=rtol, atol=atol))[0] + + +def check_2d(pairs, allow_1d=True): + """ + Checks that the input is a pair (x1, x2) or a list of them. + + Returns a list of pairs as a 2d array with tuples sorted internally. + + Does not sort the pairs with respect to each other or checks for duplicates. + + If `allow_1d=True` (default) a list of more than 2 single values can be passed, + and will be converted into an internally-sorted list of all possible pairs, + as a 2d array. + + Raises ``ValueError`` if the argument is badly formatted. + """ + pairs = np.array(pairs) + if len(pairs.shape) == 1: + if len(pairs) < 2: # Single element or just a number + raise ValueError(f"Needs at least a pair of values. Got {list(pairs)}.") + elif len(pairs) == 2: # Single pair + pairs = np.atleast_2d(pairs) + elif len(pairs) > 2: # list -> generate combinations + if allow_1d: + pairs = np.array(list(chain(*[[[x_i, x_j] for x_j in pairs[i + 1:]] + for i, x_i in enumerate(pairs)]))) + else: + raise ValueError(f"Not a (list of) pair(s) of values: {list(pairs)}.") + elif (len(pairs.shape) == 2 and pairs.shape[1] != 2) or len(pairs.shape) != 2: + raise ValueError(f"Not a (list of) pair(s) of values: {list(pairs)}.") + return np.sort(pairs, axis=-1) # internal sorting + + +def combine_2d(new_pairs, old_pairs=None): + """ + Combines+sorts+uniquifies two lists of pairs of values. + + Pairs will be internally sorted in ascending order, and with respect to each other in + ascending order of the first value. + + `new_pairs` can be a list of more than 2 elements, from which all possible + internally-sorted combinations will be generated. + + If `old_pairs` given, it is assumed to be a sorted and uniquified array (e.g. the + output of this function when passed as first argument). + + Raises ``ValueError`` if the first argument is badly formatted (e.g. not a list + of pairs of values). + """ + new_pairs = check_2d(new_pairs) + if old_pairs is not None: + new_pairs = np.concatenate((old_pairs, new_pairs)) + return np.unique(new_pairs, axis=0) + + +class Pool2D(PoolND): + r""" + Stores a list of pairs ``[(x_1, y_1), (x_2, y_2)...]`` for later retrieval given some + ``(x, y)``. + + Pairs are uniquified internally up to machine precision, and an adaptive + tolerance (relative to min absolute and relative differences in the list) is + applied at retrieving. + + Adaptive tolerance is defined between limits ``[atol|rtol]_[min|max]``. + """ + + def _update_values(self, values): + self.values = combine_2d(values, getattr(self, "values", None)) + + def _check_values(self, values): + return check_2d(values) + + def _fast_find_indices(self, values): + # first, locate 1st component + i_insert_left = np.clip(np.searchsorted(self.values[:, 0], values[:, 0]), + a_min=None, a_max=len(self) - 1) + # we do not need to clip the "right" index, because we will use it as an endpoint + # for a slice, which is safe + i_insert_right = np.searchsorted(self.values[:, 0], values[:, 0], side="right") + slices = np.array([i_insert_left, i_insert_right]).T + i_maybe_found = [ + slices[i][0] + np.searchsorted( + self.values[slices[i][0]:slices[i][1], 1], values[i][1]) + for i in range(len(values))] + return np.where( + self._cond_isclose( + self.values[i_maybe_found], values, rtol=self._adapt_rtol_min, + atol=self._adapt_atol_min), + i_maybe_found, -1) + + def _cond_isclose(self, pool, x, rtol, atol): + return np.all(np.isclose(pool, x, rtol=rtol, atol=atol), axis=-1) + + def _where_isclose(self, pool, x, rtol, atol): + return np.where(self._cond_isclose(pool, x, rtol=rtol, atol=atol))[0] diff --git a/docs/cosmo_basic_runs.rst b/docs/cosmo_basic_runs.rst index fca9ac7f1..358d36dc3 100644 --- a/docs/cosmo_basic_runs.rst +++ b/docs/cosmo_basic_runs.rst @@ -77,7 +77,7 @@ Save the input generated to a file and run it with ``cobaya-run [your_input_file .. note:: - You may want to start with a *test run*, adding ``--test`` to ``cobaya-run``. It will initialise all components (cosmological theory code and likelihoods, and the sampler) and exit. + You may want to start with a *test run*, adding ``--test`` to ``cobaya-run`` (run without MPI). It will initialise all components (cosmological theory code and likelihoods, and the sampler) and exit. Typical running times for MCMC when using computationally heavy likelihoods (e.g. those involving :math:`C_\ell`, or non-linear :math:`P(k,z)` for several redshifts) are ~10 hours running 4 MPI processes with 4 OpenMP threads per process, provided that the initial covariance matrix is a good approximation to the one of the real posterior (Cobaya tries to select it automatically from a database; check the ``[mcmc]`` output towards the top to see if it succeeded), or a few hours on top of that if the initial covariance matrix is not a good approximation. diff --git a/docs/example.rst b/docs/example.rst index 556c4d5e2..fedfce1fe 100644 --- a/docs/example.rst +++ b/docs/example.rst @@ -58,6 +58,12 @@ The third file, ending in ``.txt``, contains the MCMC sample, and its first line 10.0 4.232834 0.705346 -0.314669 1.598046 -1.356208 2.221210 2.221210 4.023248 4.023248 2.0 4.829217 -0.121871 0.693151 -1.017847 2.041657 2.411930 2.411930 4.834574 4.834574 +You can run a posterior maximization process on top of the Monte Carlo sample (using its maximum as starting point) by repeating the shell command with a ``--minimize`` flag: + +.. code:: bash + + $ cobaya-run gaussian.yaml --minimize + You can use `GetDist `_ to analyse the results of this sample: get marginalized statistics, convergence diagnostics and some plots. We recommend using the `graphical user interface `_. Simply run ``getdist-gui`` from anywhere, press the green ``+`` button, navigate in the pop-up window into the folder containing the chains (here ``chains``) and click ``choose``. Now you can get some result statistics from the ``Data`` menu, or generate some plots like this one (just mark the the options in the red boxes and hit ``Make plot``): .. image:: img/example_quickstart_getdistgui.png @@ -126,6 +132,13 @@ The ``run`` function returns two variables: - An information dictionary updated with the defaults, equivalent to the ``updated`` yaml file produced by the shell invocation. - A sampler object, with a ``sampler.products()`` being a dictionary of results. For the ``mcmc`` sampler, the dictionary contains only one chain under the key ``sample``. +.. note:: + + To run a posterior maximization process after the Monte Carlo run, the simplest way is to repeat the ``run`` call with a ``minimize=True`` flag, saving the return values with a different name: + + .. literalinclude:: ./src_examples/quickstart/run_min.py + :language: python + Let's now analyse the chain and get some plots, using the interactive interface to GetDist instead of the GUI used above: .. literalinclude:: ./src_examples/quickstart/analyze.py diff --git a/docs/src_examples/cosmo_basic/basic_classy.yaml b/docs/src_examples/cosmo_basic/basic_classy.yaml index 0869ac1c3..7a695622d 100644 --- a/docs/src_examples/cosmo_basic/basic_classy.yaml +++ b/docs/src_examples/cosmo_basic/basic_classy.yaml @@ -2,7 +2,7 @@ theory: classy: extra_args: non linear: hmcode - hmcode_min_k_max: 20 + nonlinear_min_k_max: 20 N_ncdm: 1 N_ur: 2.0328 likelihood: diff --git a/docs/src_examples/quickstart/run_min.py b/docs/src_examples/quickstart/run_min.py new file mode 100644 index 000000000..cbed69415 --- /dev/null +++ b/docs/src_examples/quickstart/run_min.py @@ -0,0 +1,3 @@ +updated_info_minimizer, minimizer = run(info, minimize=True) +# To get the maximum-a-posteriori: +print(minimizer.products()["minimum"]) diff --git a/tests/common_cosmo.py b/tests/common_cosmo.py index db6bf64d6..24a565424 100644 --- a/tests/common_cosmo.py +++ b/tests/common_cosmo.py @@ -33,7 +33,7 @@ def body_of_test(packages_path, best_fit, info_likelihood, info_theory, ref_chi2 info["theory"][theo]["use_renames"] = True info = recursive_update(info, {"likelihood": info_likelihood}) info["params"].update(dict.fromkeys(best_fit_derived or [])) - # We need UPDATED info, to get the likelihoods nuisance parameters + # We need UPDATED info, to get the likelihoods' nuisance parameters info = update_info(info) # Notice that update_info adds an aux internal-only "params" property to the likes for lik in info["likelihood"]: diff --git a/tests/test_cosmo_bao.py b/tests/test_cosmo_bao.py index 9cf93d9d7..721d7dc39 100644 --- a/tests/test_cosmo_bao.py +++ b/tests/test_cosmo_bao.py @@ -29,16 +29,17 @@ def test_sdss_dr16_consensus_bao_lrg_camb(packages_path, skip_not_installed): body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sdss_dr16_baoplus_lrg, skip_not_installed=skip_not_installed) - -@pytest.mark.skip + def test_sdss_dr16_consensus_bao_lrg_classy(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_lrg" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr16_baoplus_lrg) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr16_baoplus_lrg, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) + - def test_sdss_dr16_consensus_bao_elg_camb(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_elg" info_likelihood = {like: {}} @@ -46,16 +47,17 @@ def test_sdss_dr16_consensus_bao_elg_camb(packages_path, skip_not_installed): body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sdss_dr16_baoplus_elg, skip_not_installed=skip_not_installed) - -@pytest.mark.skip + def test_sdss_dr16_consensus_bao_elg_classy(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_elg" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr16_baoplus_elg) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr16_baoplus_elg, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) + - def test_sdss_dr16_consensus_bao_qso_camb(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_qso" info_likelihood = {like: {}} @@ -63,16 +65,17 @@ def test_sdss_dr16_consensus_bao_qso_camb(packages_path, skip_not_installed): body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sdss_dr16_baoplus_qso, skip_not_installed=skip_not_installed) - -@pytest.mark.skip + def test_sdss_dr16_consensus_bao_qso_classy(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_qso" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr16_baoplus_qso) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr16_baoplus_qso, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) + - def test_sdss_dr16_consensus_bao_lyauto_camb(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_lyauto" info_likelihood = {like: {}} @@ -80,16 +83,17 @@ def test_sdss_dr16_consensus_bao_lyauto_camb(packages_path, skip_not_installed): body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sdss_dr16_baoplus_lyauto, skip_not_installed=skip_not_installed) - -@pytest.mark.skip + def test_sdss_dr16_consensus_bao_lyauto_classy(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_lyauto" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr16_baoplus_lyauto) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr16_baoplus_lyauto, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) + - def test_sdss_dr16_consensus_bao_lyxqso_camb(packages_path, skip_not_installed): like = "bao.sdss_dr16_baoplus_lyxqso" info_likelihood = {like: {}} @@ -97,16 +101,17 @@ def test_sdss_dr16_consensus_bao_lyxqso_camb(packages_path, skip_not_installed): body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sdss_dr16_baoplus_lyxqso, skip_not_installed=skip_not_installed) - -@pytest.mark.skip -def test_sdss_dr16_consensus_bao_lrg_classy(packages_path, skip_not_installed): - like = "bao.sdss_dr16_baoplus_lrg" + +def test_sdss_dr16_consensus_bao_lyxqso_classy(packages_path, skip_not_installed): + like = "bao.sdss_dr16_baoplus_lyxqso" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr16_baoplus_lyxqso) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr16_baoplus_lrg, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) + - def test_sdss_dr12_consensus_bao_camb(packages_path, skip_not_installed): like = "bao.sdss_dr12_consensus_bao" info_likelihood = {like: {}} @@ -119,8 +124,10 @@ def test_sdss_dr12_consensus_bao_classy(packages_path, skip_not_installed): like = "bao.sdss_dr12_consensus_bao" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr12_consensus_bao) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr12_consensus_bao, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) def test_sdss_dr12_consensus_full_shape_camb(packages_path, skip_not_installed): @@ -135,17 +142,17 @@ def test_sdss_dr12_consensus_full_shape_camb(packages_path, skip_not_installed): skip_not_installed=skip_not_installed) -@pytest.mark.skip def test_sdss_dr12_consensus_full_shape_classy(packages_path, skip_not_installed): like = "bao.sdss_dr12_consensus_full_shape" info_likelihood = {like: {}} info_theory = {"classy": None} - chi2_classy = deepcopy(chi2_sdss_dr12_consensus_full_shape) + chi2 = deepcopy(chi2_sdss_dr12_consensus_full_shape) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) # Check sigma8(z=0): it used to fail bc it was computed internally by the theory code # for different redshifts derived = {"sigma8": derived_lowTEB_highTTTEEE["sigma8"]} body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_classy, best_fit_derived=derived, + chi2, best_fit_derived=derived, skip_not_installed=skip_not_installed) @@ -157,14 +164,14 @@ def test_sdss_dr12_consensus_final_camb(packages_path, skip_not_installed): chi2_sdss_dr12_consensus_final, skip_not_installed=skip_not_installed) -@pytest.mark.skip def test_sdss_dr12_consensus_final_classy(packages_path, skip_not_installed): like = "bao.sdss_dr12_consensus_final" info_likelihood = {like: {}} info_theory = {"classy": None} - chi2_classy = deepcopy(chi2_sdss_dr12_consensus_final) + chi2 = deepcopy(chi2_sdss_dr12_consensus_final) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_classy, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) def test_sixdf_2011_bao_camb(packages_path, skip_not_installed): @@ -179,8 +186,10 @@ def test_sixdf_2011_bao_classy(packages_path, skip_not_installed): like = "bao.sixdf_2011_bao" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sixdf_2011_bao) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sixdf_2011_bao, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) def test_sdss_dr7_mgs_camb(packages_path, skip_not_installed): @@ -195,8 +204,10 @@ def test_sdss_dr7_mgs_classy(packages_path, skip_not_installed): like = "bao.sdss_dr7_mgs" info_likelihood = {like: {}} info_theory = {"classy": None} + chi2 = deepcopy(chi2_sdss_dr7_mgs) + chi2["tolerance"] += chi2.pop("classy_extra_tolerance", 0) body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_sdss_dr7_mgs, skip_not_installed=skip_not_installed) + chi2, skip_not_installed=skip_not_installed) # BEST FIT AND REFERENCE VALUES ########################################################## @@ -208,7 +219,8 @@ def test_sdss_dr7_mgs_classy(packages_path, skip_not_installed): chi2_sdss_dr16_baoplus_lrg = { "bao.sdss_dr16_baoplus_lrg": 5.96, "tolerance": 0.04} chi2_sdss_dr16_baoplus_qso = { - "bao.sdss_dr16_baoplus_qso": 8.78, "tolerance": 0.04} + "bao.sdss_dr16_baoplus_qso": 8.78, "tolerance": 0.04, + "classy_extra_tolerance": 0.21} chi2_sdss_dr16_baoplus_lyauto = { "bao.sdss_dr16_baoplus_lyauto": 0.87, "tolerance": 0.04} chi2_sdss_dr16_baoplus_lyxqso = { @@ -216,9 +228,11 @@ def test_sdss_dr7_mgs_classy(packages_path, skip_not_installed): chi2_sdss_dr12_consensus_bao = { "bao.sdss_dr12_consensus_bao": 5.687, "tolerance": 0.04} chi2_sdss_dr12_consensus_full_shape = { - "bao.sdss_dr12_consensus_full_shape": 8.154, "tolerance": 0.02} + "bao.sdss_dr12_consensus_full_shape": 8.154, "tolerance": 0.02, + "classy_extra_tolerance": 0.075} chi2_sdss_dr12_consensus_final = { - "bao.sdss_dr12_consensus_final": 8.051, "tolerance": 0.03} + "bao.sdss_dr12_consensus_final": 8.051, "tolerance": 0.03, + "classy_extra_tolerance": 0.03} chi2_sixdf_2011_bao = { "bao.sixdf_2011_bao": 0.088, "tolerance": 0.02} chi2_sdss_dr7_mgs = { diff --git a/tests/test_cosmo_bicep_keck_2018.py b/tests/test_cosmo_bicep_keck_2018.py index 9907e882a..ff71ed085 100644 --- a/tests/test_cosmo_bicep_keck_2018.py +++ b/tests/test_cosmo_bicep_keck_2018.py @@ -8,7 +8,7 @@ camb_extra.update({"halofit_version": "takahashi"}) classy_extra = deepcopy(cmb_precision["classy"]) classy_extra.update({"non linear": "halofit"}) -classy_extra.update({"halofit_min_k_max": 20}) +classy_extra.update({"nonlinear_min_k_max": 20}) def test_bicep_keck_2018_camb(packages_path, skip_not_installed): @@ -21,9 +21,7 @@ def test_bicep_keck_2018_camb(packages_path, skip_not_installed): def test_bicep_keck_2018_classy(packages_path, skip_not_installed): info_theory = {"classy": {"extra_args": classy_extra}} # extra tolerance for CLASS - chi2_classy = deepcopy(chi2) - chi2_classy["tolerance"] *= 2 - body_of_test(packages_path, test_point, lik_info, info_theory, chi2_classy, + body_of_test(packages_path, test_point, lik_info, info_theory, chi2, extra_model={"primordial": "SFSR_t"}, skip_not_installed=skip_not_installed) diff --git a/tests/test_cosmo_planck_2015.py b/tests/test_cosmo_planck_2015.py index f6bb11805..461b9e201 100644 --- a/tests/test_cosmo_planck_2015.py +++ b/tests/test_cosmo_planck_2015.py @@ -17,7 +17,10 @@ "bbn_predictor": "BBN_fitting_parthenope" }) cmb_precision["classy"].update({ - "non linear": "halofit", + # Something changed in CLASS that makes halofit work differently that it did + # before, and hamcode gives results closer to the original chi2, computed with halofit + # "non linear": "halofit", + "non linear": "hmcode", }) # Derived parameters not understood by CLASS @@ -27,7 +30,7 @@ "DH", "Y_p"] # Small chi2 difference with CLASS (total still <0.5) -classy_extra_tolerance = 0.2 +classy_extra_tolerance = 0.49 def test_planck_2015_t_camb(packages_path, skip_not_installed): @@ -116,8 +119,10 @@ def test_planck_2015_l_classy(packages_path, skip_not_installed): best_fit_derived = deepcopy(derived_lensing) for p in classy_unknown: best_fit_derived.pop(p, None) + chi2_lensing_classy = deepcopy(chi2_lensing) + chi2_lensing_classy["tolerance"] += classy_extra_tolerance body_of_test(packages_path, best_fit, info_likelihood, info_theory, - chi2_lensing, best_fit_derived, + chi2_lensing_classy, best_fit_derived, skip_not_installed=skip_not_installed) diff --git a/tests/test_cosmo_planck_2018.py b/tests/test_cosmo_planck_2018.py index d36cf2edc..5749a7543 100644 --- a/tests/test_cosmo_planck_2018.py +++ b/tests/test_cosmo_planck_2018.py @@ -19,7 +19,7 @@ "DH", "Y_p"] # Small chi2 difference with CLASS (total still <0.5) -classy_extra_tolerance = 0.4 +classy_extra_tolerance = 0.35 # STANDARD ############################################################################### diff --git a/tests/test_cosmo_quantities.py b/tests/test_cosmo_quantities.py new file mode 100644 index 000000000..4e0565b6c --- /dev/null +++ b/tests/test_cosmo_quantities.py @@ -0,0 +1,199 @@ +""" +Testing some quantities not used yet by any internal likelihood. +""" + +import pytest +import numpy as np +from copy import deepcopy + +from cobaya.cosmo_input import base_precision, planck_base_model, create_input +from cobaya.model import get_model +from cobaya.tools import recursive_update, check_2d + +from .test_cosmo_planck_2015 import params_lowTEB_highTTTEEE +from .conftest import install_test_wrapper +from .common import process_packages_path + +fiducial_parameters = deepcopy(params_lowTEB_highTTTEEE) +redshifts = [100, 10, 1, 0] + + +def _get_model_with_requirements_and_eval(theo, reqs, packages_path, skip_not_installed): + planck_base_model_prime = deepcopy(planck_base_model) + planck_base_model_prime["hubble"] = "H" # intercompatibility CAMB/CLASS + info_theory = {theo: {"extra_args": base_precision[theo]}} + info = create_input(planck_names=True, theory=theo, **planck_base_model_prime) + info = recursive_update(info, {"theory": info_theory, "likelihood": {"one": None}}) + info["packages_path"] = process_packages_path(packages_path) + info["debug"] = True + model = install_test_wrapper(skip_not_installed, get_model, info) + eval_parameters = {p: v for p, v in fiducial_parameters.items() + if p in model.parameterization.sampled_params()} + model.add_requirements(reqs) + model.logposterior(eval_parameters) + return model + + +# sigma8(z), fsgima8(z) ################################################################## + +sigma8_values = [0.01072007, 0.0964498, 0.50446177, 0.83063667] +fsigma8_values = [0.01063036, 0.09638032, 0.44306551, 0.43904513] + + +def _test_cosmo_sigma8_fsigma8(theo, packages_path, skip_not_installed): + reqs = {"sigma8_z": {"z": redshifts}, "fsigma8": {"z": redshifts}} + model = _get_model_with_requirements_and_eval( + theo, reqs, packages_path, skip_not_installed) + assert np.allclose(model.theory[theo].get_sigma8_z(redshifts), + sigma8_values, rtol=1e-5 if theo.lower() == "camb" else 5e-4) + # NB: classy tolerance quite high for fsigma8! + # (see also test of bao.sdss_dr16_baoplus_qso) + assert np.allclose(model.theory[theo].get_fsigma8(redshifts), + fsigma8_values, rtol=1e-5 if theo.lower() == "camb" else 1e-2) + + +def test_cosmo_sigma8_fsigma8_camb(packages_path, skip_not_installed): + _test_cosmo_sigma8_fsigma8("camb", packages_path, skip_not_installed) + + +def test_cosmo_sigma8_fsigma8_classy(packages_path, skip_not_installed): + _test_cosmo_sigma8_fsigma8("classy", packages_path, skip_not_installed) + + +# sigma(R, z) ############################################################################ + +z_sigma_R = [0, 2, 5] +R_sigma_R = np.arange(1, 20, 1) +sigma_R_values = { + ("delta_tot", "delta_tot"): [ + [2.91601056, 2.2108047, 1.83763103, 1.59277133, 1.41525858, 1.27819406, + 1.16797698, 1.07694361, 1.000067, 0.93400161, 0.87649166, 0.8259037, + 0.7809405, 0.74068264, 0.70437064, 0.67138275, 0.641286, 0.61370782, + 0.58833036], + [1.2189716, 0.92415734, 0.7681488, 0.66578225, 0.59157052, 0.53426856, + 0.48819049, 0.45013247, 0.41799292, 0.39037322, 0.36633031, 0.34518131, + 0.32638387, 0.30955364, 0.29437308, 0.28058226, 0.26800016, 0.25647104, + 0.24586199], + [0.61849435, 0.46890459, 0.38974492, 0.3378033, 0.30014754, 0.27107188, + 0.2476913, 0.22838016, 0.21207212, 0.19805749, 0.18585777, 0.17512646, + 0.16558837, 0.15704846, 0.14934564, 0.14234799, 0.13596367, 0.13011365, + 0.12473049]], + ("delta_nonu", "delta_nonu"): [ + [2.91601056, 2.2108047, 1.83763103, 1.59277133, 1.41525858, 1.27819406, + 1.16797698, 1.07694361, 1.000067, 0.93400161, 0.87649166, 0.8259037, + 0.7809405, 0.74068264, 0.70437064, 0.67138275, 0.641286, 0.61370782, + 0.58833036], + [1.2189716, 0.92415734, 0.7681488, 0.66578225, 0.59157052, 0.53426856, + 0.48819049, 0.45013247, 0.41799292, 0.39037322, 0.36633031, 0.34518131, + 0.32638387, 0.30955364, 0.29437308, 0.28058226, 0.26800016, 0.25647104, + 0.24586199], + [0.61849435, 0.46890459, 0.38974492, 0.3378033, 0.30014754, 0.27107188, + 0.2476913, 0.22838016, 0.21207212, 0.19805749, 0.18585777, 0.17512646, + 0.16558837, 0.15704846, 0.14934564, 0.14234799, 0.13596367, 0.13011365, + 0.12473049]]} + + +def _test_cosmo_sigma_R(theo, packages_path, skip_not_installed): + vars_pairs = (("delta_tot", "delta_tot"), ("delta_nonu", "delta_nonu")) + reqs = {"sigma_R": {"z": z_sigma_R, "R": R_sigma_R, "vars_pairs": vars_pairs}} + model = _get_model_with_requirements_and_eval( + theo, reqs, packages_path, skip_not_installed) + for pair in vars_pairs: + z_out, R_out, sigma_R_out = model.theory[theo].get_sigma_R() + assert np.allclose(R_out, R_sigma_R) + assert np.allclose(z_out, z_sigma_R) + assert np.allclose(sigma_R_out, sigma_R_values[pair], + rtol=1e-5 if theo.lower() == "camb" else 2e-3) + + +def test_cosmo_sigma_R_camb(packages_path, skip_not_installed): + _test_cosmo_sigma_R("camb", packages_path, skip_not_installed) + + +def test_cosmo_sigma_R_classy(packages_path, skip_not_installed): + _test_cosmo_sigma_R("classy", packages_path, skip_not_installed) + + +# Omega_X(z) ############################################################################# + +Omega_b_values = [0.15172485, 0.15517809, 0.12258897, 0.04920226] +Omega_cdm_values = [0.81730093, 0.83590262, 0.66035382, 0.26503934] +Omega_nu_massive_values = [0.00608926, 0.00452621, 0.00355468, 0.00142649] + + +def _test_cosmo_omega(theo, packages_path, skip_not_installed): + reqs = {"Omega_b": {"z": redshifts}, "Omega_cdm": {"z": redshifts}, + "Omega_nu_massive": {"z": redshifts}} + model = _get_model_with_requirements_and_eval( + theo, reqs, packages_path, skip_not_installed) + assert np.allclose(model.theory[theo].get_Omega_b(redshifts), + Omega_b_values, rtol=1e-5 if theo.lower() == "camb" else 5e-4) + assert np.allclose(model.theory[theo].get_Omega_cdm(redshifts), + Omega_cdm_values, rtol=1e-5 if theo.lower() == "camb" else 5e-4) + assert np.allclose(model.theory[theo].get_Omega_nu_massive(redshifts), + Omega_nu_massive_values, + rtol=1e-5 if theo.lower() == "camb" else 2e-3) + + +def test_cosmo_omega_camb(packages_path, skip_not_installed): + _test_cosmo_omega("camb", packages_path, skip_not_installed) + + +def test_cosmo_omega_classy(packages_path, skip_not_installed): + _test_cosmo_omega("classy", packages_path, skip_not_installed) + + +# angular_diameter_distance_2 ############################################################ + +ang_diam_dist_2_values = [ + 31.59567987, 93.34513188, 127.08027199, 566.97224099, 876.72216398, 1703.62457558] + + +def _test_cosmo_ang_diam_dist_2(theo, packages_path, skip_not_installed): + reqs = {"angular_diameter_distance_2": {"z_pairs": redshifts}} + model = _get_model_with_requirements_and_eval( + theo, reqs, packages_path, skip_not_installed) + redshift_pairs = check_2d(redshifts) + assert np.allclose(model.theory[theo].get_angular_diameter_distance_2(redshift_pairs), + ang_diam_dist_2_values, rtol=1e-5) + + +def test_cosmo_ang_diam_dist_2_camb(packages_path, skip_not_installed): + _test_cosmo_ang_diam_dist_2("camb", packages_path, skip_not_installed) + + +def test_cosmo_ang_diam_dist_2_classy(packages_path, skip_not_installed): + _test_cosmo_ang_diam_dist_2("classy", packages_path, skip_not_installed) + + +# Weyl power spectrum #################################################################### + +var_pair = ("Weyl", "Weyl") +zs = [0, 0.5, 1, 1.5, 2, 3.5] +ks = np.logspace(-4, np.log10(15), 5) +Pkz_values = [[-27.43930416, -24.66947574, -24.57497701, -28.00145562, -33.24503304], + [-27.15442457, -24.38434986, -24.28560236, -28.05638781, -33.27063402], + [-27.05300137, -24.28274865, -24.18057448, -28.25209712, -33.31536626], + [-27.01121008, -24.24081427, -24.13606913, -28.46637302, -33.37454288], + [-26.99148444, -24.22096153, -24.11426728, -28.65883515, -33.44109139], + [-26.97141437, -24.20053966, -24.09009681, -29.05111258, -33.70130814]] + + +def _test_cosmo_weyl_pkz(theo, packages_path, skip_not_installed): + # Similar to what"s requested by DES (but not used by default) + reqs = {"Pk_interpolator": {"z": zs, "k_max": max(ks), "nonlinear": (False, True), + "vars_pairs": var_pair}} + model = _get_model_with_requirements_and_eval( + theo, reqs, packages_path, skip_not_installed) + interp = model.theory[theo].get_Pk_interpolator(var_pair=var_pair, nonlinear=True) + assert np.allclose(Pkz_values, interp.logP(zs, ks), + rtol=1e-5 if theo.lower() == "camb" else 5e-4) + + +def test_cosmo_weyl_pkz_camb(packages_path, skip_not_installed): + _test_cosmo_weyl_pkz("camb", packages_path, skip_not_installed) + + +@pytest.mark.skip +def test_cosmo_weyl_pkz_classy(packages_path, skip_not_installed): + _test_cosmo_weyl_pkz("classy", packages_path, skip_not_installed) diff --git a/tests/test_docs_example_quickstart.py b/tests/test_docs_example_quickstart.py index d0d2e0882..db229bac5 100644 --- a/tests/test_docs_example_quickstart.py +++ b/tests/test_docs_example_quickstart.py @@ -44,6 +44,8 @@ def test_example(): globals_example["info"]["sampler"]["mcmc"] = ( globals_example["info"]["sampler"]["mcmc"] or {}) exec(open(os.path.join(docs_src_folder, "run.py")).read(), globals_example) + # Run the minimizer -- output doesn't matter. Just checking that it does not fail + exec(open(os.path.join(docs_src_folder, "run_min.py")).read(), globals_example) # Analyze and plot -- capture print output stream = StringIO() with stdout_redirector(stream): diff --git a/tests/test_pools.py b/tests/test_pools.py new file mode 100644 index 000000000..1650d88e6 --- /dev/null +++ b/tests/test_pools.py @@ -0,0 +1,70 @@ +""" +Tests for the PoolXD classes, which are used to assist caching of computed quantities +which are a function of a finite set of fixed values. +""" + +import pytest +import numpy as np +from flaky import flaky + +from cobaya.tools import Pool1D, Pool2D + +# Max number of tries per test +max_runs = 3 + +# Number of pool and test points +n_pool = 500 +n_test = 100 +# size of the perturbation rel factor and test abs factor +r_perturb = 1e-16 +a_tol_test = 1e-8 + + +@flaky(max_runs=max_runs, min_passes=1) +def test_pool1d(): + values = np.random.random(n_pool) + pool = Pool1D(values) + test_values = np.random.choice(values, n_test) + r_perturb * np.random.random(n_test) + # At least a duplicate, for robustness + test_values[-1] = test_values[0] + indices = pool.find_indices(test_values) + assert(np.all(np.abs(test_values - pool[indices] < a_tol_test))) + + +def test_pool1d_fail(): + values = np.random.random(1) + pool = Pool1D(values) + test_values = [2] # out of range + with pytest.raises(ValueError): + pool.find_indices(test_values) + + +@flaky(max_runs=max_runs, min_passes=1) +def test_pool2d(from_list=False): + if from_list: + # num of combinations N needs to be ~= n_test + # if list of length m (large): N = (m**2 - m) / 2 ~= m**2 / 2 + n_list = int(np.ceil(np.sqrt(2 * n_pool))) + values = np.random.random(n_list) + else: + values = np.random.random(2 * n_pool).reshape((n_pool, 2)) + pool = Pool2D(values) + test_values = pool.values[np.random.choice(range(len(pool.values)), n_test)] + \ + r_perturb * np.random.random(2 * n_test).reshape((n_test, 2)) + # At least a duplicate, for robustness + test_values[-1] = test_values[0] + indices = pool.find_indices(test_values) + assert(np.all(np.abs(test_values - pool[indices]) < a_tol_test)) + + +@flaky(max_runs=max_runs, min_passes=1) +def test_pool2d_from_list(): + test_pool2d(from_list=True) + + +def test_pool2d_fail(): + values = np.random.random(2) + pool = Pool1D(values) + test_values = [2, 2] # out of range + with pytest.raises(ValueError): + pool.find_indices(test_values)