From db872ccd11d4cf0d69fe1a3e74617bcc6f040695 Mon Sep 17 00:00:00 2001
From: James Beilsten-Edmands <30625594+jbeilstenedmands@users.noreply.github.com>
Date: Fri, 25 Aug 2023 15:43:02 +0100
Subject: [PATCH] Extra checks to handle potentially bad ssx data

---
 .../algorithms/scaling/outlier_rejection.py |  5 +++-
 src/dials/algorithms/symmetry/__init__.py   | 25 +++++++++++++++----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/src/dials/algorithms/scaling/outlier_rejection.py b/src/dials/algorithms/scaling/outlier_rejection.py
index 9e94a69ee4..09262fa2fe 100644
--- a/src/dials/algorithms/scaling/outlier_rejection.py
+++ b/src/dials/algorithms/scaling/outlier_rejection.py
@@ -80,7 +80,10 @@ def reject_outliers(reflection_table, experiment, method="standard", zmax=6.0):
 
 def determine_Esq_outlier_index_arrays(Ih_table, experiment, emax=10.0):
     # first calculate normalised intensities and set in the Ih_table.
     intensities = Ih_table.as_miller_array(experiment.crystal.get_unit_cell())
-    normalised_intensities = quasi_normalisation(intensities)
+    try:
+        normalised_intensities = quasi_normalisation(intensities)
+    except AssertionError:
+        return [np.array([], dtype=np.uint64)] * Ih_table.n_datasets
     sel = normalised_intensities.data() > (emax**2)
     n_e2_outliers = sel.count(True)
diff --git a/src/dials/algorithms/symmetry/__init__.py b/src/dials/algorithms/symmetry/__init__.py
index cd1c210ae1..741c581347 100644
--- a/src/dials/algorithms/symmetry/__init__.py
+++ b/src/dials/algorithms/symmetry/__init__.py
@@ -153,7 +153,8 @@ def _normalise(self, method):
             normalise = self.ml_iso_normalisation
         elif method == "ml_aniso":
             normalise = self.ml_aniso_normalisation
-
+        bad_datasets = []
+        normalised_intensities = None
         for i in range(int(flex.max(self.dataset_ids) + 1)):
             logger.info("\n" + "-" * 80 + "\n")
             logger.info("Normalising intensities for dataset %i\n", i + 1)
@@ -171,12 +172,22 @@ def _normalise(self, method):
                     method,
                     exc_info=True,
                 )
-                return
-            if i == 0:
-                normalised_intensities = intensities
+                bad_datasets.append(i)
             else:
-                normalised_intensities = normalised_intensities.concatenate(intensities)
+                if not normalised_intensities:
+                    normalised_intensities = intensities
+                else:
+                    normalised_intensities = normalised_intensities.concatenate(
+                        intensities
+                    )
+        if bad_datasets:
+            sel = flex.bool(self.dataset_ids.size(), True)
+            for i in bad_datasets:
+                bad_sel = self.dataset_ids == i
+                sel.set_selected(bad_sel, False)
+            self.dataset_ids = self.dataset_ids.select(sel)
+        assert self.dataset_ids.size() == normalised_intensities.size()
 
         self.intensities = normalised_intensities.set_info(
             self.intensities.info()
         ).set_observation_type_xray_intensity()
@@ -243,11 +254,15 @@ def _ml_normalisation(intensities, aniso):
         normalisation = absolute_scaling.ml_aniso_absolute_scaling(
             intensities, n_residues=n_residues
         )
+        if not normalisation.p_scale:
+            raise RuntimeError("Unsuccessful normalisation")
         u_star = normalisation.u_star
     else:
         normalisation = absolute_scaling.ml_iso_absolute_scaling(
             intensities, n_residues=n_residues
         )
+        if not (normalisation.b_wilson and normalisation.p_scale):
+            raise RuntimeError("Unsuccessful normalisation")
         u_star = adptbx.b_as_u(
             adptbx.u_iso_as_u_star(intensities.unit_cell(), normalisation.b_wilson)
         )
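
Note on the outlier_rejection.py hunk: the patch treats an AssertionError raised
inside quasi_normalisation() as "this data cannot be normalised" and reports no
E^2 outliers for any dataset, rather than letting the assertion crash the run.
A minimal standalone sketch of that fall-back shape, using numpy in place of the
cctbx/flex types; esq_outliers and the normalise callable here are hypothetical
stand-ins, not DIALS API:

    import numpy as np

    def esq_outliers(intensities, normalise, emax=10.0):
        # Indices of unreasonably strong normalised intensities (E^2 > emax^2).
        try:
            e_sq = normalise(intensities)
        except AssertionError:
            # Degrade to "no outliers" rather than crashing the whole job.
            return np.array([], dtype=np.uint64)
        return np.flatnonzero(e_sq > emax**2).astype(np.uint64)

    # Example call; with well-behaved data this prints an empty array:
    data = np.random.default_rng(7).chisquare(1.0, 500)
    print(esq_outliers(data, lambda x: x / x.mean()))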
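Note on the symmetry/__init__.py hunks: previously one failed normalisation
caused an early return from _normalise; the patch instead records the failing
dataset ids, drops them from self.dataset_ids with a flex.bool selection, and
asserts that the ids and the concatenated intensities stay the same length.
The RuntimeError guards added to _ml_normalisation (null p_scale/b_wilson from
the ML fit) are presumably what feed this skip path. A compact numpy sketch of
the collect-and-filter pattern; normalise_all and normalise_one are
hypothetical names, and the real code operates on cctbx miller arrays:

    import numpy as np

    def normalise_all(dataset_ids, intensities, normalise_one):
        normalised, bad_datasets = [], []
        for i in range(int(dataset_ids.max()) + 1):
            block = intensities[dataset_ids == i]
            try:
                normalised.append(normalise_one(block))
            except RuntimeError:
                # Record the failure and keep going, as the patch does.
                bad_datasets.append(i)
        # Drop ids of failed datasets so they stay in step with the
        # surviving, concatenated intensities.
        dataset_ids = dataset_ids[~np.isin(dataset_ids, bad_datasets)]
        result = np.concatenate(normalised) if normalised else np.empty(0)
        assert dataset_ids.size == result.size
        return dataset_ids, result

    # Example: normalise by block mean, rejecting blocks with a zero mean;
    # dataset 0 fails and is filtered out, datasets 1 and 2 survive.
    ids = np.repeat([0, 1, 2], 4)
    vals = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=float)
    def norm(block):
        if block.mean() == 0:
            raise RuntimeError("Unsuccessful normalisation")
        return block / block.mean()
    print(normalise_all(ids, vals, norm))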