Commit

Extra checks to handle potentially bad ssx data
jbeilstenedmands committed Oct 27, 2023
1 parent a77bd2e commit db872cc
Showing 2 changed files with 24 additions and 6 deletions.
5 changes: 4 additions & 1 deletion src/dials/algorithms/scaling/outlier_rejection.py
@@ -80,7 +80,10 @@ def reject_outliers(reflection_table, experiment, method="standard", zmax=6.0):
 def determine_Esq_outlier_index_arrays(Ih_table, experiment, emax=10.0):
     # first calculate normalised intensities and set in the Ih_table.
     intensities = Ih_table.as_miller_array(experiment.crystal.get_unit_cell())
-    normalised_intensities = quasi_normalisation(intensities)
+    try:
+        normalised_intensities = quasi_normalisation(intensities)
+    except AssertionError:
+        return [np.array([], dtype=np.uint64)] * Ih_table.n_datasets
 
     sel = normalised_intensities.data() > (emax**2)
     n_e2_outliers = sel.count(True)
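The change above means that when quasi_normalisation cannot proceed on sparse SSX data and raises an AssertionError, determine_Esq_outlier_index_arrays now reports no E^2-based outliers instead of crashing. A minimal sketch of the same fallback pattern, using a stand-in normalise callable (safe_esq_outliers and failing_normalise are hypothetical names, not the DIALS API):

import numpy as np

def safe_esq_outliers(intensities, n_datasets, normalise, emax=10.0):
    # Try to normalise; if the normalisation asserts internally, report
    # "no outliers" as one empty uint64 index array per dataset.
    try:
        normalised = normalise(intensities)
    except AssertionError:
        return [np.array([], dtype=np.uint64)] * n_datasets
    sel = normalised > emax**2  # |E|^2 cutoff, as in the diff above
    # The real function splits outlier indices per dataset; a single array
    # is returned here to keep the sketch short.
    return [np.flatnonzero(sel).astype(np.uint64)]

def failing_normalise(values):
    raise AssertionError("not enough reflections to normalise")

print(safe_esq_outliers(np.ones(5), n_datasets=2, normalise=failing_normalise))
# -> [array([], dtype=uint64), array([], dtype=uint64)]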
25 changes: 20 additions & 5 deletions src/dials/algorithms/symmetry/__init__.py
@@ -153,7 +153,8 @@ def _normalise(self, method):
             normalise = self.ml_iso_normalisation
         elif method == "ml_aniso":
             normalise = self.ml_aniso_normalisation
-
+        bad_datasets = []
+        normalised_intensities = None
         for i in range(int(flex.max(self.dataset_ids) + 1)):
             logger.info("\n" + "-" * 80 + "\n")
             logger.info("Normalising intensities for dataset %i\n", i + 1)
@@ -171,12 +172,22 @@
                     method,
                     exc_info=True,
                 )
-                return
-            if i == 0:
-                normalised_intensities = intensities
+                bad_datasets.append(i)
             else:
-                normalised_intensities = normalised_intensities.concatenate(intensities)
+                if not normalised_intensities:
+                    normalised_intensities = intensities
+                else:
+                    normalised_intensities = normalised_intensities.concatenate(
+                        intensities
+                    )
 
+        if bad_datasets:
+            sel = flex.bool(self.dataset_ids.size(), True)
+            for i in bad_datasets:
+                bad_sel = self.dataset_ids == i
+                sel.set_selected(bad_sel, False)
+            self.dataset_ids = self.dataset_ids.select(sel)
+        assert self.dataset_ids.size() == normalised_intensities.size()
         self.intensities = normalised_intensities.set_info(
             self.intensities.info()
         ).set_observation_type_xray_intensity()
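With this change, _normalise no longer returns early when one dataset fails to normalise: the failing dataset index is recorded in bad_datasets, its reflections are dropped from self.dataset_ids via a flex.bool selection, and the size assertion checks that dataset_ids stays in step with the concatenated normalised intensities. A numpy mirror of that selection logic, for illustration only (drop_bad_datasets is a hypothetical helper, not part of DIALS):

import numpy as np

def drop_bad_datasets(dataset_ids, bad_datasets):
    # Keep only entries whose dataset id is not in bad_datasets; this mirrors
    # flex.bool(..., True) + set_selected(bad_sel, False) + select(sel) above.
    sel = np.ones(dataset_ids.size, dtype=bool)
    for i in bad_datasets:
        sel[dataset_ids == i] = False
    return dataset_ids[sel]

dataset_ids = np.array([0, 0, 1, 1, 2, 2])
print(drop_bad_datasets(dataset_ids, bad_datasets=[1]))
# -> [0 0 2 2]  (reflections from the failed dataset are removed)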
@@ -243,11 +254,15 @@ def _ml_normalisation(intensities, aniso):
         normalisation = absolute_scaling.ml_aniso_absolute_scaling(
             intensities, n_residues=n_residues
         )
+        if not normalisation.p_scale:
+            raise RuntimeError("Unsuccessful normalisation")
         u_star = normalisation.u_star
     else:
         normalisation = absolute_scaling.ml_iso_absolute_scaling(
             intensities, n_residues=n_residues
         )
+        if not (normalisation.b_wilson and normalisation.p_scale):
+            raise RuntimeError("Unsuccessful normalisation")
         u_star = adptbx.b_as_u(
             adptbx.u_iso_as_u_star(intensities.unit_cell(), normalisation.b_wilson)
         )
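_ml_normalisation now raises a RuntimeError("Unsuccessful normalisation") when the maximum-likelihood scaling fails to determine p_scale (or b_wilson in the isotropic case), instead of continuing with missing scaling parameters. Assuming the per-dataset loop in _normalise above catches this failure and records the dataset in bad_datasets, a caller-side sketch of the same pattern might look like this (normalise_datasets and ml_normalise are hypothetical names):

def normalise_datasets(datasets, ml_normalise):
    # Normalise each dataset independently; datasets whose ML scaling fails
    # are skipped and reported, rather than aborting the whole analysis.
    good, bad = [], []
    for i, intensities in enumerate(datasets):
        try:
            good.append(ml_normalise(intensities))
        except RuntimeError:
            bad.append(i)
    return good, bad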
