Merge pull request #30 from neutrons/better_dt
Implement better dead time correction
mdoucet authored May 1, 2024
2 parents fe4e351 + bac745e commit 788720f
Showing 7 changed files with 118 additions and 119 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -6,6 +6,7 @@ Reduction scripts for the Liquids Reflectometer. This includes both automated re


## Release notes:
- reduction v2.0.26 [05/2024] Implement a better dead time correction using weighted events
- reduction v2.0.25 [04/2024] Use dead time parameters from template
- reduction v2.0.24 [04/2024] Fix issue with errors when using dead time correction
- reduction v2.0.22 [04/2024] Add dead time correction to scaling factor calculation
2 changes: 1 addition & 1 deletion reduction/lr_reduction/DeadTimeCorrection.py
@@ -86,7 +86,7 @@ def PyExec(self):

# We don't compute an error on the dead time correction, so set it to zero
counts_ws.setE(0, 0 * corr)

counts_ws.setDistribution(True)
self.setProperty('OutputWorkspace', counts_ws)


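For reference, the correction computed by `SingleReadoutDeadTimeCorrection` is a multiplicative factor per TOF bin. Below is a minimal sketch of the standard dead-time formulas it is based on (a hypothetical helper, not the algorithm's actual code; rates are assumed to be expressed as events per readout window):

```python
import numpy as np
from scipy.special import lambertw

def dead_time_factor(measured_rate, dead_time, paralyzable=True):
    """Multiplicative correction true_rate / measured_rate (hypothetical helper)."""
    m = np.asarray(measured_rate, dtype=float)
    if paralyzable:
        # Paralyzable detector: m = n * exp(-n * tau)  =>  n = -W(-m * tau) / tau
        n = -np.real(lambertw(-m * dead_time)) / dead_time
    else:
        # Non-paralyzable detector: m = n / (1 + n * tau)  =>  n = m / (1 - m * tau)
        n = m / (1.0 - m * dead_time)
    # Guard against empty bins: no counts means no correction
    return np.where(m > 0, n / np.where(m > 0, m, 1.0), 1.0)
```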
2 changes: 1 addition & 1 deletion reduction/lr_reduction/__init__.py
@@ -1 +1 @@
__version__ = '2.0.25'
__version__ = '2.0.26'
117 changes: 43 additions & 74 deletions reduction/lr_reduction/event_reduction.py
@@ -41,6 +41,44 @@ def get_q_binning(q_min=0.001, q_max=0.15, q_step=-0.02):
return q_min * np.asarray([_step**i for i in range(n_steps)])


def get_dead_time_correction(ws, template_data):
"""
Compute dead time correction to be applied to the reflectivity curve.
The method will also try to load the error events from each of the
data files to ensure that we properly estimate the dead time correction.
:param ws: workspace with raw data to compute correction for
:param template_data: reduction parameters
"""
tof_min = ws.getTofMin()
tof_max = ws.getTofMax()

run_number = ws.getRun().getProperty("run_number").value
error_ws = api.LoadErrorEventsNexus("REF_L_%s" % run_number)
corr_ws = mantid_algorithm_exec(DeadTimeCorrection.SingleReadoutDeadTimeCorrection,
InputWorkspace=ws,
InputErrorEventsWorkspace=error_ws,
Paralyzable=template_data.paralyzable,
DeadTime=template_data.dead_time_value,
TOFStep=template_data.dead_time_tof_step,
TOFRange=[tof_min, tof_max],
OutputWorkspace="corr")
corr_ws = api.Rebin(corr_ws, [tof_min, 10, tof_max])
return corr_ws

def apply_dead_time_correction(ws, template_data):
"""
Apply dead time correction, and ensure that it is done only once
per workspace.
:param ws: workspace with raw data to compute correction for
:param template_data: reduction parameters
"""
    if "dead_time_applied" not in ws.getRun():
corr_ws = get_dead_time_correction(ws, template_data)
ws = api.Multiply(ws, corr_ws, OutputWorkspace=str(ws))
api.AddSampleLog(Workspace=ws, LogName="dead_time_applied", LogText='1', LogType="Number")
return ws


class EventReflectivity(object):
"""
Event based reflectivity calculation.
@@ -238,61 +276,6 @@ def to_dict(self):
dq0=dq0, dq_over_q=dq_over_q, sequence_number=sequence_number,
sequence_id=sequence_id)

def get_dead_time_correction(self):
"""
Compute dead time correction to be applied to the reflectivity curve.
The method will also try to load the error events from each of the
data files to ensure that we properly estimate the dead time correction.
"""
# Scattering workspace
tof_min = self._ws_sc.getTofMin()
tof_max = self._ws_sc.getTofMax()

run_number = self._ws_sc.getRun().getProperty("run_number").value
error_ws = api.LoadErrorEventsNexus("REF_L_%s" % run_number)
corr_ws = mantid_algorithm_exec(DeadTimeCorrection.SingleReadoutDeadTimeCorrection,
InputWorkspace=self._ws_sc,
InputErrorEventsWorkspace=error_ws,
DeadTime=self.dead_time_value,
TOFStep=self.dead_time_tof_step,
Paralyzable=self.paralyzable,
TOFRange=[tof_min, tof_max],
OutputWorkspace="corr")
corr_sc = corr_ws.readY(0)
wl_bins = corr_ws.readX(0) / self.constant

# Direct beam workspace
run_number = self._ws_db.getRun().getProperty("run_number").value
error_ws = api.LoadErrorEventsNexus("REF_L_%s" % run_number)
corr_ws = mantid_algorithm_exec(DeadTimeCorrection.SingleReadoutDeadTimeCorrection,
InputWorkspace=self._ws_db,
InputErrorEventsWorkspace=error_ws,
DeadTime=self.dead_time_value,
TOFStep=self.dead_time_tof_step,
Paralyzable=self.paralyzable,
TOFRange=[tof_min, tof_max],
OutputWorkspace="corr")
corr_db = corr_ws.readY(0)

# Flip the correction since we are going from TOF to Q
dead_time_per_tof = np.flip(corr_sc / corr_db)

# Compute Q for each TOF bin
d_theta = self.gravity_correction(self._ws_sc, wl_bins)
q_values_edges = np.flip(4.0 * np.pi / wl_bins * np.sin(self.theta - d_theta))
q_values = (q_values_edges[1:] + q_values_edges[:-1]) / 2

# Interpolate to estimate the dead time correction at the Q values we measured
q_middle = (self.q_bins[1:] + self.q_bins[:-1]) / 2

dead_time_corr = np.interp(q_middle, q_values, dead_time_per_tof)

# Cleanup
api.DeleteWorkspace(corr_ws)
api.DeleteWorkspace(error_ws)

return dead_time_corr
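The method removed above mapped a TOF-binned correction onto the measured Q grid by interpolation. A minimal numpy sketch of that mapping, with the gravity correction omitted and names hypothetical:

```python
import numpy as np

def interp_correction_to_q(corr_sc, corr_db, wl_bins, theta, q_bins):
    # Ratio of scattering to direct-beam corrections, flipped because
    # increasing TOF corresponds to decreasing Q
    dead_time_per_tof = np.flip(corr_sc / corr_db)
    q_edges = np.flip(4.0 * np.pi / wl_bins * np.sin(theta))
    q_centers = (q_edges[1:] + q_edges[:-1]) / 2
    # Estimate the correction at the centers of the measured Q bins
    q_middle = (q_bins[1:] + q_bins[:-1]) / 2
    return np.interp(q_middle, q_centers, dead_time_per_tof)
```

The event-weighting approach introduced in this commit avoids this interpolation entirely, since each event carries its own correction weight.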

def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False,
clean=False, normalize=True):
"""
@@ -306,11 +289,6 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False,
:param clean: if True, and q_summing is True, the leading artifact will be removed
:param normalize: if True, and tof_weighted is False, normalization will be applied
"""
# First, let's compute the dead-time correction if we need it
# We do this first because the specular calls below may modify the input workspace
if self.dead_time:
dead_time_corr = self.get_dead_time_correction()

if tof_weighted:
self.specular_weighted(q_summing=q_summing, bck_in_q=bck_in_q)
else:
@@ -323,17 +301,6 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False,
self.d_refl = self.d_refl[trim:]
self.q_bins = self.q_bins[trim:]

# Dead time correction
if self.dead_time:
i_max = np.argmax(dead_time_corr[trim:])
i_min = np.argmin(dead_time_corr[trim:])
print("Dead time correction: [%g -> %g] at [%g -> %g]" % (dead_time_corr[trim:][i_min],
dead_time_corr[trim:][i_max],
self.q_bins[i_min],
self.q_bins[i_max]))
self.refl *= dead_time_corr[trim:]
self.d_refl *= dead_time_corr[trim:]

# Remove leading artifact from the wavelength coverage
# Remember that q_bins is longer than refl by 1 because
# it contains bin boundaries
@@ -522,6 +489,7 @@ def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_

# Gravity correction
d_theta = self.gravity_correction(ws, wl_list)
event_weights = evt_list.getWeights()

# Sign will depend on reflect up or down
x_distance = _pixel_width * (j - peak_position)
@@ -531,8 +499,9 @@ def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_

if wl_dist is not None and wl_bins is not None:
wl_weights = 1.0/np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf)
hist_weigths = wl_weights * qz / wl_list
_counts, _ = np.histogram(qz, bins=_q_bins, weights=hist_weigths)
hist_weights = wl_weights * qz / wl_list
hist_weights *= event_weights
_counts, _ = np.histogram(qz, bins=_q_bins, weights=hist_weights)
_norm, _ = np.histogram(qz, bins=_q_bins)
if sum_pixels:
refl += _counts
@@ -541,7 +510,7 @@ def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_
refl[j-peak[0]] += _counts
counts[j-peak[0]] += _norm
else:
_counts, _ = np.histogram(qz, bins=_q_bins)
_counts, _ = np.histogram(qz, bins=_q_bins, weights=event_weights)
if sum_pixels:
refl += _counts
else:
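For comparison, a small numpy sketch of the weighted binning used in `_reflectivity` above, with illustrative values: each event's dead-time weight is multiplied into the histogram weights before binning in Q.

```python
import numpy as np

qz = np.array([0.011, 0.013, 0.012, 0.011])         # per-event Q values
wl_list = np.array([4.2, 3.6, 3.9, 4.2])            # per-event wavelengths
wl_weights = np.array([0.9, 1.1, 1.0, 0.9])         # inverse-flux weights
event_weights = np.array([1.04, 1.01, 1.02, 1.04])  # dead-time weights per event

hist_weights = wl_weights * qz / wl_list * event_weights
q_bins = np.linspace(0.010, 0.014, 5)
counts, _ = np.histogram(qz, bins=q_bins, weights=hist_weights)
norm, _ = np.histogram(qz, bins=q_bins)             # unweighted event counts
```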
7 changes: 7 additions & 0 deletions reduction/lr_reduction/template.py
@@ -155,11 +155,18 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False,
if isinstance(template_data, str):
template_data = read_template(template_data, sequence_number)

if template_data.dead_time:
ws_sc = event_reduction.apply_dead_time_correction(ws_sc, template_data)

# Load normalization run
normalize = normalize and template_data.apply_normalization
if ws_db is None and normalize:
ws_db = api.LoadEventNexus("REF_L_%s" % template_data.norm_file)

# Apply dead time correction
if template_data.dead_time:
ws_db = event_reduction.apply_dead_time_correction(ws_db, template_data)

# If we run in theta-theta geometry, we'll need thi
thi_value = ws_sc.getRun()['thi'].value[0]
ths_value = ws_sc.getRun()['ths'].value[0]
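A hypothetical end-to-end use of the correction applied here, assuming a Mantid environment; the run number and template file name are illustrative:

```python
from mantid.simpleapi import LoadEventNexus
from lr_reduction import event_reduction, template

ws_sc = LoadEventNexus("REF_L_198409")  # illustrative run number
template_data = template.read_template("template.xml", 1)

if template_data.dead_time:
    ws_sc = event_reduction.apply_dead_time_correction(ws_sc, template_data)
    # A second call is a no-op: the "dead_time_applied" sample log short-circuits it
    ws_sc = event_reduction.apply_dead_time_correction(ws_sc, template_data)
```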
1 change: 1 addition & 0 deletions reduction/lr_reduction/workflow.py
@@ -279,6 +279,7 @@ def reduce_explorer(ws, ws_db, theta_pv=None, center_pixel=145, db_center_pixel=

theta = theta_value * np.pi / 180.

#TODO: dead time correction should be applied here
event_refl = event_reduction.EventReflectivity(ws, ws_db,
signal_peak=peak, signal_bck=peak_bck,
norm_peak=norm_peak, norm_bck=norm_bck,