diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml
index 3a42dcf..915d89d 100644
--- a/.github/workflows/actions.yml
+++ b/.github/workflows/actions.yml
@@ -9,42 +9,24 @@ on:
jobs:
tests:
- runs-on: self-hosted
+ runs-on: ubuntu-22.04
defaults:
run:
shell: bash -l {0}
steps:
- - uses: actions/checkout@v3
+      - uses: actions/checkout@v3
- name: Set up Miniconda
uses: conda-incubator/setup-miniconda@v2
with:
- channels: conda-forge,defaults,mantid
auto-update-conda: true
miniforge-version: latest
- python-version: "3.8"
environment-file: environment.yml
- activate-environment: liquid-ref
- - name: Restore cached conda environment
- id: cache-load
- uses: actions/cache/restore@v3
- with:
- path: /usr/share/miniconda/envs/liquid-ref
- key: ${{ runner.os }}-conda-${{ hashFiles('environment.yml') }}
- - name: Load Environment
- if: steps.cache-load.outputs.cache-hit != 'true'
- run: |
- mamba env update --file environment.yml --prune
- - name: Start xvfb daemon
- run: |
- /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -ac -screen 0 1280x1024x16
- name: Test with pytest
working-directory: ./reduction
run: |
- echo datasearch.directories=/home/cloud/_work/LiquidsReflectometer/LiquidsReflectometer/reduction/tests/data/liquidsreflectometer-data/nexus/ >> ~/.mantid/Mantid.user.properties
- cat ~/.mantid/Mantid.user.properties
git submodule add --force https://code.ornl.gov/sns-hfir-scse/infrastructure/test-data/liquidsreflectometer-data.git tests/data/liquidsreflectometer-data
git submodule update --init
- python -m pytest --cov=. --cov-report=xml --cov-report=term test
+ python -m pytest -vv --cov=. --cov-report=xml --cov-report=term test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
diff --git a/environment.yml b/environment.yml
index 879b7b3..45eab11 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,15 +1,14 @@
name: liquid-ref
channels:
+ - mantid/label/main
- conda-forge
- - default
- - mantid
+ - defaults
dependencies:
- pre-commit
- pytest
- pytest-cov
- numpy
- lmfit
- - python=3.8
- mantidworkbench>=6.7.0
- codecov
- conda-build
diff --git a/reduction/data/template_fbck.xml b/reduction/data/template_fbck.xml
index 5cbe4bf..58f6bcd 100644
--- a/reduction/data/template_fbck.xml
+++ b/reduction/data/template_fbck.xml
@@ -14,6 +14,7 @@
147
N/A
True
+ True
133
150
0
@@ -61,6 +62,7 @@
147
N/A
True
+ True
133
150
0
@@ -108,6 +110,7 @@
147
N/A
True
+ True
133
150
0
@@ -155,6 +158,7 @@
147
N/A
True
+ True
133
150
0
@@ -202,6 +206,7 @@
147
N/A
True
+ True
133
150
0
@@ -249,6 +254,7 @@
147
N/A
True
+ True
126
131
153
@@ -296,6 +302,7 @@
148
N/A
True
+ True
131
151
0
@@ -343,6 +350,7 @@
148
N/A
True
+ True
131
151
0
diff --git a/reduction/lr_reduction/reduction_template_reader.py b/reduction/lr_reduction/reduction_template_reader.py
index 25b8510..e478a7b 100644
--- a/reduction/lr_reduction/reduction_template_reader.py
+++ b/reduction/lr_reduction/reduction_template_reader.py
@@ -21,6 +21,7 @@ def __init__(self):
# Signal selection
self.data_peak_range = [140, 150]
self.subtract_background = True
+ self.two_backgrounds: bool = False
self.background_roi = [137, 153, 0, 0]
self.tof_range = [9600., 21600.]
self.select_tof_range = True
@@ -65,6 +66,20 @@ def __init__(self):
self.incident_medium_index_selected = 0
def from_dict(self, data_dict):
+ r"""
+        Update this object's attributes from a dictionary whose entries map attribute names to attribute values.
+
+ Raises
+ ------
+ ValueError
+ if one entry of the dictionary is not an attribute of this object
+ """
+
+    # check that all keys of data_dict are attributes of object `self`
+ attribute_names = list(vars(self))
+ if not all(key in attribute_names for key in data_dict):
+ raise ValueError("data_dir contains invalid entries")
+ # update attribute values
for k, v in data_dict.items():
setattr(self, k, v)
@@ -78,6 +93,7 @@ def to_xml(self):
_xml += "%s\n" % str(self.data_peak_range[1])
_xml += "N/A\n"
_xml += "%s\n" % str(self.subtract_background)
+ _xml += "%s\n" % str(self.two_backgrounds)
_xml += "%s\n" % str(self.background_roi[0])
_xml += "%s\n" % str(self.background_roi[1])
_xml += "%s\n" % str(self.background_roi[2])
@@ -162,11 +178,15 @@ def from_xml_element(self, instrument_dom):
self.norm_x_range = [getIntElement(instrument_dom, "norm_x_min"),
getIntElement(instrument_dom, "norm_x_max")]
- #background flag
+ # background flag
self.subtract_background = getBoolElement(instrument_dom, "background_flag",
default=self.subtract_background)
- #background from/to pixels
+ # use two backgrounds flag
+ self.two_backgrounds = getBoolElement(instrument_dom, "two_backgrounds_flag",
+ default=self.two_backgrounds)
+
+ # background from/to pixels
self.background_roi = [getIntElement(instrument_dom, "back_roi1_from"),
getIntElement(instrument_dom, "back_roi1_to"),
getIntElement(instrument_dom, "back_roi2_from"),
@@ -191,6 +211,7 @@ def from_xml_element(self, instrument_dom):
# Background subtraction option
self.subtract_norm_background = getBoolElement(instrument_dom, "norm_background_flag",
default=self.subtract_norm_background)
+
self.norm_background_roi = [getIntElement(instrument_dom, "norm_from_back_pixels"),
getIntElement(instrument_dom, "norm_to_back_pixels")]
@@ -312,7 +333,7 @@ def from_xml(xml_str):
data_set.from_xml_element(item)
data_sets.append(data_set)
- if len(data_sets)==0:
+ if len(data_sets) == 0:
data_sets = [ReductionParameters()]
return data_sets
diff --git a/reduction/lr_reduction/template.py b/reduction/lr_reduction/template.py
index d77fab8..87c7f31 100644
--- a/reduction/lr_reduction/template.py
+++ b/reduction/lr_reduction/template.py
@@ -185,6 +185,8 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False,
peak = template_data.data_peak_range
if template_data.subtract_background:
peak_bck = template_data.background_roi
+ if template_data.two_backgrounds is False:
+ peak_bck = peak_bck[0: 2] # retain only the first background
else:
peak_bck = None
@@ -202,7 +204,7 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False,
else:
norm_low_res = None
- # We are not subtrating background for the direct beam
+ # We are not subtracting background for the direct beam
if template_data.subtract_norm_background:
norm_bck = template_data.norm_background_roi
else:
diff --git a/reduction/lr_reduction/utils.py b/reduction/lr_reduction/utils.py
new file mode 100644
index 0000000..f2d03ae
--- /dev/null
+++ b/reduction/lr_reduction/utils.py
@@ -0,0 +1,68 @@
+# standard imports
+from contextlib import contextmanager
+from copy import deepcopy
+from pathlib import Path
+from typing import Union
+
+# third-party libraries
+from mantid.kernel import ConfigService
+
+
+@contextmanager
+def amend_config(
+ new_config: dict = None, data_dir: Union[str, list] = None, data_dir_insert_mode: str = "prepend"
+) -> None:
+ r"""
+ Context manager to safely modify Mantid Configuration Service while
+ the function is executed.
+
+ Parameters
+ ----------
+ new_config
+ (key, value) pairs to substitute in the configuration service
+ data_dir
+ prepend one (when passing a string) or more (when passing a list)
+ directories to the list of data search directories. Alternatively, replace instead of prepend.
+ data_dir_insert_mode
+ How to insert the data directories. Options are: "prepend" (default) and "replace".
+ """
+ modified_keys = list()
+ backup = dict()
+ config = ConfigService.Instance()
+ if new_config is not None:
+ SEARCH_ARCHIVE = "datasearch.searcharchive"
+ if SEARCH_ARCHIVE not in new_config:
+ new_config[SEARCH_ARCHIVE] = "hfir, sns"
+ DEFAULT_FACILITY = "default.facility"
+ if DEFAULT_FACILITY not in new_config:
+ new_config[DEFAULT_FACILITY] = "SNS"
+ for key, val in new_config.items():
+ backup[key] = config[key]
+ config[key] = val # config does not have an 'update' method
+ modified_keys.append(key)
+ if data_dir is not None:
+ data_dirs = (
+ [
+ data_dir,
+ ]
+ if isinstance(data_dir, str)
+ else data_dir
+ )
+ # make sure the data_dirs exists and are directories
+ for path in data_dirs:
+ if Path(path).is_dir() is False:
+ raise ValueError(f"Data directory: {path} does not exist or is not a directory")
+ key = "datasearch.directories"
+ backup[key] = deepcopy(config[key])
+ # prepend or replace our custom data directories to the list of data search directories
+ if data_dir_insert_mode == "prepend":
+ config.setDataSearchDirs(data_dirs + list(config.getDataSearchDirs()))
+ elif data_dir_insert_mode == "replace":
+ config.setDataSearchDirs(data_dirs)
+ else:
+ raise ValueError(f"Invalid data_dir_insert_mode: {data_dir_insert_mode}")
+ try:
+ yield
+ finally:
+ for key in modified_keys:
+ config[key] = backup[key]
diff --git a/reduction/test/conftest.py b/reduction/test/conftest.py
new file mode 100644
index 0000000..5d2683d
--- /dev/null
+++ b/reduction/test/conftest.py
@@ -0,0 +1,17 @@
+# standard imports
+from pathlib import Path
+
+# third-party imports
+import pytest
+
+
+@pytest.fixture(scope="session")
+def nexus_dir() -> str:
+ r"""Absolute path to the event nexus files"""
+ return str(Path(__file__).parent.parent / "tests/data/liquidsreflectometer-data/nexus")
+
+
+@pytest.fixture(scope="session")
+def template_dir() -> str:
+ r"""Absolute path to reduction/data/ directory"""
+ return str(Path(__file__).parent.parent / "data")
diff --git a/reduction/test/test_reduction.py b/reduction/test/test_reduction.py
index 2ef8321..0adce2b 100644
--- a/reduction/test/test_reduction.py
+++ b/reduction/test/test_reduction.py
@@ -1,15 +1,21 @@
+# standard imports
+from pathlib import Path
import os
+# third-party imports
import mantid
import mantid.simpleapi as mtd_api
import numpy as np
+
+# lr_reduction imports
+from lr_reduction import event_reduction, template, workflow
+from lr_reduction.utils import amend_config
+
+
mtd_api.config["default.facility"] = "SNS"
mtd_api.config["default.instrument"] = "REF_L"
-
mantid.kernel.config.setLogLevel(3)
-from lr_reduction import event_reduction, template, workflow
-
def cleanup_partial_files(output_dir, runs):
"""
@@ -21,17 +27,18 @@ def cleanup_partial_files(output_dir, runs):
os.remove(reduced_path)
-def test_info():
+def test_info(nexus_dir):
"""
Test utility functions to get basic info
"""
- ws_sc = mtd_api.Load("REF_L_198409")
+ with amend_config(data_dir=nexus_dir):
+ ws_sc = mtd_api.Load("REF_L_198409")
wl_min, wl_max = event_reduction.get_wl_range(ws_sc)
assert(wl_min == 13.7)
assert(wl_max == 16.3)
-def test_full_reduction():
+def test_full_reduction(nexus_dir):
"""
Test the fill reduction chain
"""
@@ -41,7 +48,8 @@ def test_full_reduction():
d_refl_all = []
first_run = None
for run_number in range(198409, 198417):
- ws_sc = mtd_api.Load("REF_L_%s" % run_number)
+ with amend_config(data_dir=nexus_dir):
+ ws_sc = mtd_api.Load("REF_L_%s" % run_number)
qz_mid, refl, d_refl = template.process_from_template_ws(ws_sc, template_path)
if first_run is None:
@@ -72,7 +80,7 @@ def test_full_reduction():
cleanup_partial_files(output_dir, range(198409, 198417))
-def test_reduce_workflow():
+def test_reduce_workflow(nexus_dir):
template_path = 'data/template.xml'
output_dir = 'data/'
reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt')
@@ -80,7 +88,8 @@ def test_reduce_workflow():
os.remove(reduced_path)
for i in range(198409, 198417):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
workflow.reduce(ws, template_path, output_dir=output_dir,
average_overlap=False)
@@ -102,7 +111,8 @@ def test_reduce_workflow():
cleanup_partial_files(output_dir, range(198409, 198417))
-def test_reduce_functional_bck():
+def test_reduce_functional_bck(nexus_dir, template_dir):
+ os.chdir(Path(template_dir).parent)
template_path = 'data/template_fbck.xml'
output_dir = 'data/'
reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt')
@@ -110,7 +120,8 @@ def test_reduce_functional_bck():
os.remove(reduced_path)
for i in range(198409, 198417):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
workflow.reduce(ws, template_path, output_dir=output_dir,
average_overlap=False,
functional_background=True)
@@ -136,7 +147,7 @@ def test_reduce_functional_bck():
cleanup_partial_files(output_dir, range(198409, 198417))
-def test_reduce_bck_option_mismatch():
+def test_reduce_bck_option_mismatch(nexus_dir):
"""
Ask for functional background but pass by a background range with
only a single region. This will revert to simple averaging over the range.
@@ -148,7 +159,8 @@ def test_reduce_bck_option_mismatch():
os.remove(reduced_path)
for i in range(198409, 198417):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
sequence_number = ws.getRun().getProperty("sequence_number").value[0]
template_data = template.read_template(template_path, sequence_number)
template_data.background_roi = template_data.background_roi[:2]
@@ -174,7 +186,7 @@ def test_reduce_bck_option_mismatch():
cleanup_partial_files(output_dir, range(198409, 198417))
-def test_reduce_workflow_with_overlap_avg():
+def test_reduce_workflow_with_overlap_avg(nexus_dir):
"""
Test the complete working, but this time we average the point in the
overlap regions.
@@ -186,7 +198,8 @@ def test_reduce_workflow_with_overlap_avg():
os.remove(reduced_path)
for i in range(198409, 198417):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
workflow.reduce(ws, template_path, output_dir=output_dir,
average_overlap=True)
@@ -208,12 +221,13 @@ def test_reduce_workflow_with_overlap_avg():
cleanup_partial_files(output_dir, range(198409, 198417))
-def test_quick_reduce():
+def test_quick_reduce(nexus_dir):
"""
Test the quick reduction workflow
"""
- ws = mtd_api.Load("REF_L_201284")
- ws_db = mtd_api.Load("REF_L_201045")
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_201284")
+ ws_db = mtd_api.Load("REF_L_201045")
_refl = workflow.reduce_explorer(ws, ws_db, center_pixel=145, db_center_pixel=145)
reference_path = 'data/reference_r201284_quick.txt'
@@ -224,7 +238,7 @@ def test_quick_reduce():
assert(np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10)
-def test_reduce_workflow_201282():
+def test_reduce_workflow_201282(nexus_dir):
"""
Test to reproduce autoreduction output
"""
@@ -235,7 +249,8 @@ def test_reduce_workflow_201282():
os.remove(reduced_path)
for i in range(201282, 201289):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
workflow.reduce(ws, template_path, output_dir=output_dir,
average_overlap=False)
@@ -254,7 +269,7 @@ def test_reduce_workflow_201282():
assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
-def test_background_subtraction():
+def test_background_subtraction(nexus_dir):
"""
Test with background subtraction off for the data and on for the normalization
"""
@@ -265,7 +280,8 @@ def test_background_subtraction():
os.remove(reduced_path)
for i in range(198388, 198390):
- ws = mtd_api.Load("REF_L_%s" % i)
+ with amend_config(data_dir=nexus_dir):
+ ws = mtd_api.Load("REF_L_%s" % i)
workflow.reduce(ws, template_path, output_dir=output_dir,
average_overlap=False)
diff --git a/reduction/test/test_reduction_template_reader.py b/reduction/test/test_reduction_template_reader.py
new file mode 100644
index 0000000..72b73b4
--- /dev/null
+++ b/reduction/test/test_reduction_template_reader.py
@@ -0,0 +1,29 @@
+# third-party imports
+import pytest
+
+# lr_reduction imports
+from lr_reduction.reduction_template_reader import ReductionParameters
+
+
+class TestReductionParameters:
+
+ def test_two_backgrounds(self):
+ r"""verify the xml dump writes what we want"""
+ redparms = ReductionParameters()
+ redparms.two_backgrounds = True
+ assert "True" in redparms.to_xml()
+
+ def test_from_dict(self):
+ r"""verify method from_dict raises when passed some nonsense"""
+
+ redparms = ReductionParameters()
+ # valid data dictionary
+ redparms.from_dict(dict(two_backgrounds=True))
+ assert redparms.two_backgrounds
+ # invalid data dictionary
+ with pytest.raises(ValueError) as excinfo:
+ redparms.from_dict(dict(nonsense=True))
+ assert "data_dir contains invalid entries" == str(excinfo.value)
+
+if __name__ == "__main__":
+    pytest.main([__file__])
diff --git a/reduction/test/test_time_resolved.py b/reduction/test/test_time_resolved.py
index 34be588..675f47e 100644
--- a/reduction/test/test_time_resolved.py
+++ b/reduction/test/test_time_resolved.py
@@ -1,9 +1,15 @@
+# standard imports
import os
+
+# third-party imports
import numpy as np
+
+# lr_reduction imports
from lr_reduction import time_resolved
+from lr_reduction.utils import amend_config
-def test_reduce_workflow():
+def test_reduce_workflow(nexus_dir):
"""
Test the time-resolved reduction that uses a measured reference.
It is generally used at 30 Hz but it also works at 60 Hz.
@@ -12,11 +18,11 @@ def test_reduce_workflow():
output_dir = 'data/'
reduced_path = 'data/reference_rq_avg_overlap.txt'
ref_data = np.loadtxt(reduced_path).T
-
- reduced = time_resolved.reduce_30Hz_slices(198413, 198413, ref_data_60Hz=reduced_path,
- template_30Hz=template_path,
- time_interval=300, output_dir=output_dir,
- scan_index=5, create_plot=False)
+ with amend_config(data_dir=nexus_dir):
+ reduced = time_resolved.reduce_30Hz_slices(198413, 198413, ref_data_60Hz=reduced_path,
+ template_30Hz=template_path,
+ time_interval=300, output_dir=output_dir,
+ scan_index=5, create_plot=False)
q_long = len(ref_data[0])
q_short = len(reduced[0][0])