diff --git a/README.md b/README.md index d5d23e1..6d0a77f 100644 --- a/README.md +++ b/README.md @@ -8,4 +8,4 @@ Reduction scripts for the Liquids Reflectometer. This includes both automated re - reduction v2.0.8 [04/2023] Move pre/post cuts to template. - reduction v2.0.9 [04/2023] Subtract normalization background & add x-direction option - reduction v2.0.13 [08/2023] Get correct angle with free liquids - \ No newline at end of file + diff --git a/TODO.md b/TODO.md index aa0c57b..ae8b72c 100644 --- a/TODO.md +++ b/TODO.md @@ -7,4 +7,4 @@ - Catch index error in reduce_30Hz_from_ws() **RefRed** -- Add option to LRDirectBeamSort to use the order of the files are given instead of sorting them. \ No newline at end of file +- Add option to LRDirectBeamSort to use the order of the files are given instead of sorting them. diff --git a/launcher/apps/dynamic_30Hz.py b/launcher/apps/dynamic_30Hz.py index 139b43c..b06e550 100644 --- a/launcher/apps/dynamic_30Hz.py +++ b/launcher/apps/dynamic_30Hz.py @@ -1,15 +1,10 @@ #!/usr/bin/python3 -import sys -import os import json +import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox) - +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QWidget REFERENCE_DIRECTIVE = "Click to choose a 60Hz reference R(Q) file" TEMPLATE_DIRECTIVE = "Click to choose a 30Hz template" @@ -17,24 +12,23 @@ class Dynamic30Hz(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Time-resolved reduction') + self.setWindowTitle("Time-resolved reduction") layout = QGridLayout() self.setLayout(layout) self.settings = QtCore.QSettings() # 30Hz template file - self.choose_template = QPushButton('30Hz template') + self.choose_template = QPushButton("30Hz template") layout.addWidget(self.choose_template, 1, 1) self.template_path = QLabel(self) layout.addWidget(self.template_path, 1, 2) # 60Hz reference file - self.choose_ref = QPushButton('60Hz R(Q) reference') + self.choose_ref = QPushButton("60Hz R(Q) reference") layout.addWidget(self.choose_ref, 2, 1) self.ref_path = QLabel(self) @@ -50,7 +44,7 @@ def __init__(self): # 30Hz data to process self.data_run_number_ledit = QtWidgets.QLineEdit() - #self.data_run_number_ledit.setValidator(QtGui.QIntValidator()) + # self.data_run_number_ledit.setValidator(QtGui.QIntValidator()) layout.addWidget(self.data_run_number_ledit, 4, 1) self.data_run_number_label = QLabel(self) self.data_run_number_label.setText("Enter a 30Hz run number to reduce") @@ -65,20 +59,20 @@ def __init__(self): layout.addWidget(self.time_slice_label, 5, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 6, 1) self.output_dir_label = QLabel(self) layout.addWidget(self.output_dir_label, 6, 2) # Process button - self.perform_reduction_old = QPushButton('Reduce [old]') + self.perform_reduction_old = QPushButton("Reduce [old]") layout.addWidget(self.perform_reduction_old, 7, 1) - self.perform_reduction = QPushButton('Reduce [new]') + self.perform_reduction = QPushButton("Reduce [new]") layout.addWidget(self.perform_reduction, 7, 2) - #self.perform_reduction_q = QPushButton('Reduce [Const-Q binning]') - #layout.addWidget(self.perform_reduction_q, 8, 2) - self.load_settings = QPushButton('Load settings') + # self.perform_reduction_q = 
QPushButton('Reduce [Const-Q binning]') + # layout.addWidget(self.perform_reduction_q, 8, 2) + self.load_settings = QPushButton("Load settings") layout.addWidget(self.load_settings, 8, 2) # connections @@ -87,7 +81,7 @@ def __init__(self): self.choose_output_dir.clicked.connect(self.output_dir_selection) self.perform_reduction_old.clicked.connect(self.reduce_old) self.perform_reduction.clicked.connect(self.reduce_new) - #self.perform_reduction_q.clicked.connect(self.reduce_q) + # self.perform_reduction_q.clicked.connect(self.reduce_q) self.load_settings.clicked.connect(self.load_settings_from_file) # Populate from previous session @@ -95,54 +89,46 @@ def __init__(self): def load_settings_from_file(self): """ - Load the reduction options from a json file produced as the output - of an earlier reduction. This file is saved in the same directory - as the time-resolved reflectivity curves. + Load the reduction options from a json file produced as the output + of an earlier reduction. This file is saved in the same directory + as the time-resolved reflectivity curves. """ - _settings_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.output_dir_label.text(), - 'Settings file (*.json)') + _settings_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.output_dir_label.text(), "Settings file (*.json)") if os.path.isfile(_settings_file): - with open(_settings_file, 'r') as fd: + with open(_settings_file, "r") as fd: options = json.load(fd) - if 'template_30Hz' in options: - self.template_path.setText(options['template_30Hz']) + if "template_30Hz" in options: + self.template_path.setText(options["template_30Hz"]) - if 'ref_data_60Hz' in options: - self.ref_path.setText(options['ref_data_60Hz']) + if "ref_data_60Hz" in options: + self.ref_path.setText(options["ref_data_60Hz"]) - if 'output_dir' in options: - self.output_dir_label.setText(options['output_dir']) + if "output_dir" in options: + self.output_dir_label.setText(options["output_dir"]) - if 'ref_run_30Hz' in options: - self.ref_run_number_ledit.setText(str(options['ref_run_30Hz'])) + if "ref_run_30Hz" in options: + self.ref_run_number_ledit.setText(str(options["ref_run_30Hz"])) - if 'meas_run_30Hz' in options: - self.data_run_number_ledit.setText(str(options['meas_run_30Hz'])) + if "meas_run_30Hz" in options: + self.data_run_number_ledit.setText(str(options["meas_run_30Hz"])) - if 'time_interval' in options: - self.time_slice_ledit.setText(str(options['time_interval'])) + if "time_interval" in options: + self.time_slice_ledit.setText(str(options["time_interval"])) def template_selection(self): - _template_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.template_path.text(), - 'Template file (*.xml)') + _template_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.template_path.text(), "Template file (*.xml)") if os.path.isfile(_template_file): self.template_path.setText(_template_file) def ref_selection(self): - _ref_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.ref_path.text(), - '60Hz reference file (*.txt)') + _ref_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.ref_path.text(), "60Hz reference file (*.txt)") if os.path.isfile(_ref_file): self.ref_path.setText(_ref_file) def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if 
os.path.isdir(_dir): self.output_dir_label.setText(_dir) @@ -162,22 +148,22 @@ def read_settings(self): _out_dir = OUTPUT_DIR_DIRECTIVE self.output_dir_label.setText(_out_dir) - _ref_run = self.settings.value("30Hz_ref_run_number", '') + _ref_run = self.settings.value("30Hz_ref_run_number", "") self.ref_run_number_ledit.setText(_ref_run) - _data_run = self.settings.value("30Hz_data_run_number", '') + _data_run = self.settings.value("30Hz_data_run_number", "") self.data_run_number_ledit.setText(_data_run) - _interval = self.settings.value("30Hz_time_slice", '') + _interval = self.settings.value("30Hz_time_slice", "") self.time_slice_ledit.setText(_interval) def save_settings(self): - self.settings.setValue('30Hz_template', self.template_path.text()) - self.settings.setValue('30Hz_reference', self.ref_path.text()) - self.settings.setValue('30Hz_ref_run_number', self.ref_run_number_ledit.text()) - self.settings.setValue('30Hz_data_run_number', self.data_run_number_ledit.text()) - self.settings.setValue('30Hz_time_slice', self.time_slice_ledit.text()) - self.settings.setValue('30Hz_output_dir', self.output_dir_label.text()) + self.settings.setValue("30Hz_template", self.template_path.text()) + self.settings.setValue("30Hz_reference", self.ref_path.text()) + self.settings.setValue("30Hz_ref_run_number", self.ref_run_number_ledit.text()) + self.settings.setValue("30Hz_data_run_number", self.data_run_number_ledit.text()) + self.settings.setValue("30Hz_time_slice", self.time_slice_ledit.text()) + self.settings.setValue("30Hz_output_dir", self.output_dir_label.text()) def check_inputs(self): error = None @@ -204,29 +190,29 @@ def show_dialog(self, text): def parse_run_list(self, text): """ - Parse the run list string and expand it. + Parse the run list string and expand it. 
""" run_list = [] - for _r in text.split(','): + for _r in text.split(","): try: run_list.append(int(_r)) except: - sub_toks = _r.split('-') + sub_toks = _r.split("-") if len(sub_toks) == 2: - run_list.extend(range(int(sub_toks[0]), int(sub_toks[1])+1)) + run_list.extend(range(int(sub_toks[0]), int(sub_toks[1]) + 1)) return run_list def reduce_old(self): - return self.reduce(reduction_script='scripts/template_reduction.py') + return self.reduce(reduction_script="scripts/template_reduction.py") def reduce_new(self): - return self.reduce(reduction_script='scripts/time_resolved_reduction.py') - + return self.reduce(reduction_script="scripts/time_resolved_reduction.py") + def reduce_q(self): - return self.reduce(reduction_script='scripts/time_resolved_reduction.py', q_summing=True) + return self.reduce(reduction_script="scripts/time_resolved_reduction.py", q_summing=True) - def reduce(self, reduction_script='scripts/time_resolved_reduction.py', q_summing=False): + def reduce(self, reduction_script="scripts/time_resolved_reduction.py", q_summing=False): if not self.check_inputs(): print("Invalid inputs found") return @@ -238,15 +224,19 @@ def reduce(self, reduction_script='scripts/time_resolved_reduction.py', q_summin run_list = self.parse_run_list(self.data_run_number_ledit.text()) for run in run_list: # python3 template_reduction.py dynamic30Hz - args = ['python3', reduction_script, 'dynamic30Hz', - str(run), - self.ref_run_number_ledit.text(), - self.ref_path.text(), - self.template_path.text(), - self.time_slice_ledit.text(), - self.output_dir_label.text()] + args = [ + "python3", + reduction_script, + "dynamic30Hz", + str(run), + self.ref_run_number_ledit.text(), + self.ref_path.text(), + self.template_path.text(), + self.time_slice_ledit.text(), + self.output_dir_label.text(), + ] if not run == run_list[-1]: - args.append('--no-plot') + args.append("--no-plot") if q_summing: - args.append('--qsumming') + args.append("--qsumming") subprocess.run(args) diff --git a/launcher/apps/dynamic_60Hz.py b/launcher/apps/dynamic_60Hz.py index 1741f91..8d165fa 100644 --- a/launcher/apps/dynamic_60Hz.py +++ b/launcher/apps/dynamic_60Hz.py @@ -1,31 +1,25 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox) - +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QWidget TEMPLATE_DIRECTIVE = "Click to choose a 60Hz template" OUTPUT_DIR_DIRECTIVE = "Click to choose an output directory" class Dynamic60Hz(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Time-resolved reduction') + self.setWindowTitle("Time-resolved reduction") layout = QGridLayout() self.setLayout(layout) self.settings = QtCore.QSettings() # 30Hz template file - self.choose_template = QPushButton('Template') + self.choose_template = QPushButton("Template") layout.addWidget(self.choose_template, 1, 1) self.template_path = QLabel(self) @@ -56,14 +50,14 @@ def __init__(self): layout.addWidget(self.idx_label, 6, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 7, 1) self.output_dir_label = QLabel(self) layout.addWidget(self.output_dir_label, 7, 2) # Process button - self.perform_reduction = QPushButton('Reduce') + self.perform_reduction = QPushButton("Reduce") 
layout.addWidget(self.perform_reduction, 8, 1) # connections @@ -75,16 +69,12 @@ def __init__(self): self.read_settings() def template_selection(self): - _template_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.template_path.text(), - 'Template file (*.xml)') + _template_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.template_path.text(), "Template file (*.xml)") if os.path.isfile(_template_file): self.template_path.setText(_template_file) def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if os.path.isdir(_dir): self.output_dir_label.setText(_dir) @@ -99,21 +89,21 @@ def read_settings(self): _out_dir = OUTPUT_DIR_DIRECTIVE self.output_dir_label.setText(_out_dir) - _data_run = self.settings.value("60Hz_data_run_number", '') + _data_run = self.settings.value("60Hz_data_run_number", "") self.data_run_number_ledit.setText(_data_run) - _interval = self.settings.value("60Hz_time_slice", '') + _interval = self.settings.value("60Hz_time_slice", "") self.time_slice_ledit.setText(_interval) - _interval = self.settings.value("60Hz_scan_index", '') + _interval = self.settings.value("60Hz_scan_index", "") self.idx_ledit.setText(_interval) def save_settings(self): - self.settings.setValue('60Hz_template', self.template_path.text()) - self.settings.setValue('60Hz_data_run_number', self.data_run_number_ledit.text()) - self.settings.setValue('60Hz_time_slice', self.time_slice_ledit.text()) - self.settings.setValue('60Hz_scan_index', self.idx_ledit.text()) - self.settings.setValue('60Hz_output_dir', self.output_dir_label.text()) + self.settings.setValue("60Hz_template", self.template_path.text()) + self.settings.setValue("60Hz_data_run_number", self.data_run_number_ledit.text()) + self.settings.setValue("60Hz_time_slice", self.time_slice_ledit.text()) + self.settings.setValue("60Hz_scan_index", self.idx_ledit.text()) + self.settings.setValue("60Hz_output_dir", self.output_dir_label.text()) def check_inputs(self): error = None @@ -146,9 +136,16 @@ def reduce(self): print("Reduce!") # python3 template_reduction.py dynamic60Hz - subprocess.run(['python3', 'scripts/template_reduction.py', 'dynamic60Hz', - self.data_run_number_ledit.text(), - self.template_path.text(), - self.time_slice_ledit.text(), self.output_dir_label.text(), - '--scan_index', self.idx_ledit.text()]) - + subprocess.run( + [ + "python3", + "scripts/template_reduction.py", + "dynamic60Hz", + self.data_run_number_ledit.text(), + self.template_path.text(), + self.time_slice_ledit.text(), + self.output_dir_label.text(), + "--scan_index", + self.idx_ledit.text(), + ] + ) diff --git a/launcher/apps/off_spec.py b/launcher/apps/off_spec.py index e7a57d6..cac46af 100644 --- a/launcher/apps/off_spec.py +++ b/launcher/apps/off_spec.py @@ -1,25 +1,18 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox, - QSpacerItem) - +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QSpacerItem, QWidget DATA_FILE_DIRECTIVE = "Click to choose a file to process" OUTPUT_DIR_DIRECTIVE = "Click to choose an output directory" class OffSpec(QWidget): - def __init__(self): 
QWidget.__init__(self) - self.setWindowTitle('Export off-spec data') + self.setWindowTitle("Export off-spec data") layout = QGridLayout() layout.setColumnStretch(1, 0) layout.setColumnStretch(2, 1) @@ -44,25 +37,23 @@ def __init__(self): layout.addWidget(self.wl_step_label, 2, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 3, 1) self.output_dir_label = QLabel(self) layout.addWidget(self.output_dir_label, 3, 2) - spacer = QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, - QtWidgets.QSizePolicy.Minimum) + spacer = QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) layout.addItem(spacer, 4, 1) # Process button - self.perform_reduction = QPushButton('Process') + self.perform_reduction = QPushButton("Process") self.perform_reduction.setStyleSheet("background-color : green") layout.addWidget(self.perform_reduction, 5, 1) - spacer = QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, - QtWidgets.QSizePolicy.Expanding) + spacer = QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) layout.addItem(spacer, 6, 1) - + # connections self.choose_output_dir.clicked.connect(self.output_dir_selection) self.perform_reduction.clicked.connect(self.reduce) @@ -71,17 +62,15 @@ def __init__(self): self.read_settings() def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if os.path.isdir(_dir): self.output_dir_label.setText(_dir) def read_settings(self): - _run_number = self.settings.value("offspec_run_number", '') + _run_number = self.settings.value("offspec_run_number", "") self.run_number_ledit.setText(_run_number) - _wl_step = self.settings.value("offspec_wl_step", '') + _wl_step = self.settings.value("offspec_wl_step", "") self.wl_step_ledit.setText(_wl_step) _out_dir = self.settings.value("offspec_output_dir", OUTPUT_DIR_DIRECTIVE) @@ -90,9 +79,9 @@ def read_settings(self): self.output_dir_label.setText(_out_dir) def save_settings(self): - self.settings.setValue('offspec_run_number', self.run_number_ledit.text()) - self.settings.setValue('offspec_wl_step', self.wl_step_ledit.text()) - self.settings.setValue('offspec_output_dir', self.output_dir_label.text()) + self.settings.setValue("offspec_run_number", self.run_number_ledit.text()) + self.settings.setValue("offspec_wl_step", self.wl_step_ledit.text()) + self.settings.setValue("offspec_output_dir", self.output_dir_label.text()) def check_inputs(self): error = None @@ -122,6 +111,6 @@ def reduce(self): print("Processing!") - subprocess.run(['python3', 'scripts/off_spec.py', - self.run_number_ledit.text(), self.wl_step_ledit.text(), self.output_dir_label.text()]) - + subprocess.run( + ["python3", "scripts/off_spec.py", self.run_number_ledit.text(), self.wl_step_ledit.text(), self.output_dir_label.text()] + ) diff --git a/launcher/apps/quick_reduce.py b/launcher/apps/quick_reduce.py index ecb62fd..595e48b 100644 --- a/launcher/apps/quick_reduce.py +++ b/launcher/apps/quick_reduce.py @@ -1,25 +1,18 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox, - QSpacerItem) - +from qtpy import 
QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QSpacerItem, QWidget DATA_FILE_DIRECTIVE = "Click to choose a file to process" OUTPUT_DIR_DIRECTIVE = os.path.expanduser("~") class QuickReduce(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Quick reduce') + self.setWindowTitle("Quick reduce") layout = QGridLayout() layout.setColumnStretch(1, 0) layout.setColumnStretch(2, 1) @@ -58,25 +51,23 @@ def __init__(self): layout.addWidget(self.db_peak_pixel_label, 4, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 5, 1) self.output_dir_label = QLabel(self) layout.addWidget(self.output_dir_label, 5, 2) - spacer = QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, - QtWidgets.QSizePolicy.Minimum) + spacer = QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) layout.addItem(spacer, 6, 1) # Process button - self.perform_reduction = QPushButton('Process') + self.perform_reduction = QPushButton("Process") self.perform_reduction.setStyleSheet("background-color : green") layout.addWidget(self.perform_reduction, 7, 1) - spacer = QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, - QtWidgets.QSizePolicy.Expanding) + spacer = QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) layout.addItem(spacer, 8, 1) - + # connections self.choose_output_dir.clicked.connect(self.output_dir_selection) self.perform_reduction.clicked.connect(self.reduce) @@ -85,17 +76,15 @@ def __init__(self): self.read_settings() def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if os.path.isdir(_dir): self.output_dir_label.setText(_dir) def read_settings(self): - _run_number = self.settings.value("quick_run_number", '') + _run_number = self.settings.value("quick_run_number", "") self.run_number_ledit.setText(_run_number) - _db_run_number = self.settings.value("quick_db_run_number", '') + _db_run_number = self.settings.value("quick_db_run_number", "") self.db_run_number_ledit.setText(_db_run_number) _pixel = self.settings.value("quick_pixel", 145) @@ -110,12 +99,12 @@ def read_settings(self): self.output_dir_label.setText(_out_dir) def save_settings(self): - self.settings.setValue('quick_run_number', self.run_number_ledit.text()) - self.settings.setValue('quick_db_run_number', self.db_run_number_ledit.text()) - self.settings.setValue('quick_pixel', self.peak_pixel_ledit.text()) - self.settings.setValue('quick_db_pixel', self.db_peak_pixel_ledit.text()) + self.settings.setValue("quick_run_number", self.run_number_ledit.text()) + self.settings.setValue("quick_db_run_number", self.db_run_number_ledit.text()) + self.settings.setValue("quick_pixel", self.peak_pixel_ledit.text()) + self.settings.setValue("quick_db_pixel", self.db_peak_pixel_ledit.text()) - self.settings.setValue('quick_output_dir', self.output_dir_label.text()) + self.settings.setValue("quick_output_dir", self.output_dir_label.text()) def check_inputs(self): error = None @@ -150,6 +139,15 @@ def reduce(self): print("Processing!") - subprocess.run(['nsd-conda-wrap.sh', 'mantid', 'scripts/quick_reduce.py', - self.run_number_ledit.text(), 
self.db_run_number_ledit.text(), - self.peak_pixel_ledit.text(), self.db_peak_pixel_ledit.text(), self.output_dir_label.text()]) + subprocess.run( + [ + "nsd-conda-wrap.sh", + "mantid", + "scripts/quick_reduce.py", + self.run_number_ledit.text(), + self.db_run_number_ledit.text(), + self.peak_pixel_ledit.text(), + self.db_peak_pixel_ledit.text(), + self.output_dir_label.text(), + ] + ) diff --git a/launcher/apps/reduction.py b/launcher/apps/reduction.py index 9e28d50..2a5755b 100644 --- a/launcher/apps/reduction.py +++ b/launcher/apps/reduction.py @@ -1,30 +1,24 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox) - +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QWidget TEMPLATE_DIRECTIVE = "Click to choose a 60Hz template" class Reduction(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Batch reduction') + self.setWindowTitle("Batch reduction") layout = QGridLayout() self.setLayout(layout) self.settings = QtCore.QSettings() # Standard template file - self.choose_template = QPushButton('Template') + self.choose_template = QPushButton("Template") layout.addWidget(self.choose_template, 1, 1) self.template_path = QLabel(self) @@ -74,7 +68,7 @@ def __init__(self): layout.addWidget(self.average_overlapp_check, 7, 2) # Process button - self.perform_reduction = QPushButton('Reduce') + self.perform_reduction = QPushButton("Reduce") layout.addWidget(self.perform_reduction, 8, 1) # connections @@ -94,9 +88,7 @@ def select_version(self): self.const_q_label.setEnabled(not use_old) def template_selection(self): - _template_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.template_path.text(), - 'Template file (*.xml)') + _template_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.template_path.text(), "Template file (*.xml)") if os.path.isfile(_template_file): self.template_path.setText(_template_file) @@ -106,26 +98,26 @@ def read_settings(self): _template_file = TEMPLATE_DIRECTIVE self.template_path.setText(_template_file) - _first_run = self.settings.value("reduction_first_run_number", '') + _first_run = self.settings.value("reduction_first_run_number", "") self.first_run_number_ledit.setText(_first_run) - _interval = self.settings.value("reduction_last_run_number", '') + _interval = self.settings.value("reduction_last_run_number", "") self.last_run_number_ledit.setText(_interval) _use_old = self.settings.value("reduction_use_old", "false") - self.select_version_check.setChecked(_use_old=='true') + self.select_version_check.setChecked(_use_old == "true") _avg = self.settings.value("reduction_avg_overlap", "true") - self.average_overlapp_check.setChecked(_avg=='true') + self.average_overlapp_check.setChecked(_avg == "true") _const_q = self.settings.value("reduction_const_q", "false") - self.const_q_check.setChecked(_const_q=='true') + self.const_q_check.setChecked(_const_q == "true") def save_settings(self): - self.settings.setValue('reduction_template', self.template_path.text()) - self.settings.setValue('reduction_first_run_number', self.first_run_number_ledit.text()) - self.settings.setValue('reduction_last_run_number', self.last_run_number_ledit.text()) - self.settings.setValue('reduction_use_old', self.select_version_check.isChecked()) - self.settings.setValue('reduction_avg_overlap', 
self.average_overlapp_check.isChecked()) - self.settings.setValue('reduction_const_q', self.const_q_check.isChecked()) + self.settings.setValue("reduction_template", self.template_path.text()) + self.settings.setValue("reduction_first_run_number", self.first_run_number_ledit.text()) + self.settings.setValue("reduction_last_run_number", self.last_run_number_ledit.text()) + self.settings.setValue("reduction_use_old", self.select_version_check.isChecked()) + self.settings.setValue("reduction_avg_overlap", self.average_overlapp_check.isChecked()) + self.settings.setValue("reduction_const_q", self.const_q_check.isChecked()) def check_inputs(self): error = None @@ -152,8 +144,8 @@ def reduce(self): return # Get the IPTS from the template file - toks = self.template_path.text().split('/') - if toks[3].startswith('IPTS'): + toks = self.template_path.text().split("/") + if toks[3].startswith("IPTS"): ipts = toks[3] else: self.show_dialog("The chosen template is not in an IPTS folder, please select another one.") @@ -163,21 +155,24 @@ def reduce(self): print("Reduce!") - options = ['python3', '/SNS/REF_L/shared/batch_reduce.py', - ipts, - self.first_run_number_ledit.text(), - self.last_run_number_ledit.text()] + options = [ + "python3", + "/SNS/REF_L/shared/batch_reduce.py", + ipts, + self.first_run_number_ledit.text(), + self.last_run_number_ledit.text(), + ] if not self.select_version_check.isChecked(): - options.append('new') + options.append("new") options.append(self.template_path.text()) options.append(str(self.average_overlapp_check.isChecked())) options.append(str(self.const_q_check.isChecked())) else: - options.append('old') + options.append("old") options.append(self.template_path.text()) # python3 batch_reduce.py - print(' '.join(options)) + print(" ".join(options)) subprocess.run(options) self.show_dialog("Task completed: please verify!", "Task completed") diff --git a/launcher/apps/refracted.py b/launcher/apps/refracted.py index 02ee65c..3c92aa1 100644 --- a/launcher/apps/refracted.py +++ b/launcher/apps/refracted.py @@ -1,24 +1,18 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox) - +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QWidget OUTPUT_DIR_DIRECTIVE = "Click to choose an output directory" DEFAULT_MATERIAL = "Si" class Refracted(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Refracted beam') + self.setWindowTitle("Refracted beam") layout = QGridLayout() self.setLayout(layout) @@ -33,7 +27,7 @@ def __init__(self): layout.addWidget(self.run_number_label, 1, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 2, 1) self.output_dir_label = QLabel(self) @@ -47,7 +41,7 @@ def __init__(self): layout.addWidget(self.material_label, 3, 2) # Process button - self.perform_reduction = QPushButton('Process') + self.perform_reduction = QPushButton("Process") layout.addWidget(self.perform_reduction, 4, 1) # connections @@ -58,9 +52,7 @@ def __init__(self): self.read_settings() def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", 
self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if os.path.isdir(_dir): self.output_dir_label.setText(_dir) @@ -70,7 +62,7 @@ def read_settings(self): _out_dir = OUTPUT_DIR_DIRECTIVE self.output_dir_label.setText(_out_dir) - _run = self.settings.value("refracted_run_number", '') + _run = self.settings.value("refracted_run_number", "") self.run_number_ledit.setText(_run) _material = self.settings.value("refracted_material", "Si") @@ -79,9 +71,9 @@ def read_settings(self): self.material_ledit.setText(_material) def save_settings(self): - self.settings.setValue('refracted_output_dir', self.output_dir_label.text()) - self.settings.setValue('refracted_run_number', self.run_number_ledit.text()) - self.settings.setValue('refracted_material', self.material_ledit.text()) + self.settings.setValue("refracted_output_dir", self.output_dir_label.text()) + self.settings.setValue("refracted_run_number", self.run_number_ledit.text()) + self.settings.setValue("refracted_material", self.material_ledit.text()) def check_inputs(self): error = None @@ -112,7 +104,13 @@ def reduce(self): print("Process!") # python3 template_reduction.py dynamic60Hz - subprocess.run(['python3', 'scripts/refracted_beam.py', - self.run_number_ledit.text(), self.output_dir_label.text(), - '--material', self.material_ledit.text()]) - + subprocess.run( + [ + "python3", + "scripts/refracted_beam.py", + self.run_number_ledit.text(), + self.output_dir_label.text(), + "--material", + self.material_ledit.text(), + ] + ) diff --git a/launcher/apps/sld_calculator.py b/launcher/apps/sld_calculator.py index ffb6531..45f09a3 100644 --- a/launcher/apps/sld_calculator.py +++ b/launcher/apps/sld_calculator.py @@ -1,19 +1,16 @@ #!/usr/bin/python3 import sys + import periodictable.nsf as nsf import periodictable.xsf as xsf - -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QLabel, QPushButton, QMessageBox) +from qtpy import QtCore, QtGui, QtWidgets +from qtpy.QtWidgets import QGridLayout, QLabel, QMessageBox, QPushButton, QWidget class SLD(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Rigaku XRR reduction') + self.setWindowTitle("Rigaku XRR reduction") layout = QGridLayout() self.setLayout(layout) @@ -47,7 +44,7 @@ def __init__(self): self.output.setReadOnly(True) # Process button - self.calculate = QPushButton('Calculate') + self.calculate = QPushButton("Calculate") layout.addWidget(self.calculate, 5, 1) # connections @@ -57,14 +54,14 @@ def __init__(self): self.read_settings() def read_settings(self): - _composition = self.settings.value("sld_composition", 'Si') + _composition = self.settings.value("sld_composition", "Si") self.composition_ledit.setText(_composition) - _wl = self.settings.value("sld_wavelength", '1.54') + _wl = self.settings.value("sld_wavelength", "1.54") self.wl_ledit.setText(_wl) def save_settings(self): - self.settings.setValue('sld_composition', self.composition_ledit.text()) - self.settings.setValue('sld_wavelength', self.wl_ledit.text()) + self.settings.setValue("sld_composition", self.composition_ledit.text()) + self.settings.setValue("sld_wavelength", self.wl_ledit.text()) def show_dialog(self, text): msgBox = QMessageBox() @@ -87,8 +84,8 @@ def compute_sld(self): sld, im_sld, incoh = nsf.neutron_sld(compound=composition, wavelength=wavelength, density=density) x_sld, x_im_sld = xsf.xray_sld(compound=composition, wavelength=wavelength, density=density) - output_text = "%-15s %6.6f\n" % ("Neutron SLD:", sld) - output_text += "%-15s 
%6.6f\n" % (" Imag SLD:", im_sld) + output_text = "%-15s %6.6f\n" % ("Neutron SLD:", sld) + output_text += "%-15s %6.6f\n" % (" Imag SLD:", im_sld) output_text += "%-15s %6.6f\n\n" % (" Incoh SLD:", incoh) output_text += "%-15s %6.6f\n" % ("X-ray SLD:", x_sld) diff --git a/launcher/apps/xrr.py b/launcher/apps/xrr.py index c3da876..dcf1936 100644 --- a/launcher/apps/xrr.py +++ b/launcher/apps/xrr.py @@ -1,45 +1,39 @@ #!/usr/bin/python3 -import sys import os import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QWidget, QGridLayout, - QFileDialog, QLabel, - QPushButton, QMessageBox) - +from qtpy import QtCore +from qtpy.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox, QPushButton, QWidget DATA_FILE_DIRECTIVE = "Click to choose a file to process" OUTPUT_DIR_DIRECTIVE = "Click to choose an output directory" class XRR(QWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Rigaku XRR reduction') + self.setWindowTitle("Rigaku XRR reduction") layout = QGridLayout() self.setLayout(layout) self.settings = QtCore.QSettings() # Data file - self.choose_data = QPushButton('Data file') + self.choose_data = QPushButton("Data file") layout.addWidget(self.choose_data, 1, 1) self.data_path = QLabel(self) layout.addWidget(self.data_path, 1, 2) # Output directory - self.choose_output_dir = QPushButton('Output directory') + self.choose_output_dir = QPushButton("Output directory") layout.addWidget(self.choose_output_dir, 2, 1) self.output_dir_label = QLabel(self) layout.addWidget(self.output_dir_label, 2, 2) # Process button - self.perform_reduction = QPushButton('Process') + self.perform_reduction = QPushButton("Process") layout.addWidget(self.perform_reduction, 3, 1) # connections @@ -51,16 +45,12 @@ def __init__(self): self.read_settings() def data_selection(self): - _data_file, _ = QFileDialog.getOpenFileName(self, 'Open file', - self.data_path.text(), - 'Rigaku data file (*.ras)') + _data_file, _ = QFileDialog.getOpenFileName(self, "Open file", self.data_path.text(), "Rigaku data file (*.ras)") if os.path.isfile(_data_file): self.data_path.setText(_data_file) def output_dir_selection(self): - _dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', - self.output_dir_label.text(), - QFileDialog.ShowDirsOnly) + _dir = QFileDialog.getExistingDirectory(None, "Select a folder:", self.output_dir_label.text(), QFileDialog.ShowDirsOnly) if os.path.isdir(_dir): self.output_dir_label.setText(_dir) @@ -76,8 +66,8 @@ def read_settings(self): self.output_dir_label.setText(_out_dir) def save_settings(self): - self.settings.setValue('xrr_data_file', self.data_path.text()) - self.settings.setValue('output_dir', self.output_dir_label.text()) + self.settings.setValue("xrr_data_file", self.data_path.text()) + self.settings.setValue("output_dir", self.output_dir_label.text()) def check_inputs(self): error = None @@ -110,6 +100,4 @@ def reduce(self): print("Reduce!") # python3 template_reduction.py dynamic60Hz - subprocess.run(['python3', 'scripts/xrr_reduction.py', - self.data_path.text(), self.output_dir_label.text()]) - + subprocess.run(["python3", "scripts/xrr_reduction.py", self.data_path.text(), self.output_dir_label.text()]) diff --git a/launcher/launcher.py b/launcher/launcher.py index daec811..68c8321 100755 --- a/launcher/launcher.py +++ b/launcher/launcher.py @@ -1,22 +1,15 @@ #!/usr/bin/python3 import sys -import os -import subprocess -from qtpy import QtWidgets, QtGui, QtCore - -from qtpy.QtWidgets import (QApplication, QWidget, 
QGridLayout, - QTabWidget, QFileDialog, QLabel, - QPushButton, QMessageBox) - -from apps.dynamic_60Hz import Dynamic60Hz from apps.dynamic_30Hz import Dynamic30Hz -from apps.xrr import XRR -from apps.reduction import Reduction -from apps.refracted import Refracted -from apps.sld_calculator import SLD +from apps.dynamic_60Hz import Dynamic60Hz from apps.off_spec import OffSpec from apps.quick_reduce import QuickReduce +from apps.reduction import Reduction +from apps.sld_calculator import SLD +from apps.xrr import XRR +from qtpy import QtCore +from qtpy.QtWidgets import QApplication, QGridLayout, QTabWidget, QWidget REFERENCE_DIRECTIVE = "Click to choose a 60Hz reference R(Q) file" TEMPLATE_DIRECTIVE = "Click to choose a 30Hz template" @@ -24,10 +17,9 @@ class ReductionInterface(QTabWidget): - def __init__(self): QWidget.__init__(self) - self.setWindowTitle('Reflectometry Launcher') + self.setWindowTitle("Reflectometry Launcher") layout = QGridLayout() self.setLayout(layout) @@ -76,12 +68,13 @@ def __init__(self): self.setTabText(tab_id, "SLD calculator") # Refracted beam analysis - #tab_id += 1 - #self.refracted_tab = Refracted() - #self.addTab(self.refracted_tab, "Refraction analysis") - #self.setTabText(tab_id, "Refraction analysis") + # tab_id += 1 + # self.refracted_tab = Refracted() + # self.addTab(self.refracted_tab, "Refraction analysis") + # self.setTabText(tab_id, "Refraction analysis") + -if __name__ == '__main__': +if __name__ == "__main__": app = QApplication([]) window = ReductionInterface() window.show() diff --git a/launcher/nr_launcher.desktop b/launcher/nr_launcher.desktop index 1493ce1..ac45c43 100644 --- a/launcher/nr_launcher.desktop +++ b/launcher/nr_launcher.desktop @@ -8,4 +8,3 @@ Name[en_US]=NR Launcher Exec=/SNS/REF_L/shared/launcher/nr_launcher.sh Name=nr_launcher Icon=mate-panel-launcher - diff --git a/launcher/scripts/off_spec.py b/launcher/scripts/off_spec.py index e5e6119..6c470aa 100644 --- a/launcher/scripts/off_spec.py +++ b/launcher/scripts/off_spec.py @@ -1,12 +1,11 @@ -import sys -import os import argparse -import numpy as np - -from matplotlib import pyplot as plt +import os import mantid import mantid.simpleapi as api +import numpy as np +from matplotlib import pyplot as plt + mantid.kernel.config.setLogLevel(3) mantid.ConfigService.Instance().setString("default.instrument", "REF_L") @@ -17,12 +16,12 @@ DEFAULT_4B_SOURCE_DET_DISTANCE = 15.75 -class PixelData(): +class PixelData: source_detector_distance = DEFAULT_4B_SOURCE_DET_DISTANCE det_distance = DEFAULT_4B_SAMPLE_DET_DISTANCE def __init__(self, run_number): - self.ws = api.LoadEventNexus("REF_L_%s" % run_number, OutputWorkspace='r%s' % run_number) + self.ws = api.LoadEventNexus("REF_L_%s" % run_number, OutputWorkspace="r%s" % run_number) self.run_number = run_number self.get_parameters() @@ -51,7 +50,7 @@ def get_parameters(self): self.pixel_width = float(self.ws.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0 - self.tthd = self.ws.getRun()['tthd'].value[0] + self.tthd = self.ws.getRun()["tthd"].value[0] h = 6.626e-34 # m^2 kg s^-1 m = 1.675e-27 # kg @@ -76,40 +75,39 @@ def process(self, output_dir, wavelength_step=1): # Use center of wl bins wl = x[0] / self.constant - wl = (wl[1:] + wl[:-1])/2.0 + wl = (wl[1:] + wl[:-1]) / 2.0 - with open(os.path.join(output_dir, 'r%s-wl.txt' % self.run_number), 'w') as fd: + with open(os.path.join(output_dir, "r%s-wl.txt" % self.run_number), "w") as fd: fd.write(str(self)) fd.write("# pixel\t wavelength\t signal\t error\n") for i in 
np.arange(p_vs_t.shape[0]): for i_wl in range(len(wl)): fd.write("%8g %8g %8g %8g\n" % (i, wl[i_wl], p_vs_t[i][i_wl], err[i][i_wl])) - plot_data(p_vs_t, wl, 'run %s' % self.run_number, - os.path.join(output_dir, 'r%s-counts.png' % self.run_number)) + plot_data(p_vs_t, wl, "run %s" % self.run_number, os.path.join(output_dir, "r%s-counts.png" % self.run_number)) def plot_data(counts, wl, title, file_path, show=True): counts_vs_wl = np.sum(counts, axis=0) counts_vs_pixel = np.sum(counts, axis=1) - fig, ax = plt.subplots(2, 1, figsize=(6,10)) + fig, ax = plt.subplots(2, 1, figsize=(6, 10)) plt.subplot(2, 1, 1) plt.plot(np.arange(counts_vs_pixel.shape[0]), counts_vs_pixel) - plt.title('Total counts per pixel - %s' % title) - plt.xlabel('pixel number') - plt.ylabel('Counts') - ax[0].set_yscale('log') + plt.title("Total counts per pixel - %s" % title) + plt.xlabel("pixel number") + plt.ylabel("Counts") + ax[0].set_yscale("log") plt.subplot(2, 1, 2) plt.plot(wl, counts_vs_wl) - plt.title('Total counts vs wavelength - %s' % title) - plt.xlabel('Wavelength [$\AA$]') - plt.ylabel('Counts') - ax[1].set_yscale('log') + plt.title("Total counts vs wavelength - %s" % title) + plt.xlabel(r"Wavelength [$\AA$]") + plt.ylabel("Counts") + ax[1].set_yscale("log") plt.savefig(file_path) @@ -117,16 +115,12 @@ def plot_data(counts, wl, title, file_path, show=True): plt.show() - if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=True) - parser.add_argument('run_number', type=str, - help='Run number to process') - parser.add_argument('wl_step', type=float, - help='Wavelength bin width', default=0.1) - parser.add_argument('output_dir', type=str, - help='Output directory') + parser.add_argument("run_number", type=str, help="Run number to process") + parser.add_argument("wl_step", type=float, help="Wavelength bin width", default=0.1) + parser.add_argument("output_dir", type=str, help="Output directory") # Parse arguments args = parser.parse_args() diff --git a/launcher/scripts/quick_reduce.py b/launcher/scripts/quick_reduce.py index dc760ed..de69740 100644 --- a/launcher/scripts/quick_reduce.py +++ b/launcher/scripts/quick_reduce.py @@ -1,10 +1,11 @@ -import sys -import os import argparse -import numpy as np +import os +import sys import mantid import mantid.simpleapi as api +import numpy as np + mantid.kernel.config.setLogLevel(3) from matplotlib import pyplot as plt @@ -15,22 +16,21 @@ def process(run_number, db_run_number, peak_pixel, db_peak_pixel, output_dir=None): - def _tof(_ws): tof_min = _ws.getTofMin() tof_max = _ws.getTofMax() _ws = api.Rebin(InputWorkspace=_ws, Params="%s,100,%s" % (tof_min, tof_max)) - y=_ws.extractY() - x=_ws.extractX() + y = _ws.extractY() + x = _ws.extractX() charge = _ws.getRun().getProtonCharge() - tof = (x[0][1:] + x[0][:-1])/2 - counts = np.sum(y, axis=0)/charge + tof = (x[0][1:] + x[0][:-1]) / 2 + counts = np.sum(y, axis=0) / charge return tof, counts - + ws = api.Load("REF_L_%s" % run_number) ws_db = api.Load("REF_L_%s" % db_run_number) - fig, ax = plt.subplots(2, 1, figsize=(10,10)) + fig, ax = plt.subplots(2, 1, figsize=(10, 10)) ax = plt.subplot(2, 1, 1) tof, counts = _tof(ws) @@ -39,36 +39,37 @@ def _tof(_ws): tof, counts = _tof(ws_db) plt.plot(tof, counts, label="DB %s" % db_run_number) - plt.xlabel('TOF') - plt.ylabel('Counts') - ax.set_yscale('linear') - ax.set_xscale('linear') + plt.xlabel("TOF") + plt.ylabel("Counts") + ax.set_yscale("linear") + ax.set_xscale("linear") plt.legend() qz, r, dr = workflow.reduce_explorer(ws, ws_db, center_pixel=peak_pixel, 
db_center_pixel=db_peak_pixel) # Save reduced data np.savetxt(os.path.join(output_dir, "r%s_quick_reduce.txt" % run_number), np.asarray([qz, r, dr]).T) - + ax = plt.subplot(2, 1, 2) plt.errorbar(qz, r, yerr=dr) - plt.xlabel('q [$1/\AA$]') - plt.ylabel('R(q)') - ax.set_yscale('log') - ax.set_xscale('log') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + ax.set_yscale("log") + ax.set_xscale("log") plt.show() if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=True) - parser.add_argument('run_number', type=str, help='Run number to process') - parser.add_argument('db_run_number', type=str, help='Direct beam run number') - parser.add_argument('pixel', type=float, help='Peak pixel') - parser.add_argument('db_pixel', type=float, help='Direct beam peak pixel') - parser.add_argument('output_dir', type=str, - help='Output directory') + parser.add_argument("run_number", type=str, help="Run number to process") + parser.add_argument("db_run_number", type=str, help="Direct beam run number") + parser.add_argument("pixel", type=float, help="Peak pixel") + parser.add_argument("db_pixel", type=float, help="Direct beam peak pixel") + parser.add_argument("output_dir", type=str, help="Output directory") # Parse arguments args = parser.parse_args() - process(args.run_number, args.db_run_number, np.rint(args.pixel).astype(int), np.rint(args.db_pixel).astype(int), output_dir=args.output_dir) + process( + args.run_number, args.db_run_number, np.rint(args.pixel).astype(int), np.rint(args.db_pixel).astype(int), output_dir=args.output_dir + ) diff --git a/launcher/scripts/refracted_beam.py b/launcher/scripts/refracted_beam.py index 2f379a0..c72361d 100644 --- a/launcher/scripts/refracted_beam.py +++ b/launcher/scripts/refracted_beam.py @@ -1,41 +1,37 @@ -import sys -import os import argparse -import numpy as np - -from matplotlib import pyplot as plt -import matplotlib.lines as mlines +import os import mantid import mantid.simpleapi as api +import numpy as np +from matplotlib import pyplot as plt + mantid.kernel.config.setLogLevel(3) import lmfit -from lmfit.models import GaussianModel, Model - +from lmfit.models import GaussianModel SLD = dict(Si=2.07e-6, Quartz=4.18e-6) class Refracted(object): - - def __init__(self, ws, material='Si', tof_bin=200, offset=0.01, pixel_size=0.00072): + def __init__(self, ws, material="Si", tof_bin=200, offset=0.01, pixel_size=0.00072): """ - Initial process of the raw data to organize it. + Initial process of the raw data to organize it. """ - self.run_number = ws.getRun()['run_number'].value + self.run_number = ws.getRun()["run_number"].value # Establish the useful TOF range - self.tof_min = ws.getTofMin()+1100 - self.tof_max = ws.getTofMax()-1100 + self.tof_min = ws.getTofMin() + 1100 + self.tof_max = ws.getTofMax() - 1100 print("TOF range used: %g %g" % (self.tof_min, self.tof_max)) _ws = api.Rebin(InputWorkspace=ws, Params="%s,%s,%s" % (self.tof_min, tof_bin, self.tof_max)) self.counts = _ws.extractY() - + # The x-axis (TOF in this case) in Mantid is stored with bin boundaries. 
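# A minimal numpy sketch (illustrative only, not part of the patch) of the bin-boundary
# handling used throughout these scripts: Mantid stores the workspace X axis as bin
# boundaries, so N+1 boundary values describe N bins, and the code below this comment
# converts them to bin centers.  The TOF values here are hypothetical (microseconds):
import numpy as np

tof_boundaries = np.array([10000.0, 10200.0, 10400.0, 10600.0])  # 4 edges -> 3 bins
tof_center = (tof_boundaries[1:] + tof_boundaries[:-1]) / 2.0    # [10100., 10300., 10500.]
print(tof_center)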
self.tof_boundaries = _ws.extractX()[0] - self.tof_center = (self.tof_boundaries[1:]+self.tof_boundaries[:-1])/2.0 + self.tof_center = (self.tof_boundaries[1:] + self.tof_boundaries[:-1]) / 2.0 print("TOF bin boundaries: %s" % len(self.tof_boundaries)) # Reshape the count array to map it to the detector geometry (256x304 pixels) @@ -49,17 +45,17 @@ def __init__(self, ws, material='Si', tof_bin=200, offset=0.01, pixel_size=0.000 self.y_counts = np.sum(self.p_vs_t, axis=1) # pixel number (center of the pixel) - self.y_pixel = np.arange(self.y_counts.shape[0])+0.5 + self.y_pixel = np.arange(self.y_counts.shape[0]) + 0.5 # Collect useful meta-data self.sld = SLD[material] - self.theta_sample = ws.getRun()['ths'].value[0] - tthd = ws.getRun()['tthd'].value[0] - self.wl_request = ws.getRun()['LambdaRequest'].value[0] + self.theta_sample = ws.getRun()["ths"].value[0] + tthd = ws.getRun()["tthd"].value[0] + self.wl_request = ws.getRun()["LambdaRequest"].value[0] - _n=1-2.07e-6*self.wl_request**2/2/np.pi - theta_c = self.wl_request*np.sqrt(self.sld/np.pi) - self.qc = 4*np.sqrt(np.pi*self.sld) + _n = 1 - 2.07e-6 * self.wl_request**2 / 2 / np.pi + theta_c = self.wl_request * np.sqrt(self.sld / np.pi) + self.qc = 4 * np.sqrt(np.pi * self.sld) print("ths=%g; tthd=%g" % (self.theta_sample, tthd)) print("n=%g" % _n) @@ -68,7 +64,7 @@ def __init__(self, ws, material='Si', tof_bin=200, offset=0.01, pixel_size=0.000 # Computed values self.offset = offset - #self.theta = self.theta_sample + self.offset + # self.theta = self.theta_sample + self.offset self.pixel_size = pixel_size # Useful constants @@ -82,36 +78,34 @@ def __init__(self, ws, material='Si', tof_bin=200, offset=0.01, pixel_size=0.000 self.wl_boundaries = self.tof_boundaries / self.constant self.wl_center = self.tof_center / self.constant - def fit_refracted_pixel(self): - """ - Fit the refracted peak as a function of wavelength and obtain the center pixel and width. - Using this we will be able to find the best offset to map the center pixel to theory - as a function of wavelength - - TODO: auomatically determine the fitting range + """ + Fit the refracted peak as a function of wavelength and obtain the center pixel and width. + Using this we will be able to find the best offset to map the center pixel to theory + as a function of wavelength + + TODO: auomatically determine the fitting range """ refracted_pixel = [] d_refracted_pixel = [] - # Compute a good starting point. When fitting the angle, the best guess is - #best_guess = -self.theta_sample/2.0 - best_guess = self.center_db + (self.center_r-self.center_db)/4.0 - + # Compute a good starting point. 
When fitting the angle, the best guess is + # best_guess = -self.theta_sample/2.0 + best_guess = self.center_db + (self.center_r - self.center_db) / 4.0 + # Determine a good fitting range y_pixel_min = int(np.ceil(self.center_db + 5)) y_pixel_max = int(np.floor(self.center_r - 15)) for i in range(self.p_vs_t.shape[1]): - gauss = GaussianModel(prefix='g_') + gauss = GaussianModel(prefix="g_") pars = gauss.make_params(amplitude=100, center=best_guess, sigma=5) - fit = gauss.fit(self.p_vs_t[y_pixel_min:y_pixel_max,i], pars, method='leastsq', - x=self.y_pixel[y_pixel_min:y_pixel_max]) + fit = gauss.fit(self.p_vs_t[y_pixel_min:y_pixel_max, i], pars, method="leastsq", x=self.y_pixel[y_pixel_min:y_pixel_max]) - a=fit.params['g_amplitude'].value - c=fit.params['g_center'].value - width=fit.params['g_sigma'].value + fit.params["g_amplitude"].value + c = fit.params["g_center"].value + width = fit.params["g_sigma"].value refracted_pixel.append(c) d_refracted_pixel.append(width) @@ -123,92 +117,94 @@ def fit_refracted_pixel(self): return refracted_pixel, d_refracted_pixel def pixel_prediction(self, wl, offset=0.01): - """ Compute the expected pixel position given a wavelength and offset parameter """ + """Compute the expected pixel position given a wavelength and offset parameter""" # First get the refracted angle _angle = self.compute_refracted_angle(wl, offset) # Compute the expected pixel position _theta_specular = self.theta_sample + offset - return np.sin(np.pi/180*(_angle + _theta_specular))*self.det_distance/self.pixel_size+self.center_db + return np.sin(np.pi / 180 * (_angle + _theta_specular)) * self.det_distance / self.pixel_size + self.center_db def pixel_prediction_with_size(self, wl, offset=0.01, pixel_size=0.00072): - """ Compute the expected pixel position given a wavelength and offset parameter """ + """Compute the expected pixel position given a wavelength and offset parameter""" # First get the refracted angle _angle = self.compute_refracted_angle(wl, offset) # Compute the expected pixel position _theta_specular = self.theta_sample + offset - return np.sin(np.pi/180*(_angle + _theta_specular))*self.det_distance/pixel_size+self.center_db + return np.sin(np.pi / 180 * (_angle + _theta_specular)) * self.det_distance / pixel_size + self.center_db def compute_angle_from_pixel(self, pixels, offset, pixel_size=None): - """ Compute the scattering angle from the pixel coordinate """ + """Compute the scattering angle from the pixel coordinate""" if pixel_size is None: pixel_size = self.pixel_size _theta_specular = self.theta_sample + offset - return 180.0/np.pi * np.arcsin((pixel_size*(pixels-self.center_db)/self.det_distance)) - _theta_specular + return 180.0 / np.pi * np.arcsin((pixel_size * (pixels - self.center_db) / self.det_distance)) - _theta_specular def compute_refracted_angle(self, wl, offset): - """ Compute refracted angle as a function of wavelength - """ + """Compute refracted angle as a function of wavelength""" _theta_specular = self.theta_sample + offset n = 1 - self.sld * wl**2 / 2 / np.pi - return -np.arccos(np.cos(np.pi/180*_theta_specular)/n) * 180/np.pi + return -np.arccos(np.cos(np.pi / 180 * _theta_specular) / n) * 180 / np.pi def residuals(self, pars, wl, data, err): parvals = pars.valuesdict() - offset = parvals['offset'] - pixel_size = parvals['pixel_size'] + offset = parvals["offset"] + pixel_size = parvals["pixel_size"] # First get the refracted angle _angle = self.compute_refracted_angle(wl, offset) # Compute the expected pixel position _theta_specular = 
self.theta_sample + offset - _refr_pixel = np.sin(np.pi/180*(_angle + _theta_specular))*self.det_distance/pixel_size+self.center_db + _refr_pixel = np.sin(np.pi / 180 * (_angle + _theta_specular)) * self.det_distance / pixel_size + self.center_db # Compute the reflected pixel position _theta_specular = self.theta_sample + offset - _spec_pixel = np.sin(np.pi/180*(2*_theta_specular))*self.det_distance/pixel_size+self.center_db + _spec_pixel = np.sin(np.pi / 180 * (2 * _theta_specular)) * self.det_distance / pixel_size + self.center_db - resid = (_refr_pixel-data)**2/err**2 + (_spec_pixel - self.center_r)**2/self.d_center_r**2 + resid = (_refr_pixel - data) ** 2 / err**2 + (_spec_pixel - self.center_r) ** 2 / self.d_center_r**2 return np.sqrt(resid) def fit_offset_and_pixel(self, margin=0.5): """ - Fit the position of the refracted beam to extract the calibration offset to ths - and the pixel size - - - margin is a range close to the critical edge where computing the refracted angle - might return a NaN. + Fit the position of the refracted beam to extract the calibration offset to ths + and the pixel size + + - margin is a range close to the critical edge where computing the refracted angle + might return a NaN. """ # Compute wavelength for Qc - wl_c = 4*np.pi*np.sin(np.pi/180*(self.theta_sample + self.offset))/self.qc - margin + wl_c = 4 * np.pi * np.sin(np.pi / 180 * (self.theta_sample + self.offset)) / self.qc - margin wl = self.tof_center / self.constant - index_c = np.max(np.where(wl _min_q-_tolerance) & (data_60Hz[0] < _max_q+_tolerance)))[0] - _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q-_tolerance) & (r_meas[0] < _max_q+_tolerance)))[0] - _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q-_tolerance) & (r_ref[0] < _max_q+_tolerance)))[0] + + _q_idx_60 = np.asarray(np.where((data_60Hz[0] > _min_q - _tolerance) & (data_60Hz[0] < _max_q + _tolerance)))[0] + _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q - _tolerance) & (r_meas[0] < _max_q + _tolerance)))[0] + _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q - _tolerance) & (r_ref[0] < _max_q + _tolerance)))[0] if not data_60Hz[0][_q_idx_60].shape[0] == r_meas[0][_q_idx_ref30].shape[0]: print("60Hz reference may have been reduced with different binning!") # Confirm identical binning - _sum = np.sum(data_60Hz[0][_q_idx_60]-r_ref[0][_q_idx_ref30]) - if _sum > r_ref[0][0]/100: + _sum = np.sum(data_60Hz[0][_q_idx_60] - r_ref[0][_q_idx_ref30]) + if _sum > r_ref[0][0] / 100: print("Binning 60Hz and ref 30Hz not identical!") - - _sum = np.sum(data_60Hz[0][_q_idx_60]-r_meas[0][_q_idx_meas30]) - if _sum > r_ref[0][0]/100: + _sum = np.sum(data_60Hz[0][_q_idx_60] - r_meas[0][_q_idx_meas30]) + if _sum > r_ref[0][0] / 100: print("Binning 60Hz and meas 30Hz not identical!") - - r_q_final = r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60] - dr_q_final = np.sqrt((r_meas[2][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[2][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]**2*data_60Hz[1][_q_idx_60]*r_ref[2][_q_idx_ref30])**2) + r_q_final = r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60] + + dr_q_final = np.sqrt( + (r_meas[2][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[2][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] ** 2 * data_60Hz[1][_q_idx_60] * 
r_ref[2][_q_idx_ref30]) ** 2 + ) print("Q range: %s - %s" % (r_meas[0][0], r_meas[0][_q_idx_meas30][-1])) q = r_meas[0][_q_idx_meas30] @@ -206,34 +205,57 @@ def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, sca return np.asarray([q[_idx], r_q_final[_idx], dr_q_final[_idx], dq]) -def reduce_30Hz_slices(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, template_reference=None): - +def reduce_30Hz_slices( + meas_run_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, +): meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) - return reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=scan_index, create_plot=create_plot, - template_reference=template_reference) + return reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=scan_index, + create_plot=create_plot, + template_reference=template_reference, + ) -def reduce_60Hz_slices(meas_run, template_file, - time_interval, output_dir, scan_index=1, create_plot=True): +def reduce_60Hz_slices(meas_run, template_file, time_interval, output_dir, scan_index=1, create_plot=True): meas_ws = api.LoadEventNexus("REF_L_%s" % meas_run) - return reduce_60Hz_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=scan_index, create_plot=create_plot) + return reduce_60Hz_slices_ws(meas_ws, template_file, time_interval, output_dir, scan_index=scan_index, create_plot=create_plot) + -def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, - template_reference=None): +def reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, +): """ - Perform 30Hz reduction - @param meas_ws_30Hz: workspace of the data we want to reduce - @param ref_ws_30Hz: workspace of the reference data, take with the same config - @param ref_data_60Hz: file path of the reduce data file at 60Hz - @param template_30Hz: file path of the template file for 30Hz - @param time_interval: time step in seconds - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws_30Hz: workspace of the data we want to reduce + @param ref_run_30Hz: run number of the reference 30Hz data, taken with the same configuration as the sample + @param ref_data_60Hz: file path of the reduced data file at 60Hz + @param template_30Hz: file path of the template file for 30Hz + @param time_interval: time step in seconds + @param scan_index: scan index to use within the template.
""" # Load the template print("Reading template") @@ -248,16 +270,16 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 # Reduce the sample data at 30Hz print("Reading sample data at 30Hz") - #meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) + # meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws_30Hz.getRun()['duration'].value + duration = meas_ws_30Hz.getRun()["duration"].value except: duration = 0 try: - meas_run_30Hz = meas_ws_30Hz.getRun()['run_number'].value + meas_run_30Hz = meas_ws_30Hz.getRun()["run_number"].value except: meas_run_30Hz = 0 @@ -265,17 +287,19 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws_30Hz, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws_30Hz, + api.FilterEvents( + InputWorkspace=meas_ws_30Hz, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, - OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = wsgroup.getNames() @@ -289,30 +313,34 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 tmpws = api.mtd[name] print("workspace %s has %d events" % (name, tmpws.getNumberEvents())) try: - _reduced = reduce_30Hz_from_ws(tmpws, ref_ws_30Hz, data_60Hz, template_data, - scan_index=scan_index, template_reference=template_reference) + _reduced = reduce_30Hz_from_ws( + tmpws, ref_ws_30Hz, data_60Hz, template_data, scan_index=scan_index, template_reference=template_reference + ) reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run_30Hz, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run_30Hz, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print(sys.exc_info()[0]) total_time += time_interval if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run_30Hz)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run_30Hz), + ) return reduced -def reduce_60Hz_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=1, create_plot=True): + +def reduce_60Hz_slices_ws(meas_ws, template_file, time_interval, output_dir, scan_index=1, create_plot=True): """ - Perform 30Hz reduction - @param meas_ws: workspace of the data we want to reduce - @param template_file: autoreduction template file - @param time_interval: time step in seconds - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws: workspace of the data we want to reduce + @param template_file: autoreduction template file + @param time_interval: time step in seconds + @param scan_index: scan index to use within the template. 
""" # Load the template @@ -322,11 +350,11 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws.getRun()['duration'].value + duration = meas_ws.getRun()["duration"].value except: duration = 0 try: - meas_run = meas_ws.getRun()['run_number'].value + meas_run = meas_ws.getRun()["run_number"].value except: meas_run = 0 @@ -334,17 +362,19 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws, + api.FilterEvents( + InputWorkspace=meas_ws, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, - OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = wsgroup.getNames() @@ -356,39 +386,42 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, try: _reduced = reduce_with_mantid(tmpws, template_data, apply_scaling_factor=True) reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print(sys.exc_info()[0]) total_time += time_interval if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run), + ) return reduced + def plot_slices(reduced, title, time_interval, file_path, offset=10): - fig, ax = plt.subplots(figsize=(6,6)) + fig, ax = plt.subplots(figsize=(6, 6)) total_time = 0 - _running_offset = 1. 
+ _running_offset = 1.0 for _data in reduced: qz, refl, d_refl, _ = _data - plt.errorbar(qz, refl*_running_offset, yerr=d_refl*_running_offset, markersize=4, marker='o', - label='T=%g s' % total_time) + plt.errorbar(qz, refl * _running_offset, yerr=d_refl * _running_offset, markersize=4, marker="o", label="T=%g s" % total_time) total_time += time_interval _running_offset *= offset plt.legend() plt.title(title) - plt.xlabel('q [$1/\AA$]') - plt.ylabel('R(q)') - ax.set_yscale('log') - ax.set_xscale('log') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + ax.set_yscale("log") + ax.set_xscale("log") plt.show() plt.savefig(file_path) @@ -396,49 +429,47 @@ def plot_slices(reduced, title, time_interval, file_path, offset=10): if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=True) - subparsers = parser.add_subparsers(dest='command', help='Available commands') + subparsers = parser.add_subparsers(dest="command", help="Available commands") # Time-resolved at 30Hz - dynanic30_parser = subparsers.add_parser('dynamic30Hz', help='Reduce time-resolved 30Hz [-h for help]') - dynanic30_parser.add_argument('meas_run_30Hz', type=int, - help='Run number for the data to be processed') - dynanic30_parser.add_argument('ref_run_30Hz', type=str, - help='Run number for the reference 30Hz data, measured at the same settings as the data to be processed') - dynanic30_parser.add_argument('ref_data_60Hz', type=str, - help='Reference R(Q), measured at 60Hz') - dynanic30_parser.add_argument('template_30Hz', type=str, - help='File path for the 30Hz reduction template') - dynanic30_parser.add_argument('time_interval', type=float, - help='Time interval to use, in seconds') - dynanic30_parser.add_argument('output_dir', type=str, - help='Output directory') - dynanic30_parser.add_argument('--scan_index', type=int, dest='scan_index', - help='Template scan index', required=False, default=1) - dynanic30_parser.add_argument('--no-plot', dest='create_plot', action='store_false') + dynanic30_parser = subparsers.add_parser("dynamic30Hz", help="Reduce time-resolved 30Hz [-h for help]") + dynanic30_parser.add_argument("meas_run_30Hz", type=int, help="Run number for the data to be processed") + dynanic30_parser.add_argument( + "ref_run_30Hz", type=str, help="Run number for the reference 30Hz data, measured at the same settings as the data to be processed" + ) + dynanic30_parser.add_argument("ref_data_60Hz", type=str, help="Reference R(Q), measured at 60Hz") + dynanic30_parser.add_argument("template_30Hz", type=str, help="File path for the 30Hz reduction template") + dynanic30_parser.add_argument("time_interval", type=float, help="Time interval to use, in seconds") + dynanic30_parser.add_argument("output_dir", type=str, help="Output directory") + dynanic30_parser.add_argument("--scan_index", type=int, dest="scan_index", help="Template scan index", required=False, default=1) + dynanic30_parser.add_argument("--no-plot", dest="create_plot", action="store_false") dynanic30_parser.set_defaults(create_plot=True) # Time-resolved at 60Hz - dynanic60_parser = subparsers.add_parser('dynamic60Hz', help='Reduce time-resolved 60Hz [-h for help]') - dynanic60_parser.add_argument('meas_run_60Hz', type=int, - help='Run number for the data to be processed') - dynanic60_parser.add_argument('template_60Hz', type=str, - help='File path for the 60Hz reduction template') - dynanic60_parser.add_argument('time_interval', type=float, - help='Time interval to use, in seconds') - dynanic60_parser.add_argument('output_dir', type=str, - 
help='Output directory') - dynanic60_parser.add_argument('--scan_index', type=int, dest='scan_index', - help='Template scan index', required=True, default=1) + dynanic60_parser = subparsers.add_parser("dynamic60Hz", help="Reduce time-resolved 60Hz [-h for help]") + dynanic60_parser.add_argument("meas_run_60Hz", type=int, help="Run number for the data to be processed") + dynanic60_parser.add_argument("template_60Hz", type=str, help="File path for the 60Hz reduction template") + dynanic60_parser.add_argument("time_interval", type=float, help="Time interval to use, in seconds") + dynanic60_parser.add_argument("output_dir", type=str, help="Output directory") + dynanic60_parser.add_argument("--scan_index", type=int, dest="scan_index", help="Template scan index", required=True, default=1) # Parse arguments args = parser.parse_args() - if args.command=='dynamic30Hz': + if args.command == "dynamic30Hz": print("Time-resolved reduction at 30Hz: run %s" % args.meas_run_30Hz) - reduced = reduce_30Hz_slices(args.meas_run_30Hz, args.ref_run_30Hz, args.ref_data_60Hz, args.template_30Hz, - time_interval=args.time_interval, output_dir=args.output_dir, - scan_index=args.scan_index, create_plot=args.create_plot) - elif args.command=='dynamic60Hz': + reduced = reduce_30Hz_slices( + args.meas_run_30Hz, + args.ref_run_30Hz, + args.ref_data_60Hz, + args.template_30Hz, + time_interval=args.time_interval, + output_dir=args.output_dir, + scan_index=args.scan_index, + create_plot=args.create_plot, + ) + elif args.command == "dynamic60Hz": print("Time-resolved reduction at 60Hz: run %s" % args.meas_run_60Hz) - reduced = reduce_60Hz_slices(args.meas_run_60Hz, args.template_60Hz, - time_interval=args.time_interval, output_dir=args.output_dir, scan_index=args.scan_index) + reduced = reduce_60Hz_slices( + args.meas_run_60Hz, args.template_60Hz, time_interval=args.time_interval, output_dir=args.output_dir, scan_index=args.scan_index + ) diff --git a/launcher/scripts/time_resolved_reduction.py b/launcher/scripts/time_resolved_reduction.py index 8c70c29..af53e1a 100644 --- a/launcher/scripts/time_resolved_reduction.py +++ b/launcher/scripts/time_resolved_reduction.py @@ -1,43 +1,44 @@ -import sys import argparse +import sys sys.path.append("/SNS/REF_L/shared/reduction") from lr_reduction import time_resolved - if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=True) - subparsers = parser.add_subparsers(dest='command', help='Available commands') + subparsers = parser.add_subparsers(dest="command", help="Available commands") # Time-resolved at 30Hz - dynanic30_parser = subparsers.add_parser('dynamic30Hz', help='Reduce time-resolved 30Hz [-h for help]') - dynanic30_parser.add_argument('meas_run_30Hz', type=int, - help='Run number for the data to be processed') - dynanic30_parser.add_argument('ref_run_30Hz', type=str, - help='Run number for the reference 30Hz data, measured at the same settings as the data to be processed') - dynanic30_parser.add_argument('ref_data_60Hz', type=str, - help='Reference R(Q), measured at 60Hz') - dynanic30_parser.add_argument('template_30Hz', type=str, - help='File path for the 30Hz reduction template') - dynanic30_parser.add_argument('time_interval', type=float, - help='Time interval to use, in seconds') - dynanic30_parser.add_argument('output_dir', type=str, - help='Output directory') - dynanic30_parser.add_argument('--scan_index', type=int, dest='scan_index', - help='Template scan index', required=False, default=1) - dynanic30_parser.add_argument('--no-plot', 
dest='create_plot', action='store_false') + dynanic30_parser = subparsers.add_parser("dynamic30Hz", help="Reduce time-resolved 30Hz [-h for help]") + dynanic30_parser.add_argument("meas_run_30Hz", type=int, help="Run number for the data to be processed") + dynanic30_parser.add_argument( + "ref_run_30Hz", type=str, help="Run number for the reference 30Hz data, measured at the same settings as the data to be processed" + ) + dynanic30_parser.add_argument("ref_data_60Hz", type=str, help="Reference R(Q), measured at 60Hz") + dynanic30_parser.add_argument("template_30Hz", type=str, help="File path for the 30Hz reduction template") + dynanic30_parser.add_argument("time_interval", type=float, help="Time interval to use, in seconds") + dynanic30_parser.add_argument("output_dir", type=str, help="Output directory") + dynanic30_parser.add_argument("--scan_index", type=int, dest="scan_index", help="Template scan index", required=False, default=1) + dynanic30_parser.add_argument("--no-plot", dest="create_plot", action="store_false") dynanic30_parser.set_defaults(create_plot=True) - dynanic30_parser.add_argument('--qsumming', dest='q_summing', action='store_true') + dynanic30_parser.add_argument("--qsumming", dest="q_summing", action="store_true") dynanic30_parser.set_defaults(q_summing=False) # Parse arguments args = parser.parse_args() - if args.command=='dynamic30Hz': + if args.command == "dynamic30Hz": print("Time-resolved reduction at 30Hz: run %s" % args.meas_run_30Hz) - reduced = time_resolved.reduce_30Hz_slices(args.meas_run_30Hz, args.ref_run_30Hz, args.ref_data_60Hz, args.template_30Hz, - time_interval=args.time_interval, output_dir=args.output_dir, - scan_index=args.scan_index, create_plot=args.create_plot, - q_summing=args.q_summing) + reduced = time_resolved.reduce_30Hz_slices( + args.meas_run_30Hz, + args.ref_run_30Hz, + args.ref_data_60Hz, + args.template_30Hz, + time_interval=args.time_interval, + output_dir=args.output_dir, + scan_index=args.scan_index, + create_plot=args.create_plot, + q_summing=args.q_summing, + ) diff --git a/launcher/scripts/xrr_reduction.py b/launcher/scripts/xrr_reduction.py index b1dced9..5fa3704 100644 --- a/launcher/scripts/xrr_reduction.py +++ b/launcher/scripts/xrr_reduction.py @@ -1,15 +1,15 @@ """ Process Rigaku .ras file and produce R(Q) """ -import os -import numpy as np import argparse +import os +import warnings +import numpy as np from matplotlib import pyplot as plt -import warnings -warnings.filterwarnings('ignore', module='numpy') -warnings.filterwarnings('ignore') +warnings.filterwarnings("ignore", module="numpy") +warnings.filterwarnings("ignore") WAVELENGTH_META = "HW_XG_WAVE_LENGTH_ALPHA1" @@ -17,12 +17,12 @@ def process_xrr(data_file, output_dir=None): """ - Process Rigaku .ras files to produce R(Q). + Process Rigaku .ras files to produce R(Q). 
- data_file: full file path of the data file to process - output_dir: optional output directory + data_file: full file path of the data file to process + output_dir: optional output directory """ - data = np.loadtxt(data_file, comments=['#','*']).T + data = np.loadtxt(data_file, comments=["#", "*"]).T # If no output directory was provided, use the location of the data file if output_dir is None: @@ -32,13 +32,13 @@ def process_xrr(data_file, output_dir=None): meta_data = dict() with open(data_file) as fd: for line in fd: - if line.startswith('*'): + if line.startswith("*"): toks = line.split() - if len(toks)<2: + if len(toks) < 2: # Single keywords are used to define meta data sections, skip them pass else: - value = toks[1].replace('"','') + value = toks[1].replace('"', "") try: value = float(value) except: @@ -60,7 +60,7 @@ def process_xrr(data_file, output_dir=None): ttheta = data[0] counts = data[1] - q = 4*np.pi/wl*np.sin(ttheta/2*np.pi/180) + q = 4 * np.pi / wl * np.sin(ttheta / 2 * np.pi / 180) # Select only points in a useful range _q_idx = (q > q_min) & (q < q_max) @@ -70,7 +70,7 @@ def process_xrr(data_file, output_dir=None): # R(q) will be normalized to the average between q_min and norm_q_max norm_q_max = 0.01 _q_idx = (q > q_min) & (q < norm_q_max) - _norm = np.sum(r[_q_idx])/len(q[_q_idx]) + _norm = np.sum(r[_q_idx]) / len(q[_q_idx]) r /= _norm err = r * 0.05 @@ -81,17 +81,17 @@ def process_xrr(data_file, output_dir=None): _name, _ext = os.path.splitext(_filename) print("DIR %s" % output_dir) - #_output_rq = output_dir+"/%s-Rq.txt" % _name + # _output_rq = output_dir+"/%s-Rq.txt" % _name _output_rq = os.path.join(output_dir, "%s-Rq.txt" % _name) print("saving %s" % _output_rq) np.savetxt(_output_rq, _rq_data) - print('saved') - plt.figure(figsize=(10,6)) + print("saved") + plt.figure(figsize=(10, 6)) plt.plot(q, r) - plt.xlabel('q [$1/\AA$]') - plt.ylabel('R(q)') - plt.yscale('log') - plt.xscale('linear') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + plt.yscale("log") + plt.xscale("linear") plt.savefig(os.path.join(output_dir, "%s-Rq.png" % _name)) plt.show() @@ -99,10 +99,8 @@ def process_xrr(data_file, output_dir=None): if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=True) - parser.add_argument('data_file', type=str, - help='File path of .ras file to process') - parser.add_argument('output_dir', type=str, - help='Output directory') + parser.add_argument("data_file", type=str, help="File path of .ras file to process") + parser.add_argument("output_dir", type=str, help="Output directory") # Parse arguments args = parser.parse_args() diff --git a/reduction/README.md b/reduction/README.md index c77be5d..4ba9c69 100644 --- a/reduction/README.md +++ b/reduction/README.md @@ -38,4 +38,4 @@ It can read and write the LR standard output files. - Constant-q binning: sign of delta pixel may depend on whether we reflect up or down - Capture tthd when calibrating for fixed tthd so that we can use the calibration if tthd is moved - In template.process_from_template_ws(), we should fit the peak center when doing const-q binning - instead of taking the center of the peak range. \ No newline at end of file + instead of taking the center of the peak range. 
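For context, the Q conversion and low-Q normalization performed by `process_xrr()` in the `xrr_reduction.py` diff above can be summarized in a few lines. The sketch below is illustrative only: the function name `ras_to_rq`, the default `q_min`/`q_max` cutoffs, and the flat 5% uncertainty are assumptions that mirror the snippet, not values taken from the repository.

```python
import numpy as np

def ras_to_rq(two_theta_deg, counts, wavelength, q_min=0.007, q_max=0.3, norm_q_max=0.01):
    """Illustrative sketch: convert a 2-theta scan to a normalized R(Q) curve."""
    two_theta = np.asarray(two_theta_deg, dtype=float)
    r = np.asarray(counts, dtype=float)

    # Momentum transfer for specular reflectivity: Q = 4*pi/lambda * sin(2theta/2)
    q = 4.0 * np.pi / wavelength * np.sin(two_theta / 2.0 * np.pi / 180.0)

    # Keep only points in the useful Q range (cutoffs are assumed defaults)
    keep = (q > q_min) & (q < q_max)
    q, r = q[keep], r[keep]

    # Normalize to the average reflectivity below the critical-edge region
    below_edge = (q > q_min) & (q < norm_q_max)
    r /= np.sum(r[below_edge]) / len(q[below_edge])

    # Flat 5% relative uncertainty, as in the script above
    return q, r, 0.05 * r
```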
diff --git a/reduction/data/reference_rq.txt b/reduction/data/reference_rq.txt index ccba737..01bb3bb 100644 --- a/reduction/data/reference_rq.txt +++ b/reduction/data/reference_rq.txt @@ -7,17 +7,17 @@ # TOF weighted: False # Bck in Q: False # DataRun NormRun TwoTheta(deg) LambdaMin(A) LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B -# 198409 198399 0.0209573 13.0547 16.3931 0.005 0.0100 1 0 -# 198410 198400 0.020946 10.4888 13.8272 0.005 0.0125 1 0 -# 198411 198401 0.020946 7.89154 11.2299 0.005 0.0166 1.1051 -5.536 -# 198412 198402 0.0209403 5.24418 8.58252 0.005 0.0250 6.6243 1.9231 -# 198413 198403 0.0209346 2.5026 5.84094 0.005 0.0525 28.319 0.0002 -# 198414 198403 0.0412845 2.5026 5.84094 0.005 0.1036 95.488 0.0016 -# 198415 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 -# 198416 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 +# 198409 198399 0.0209573 13.0547 16.3931 0.005 0.0100 1 0 +# 198410 198400 0.020946 10.4888 13.8272 0.005 0.0125 1 0 +# 198411 198401 0.020946 7.89154 11.2299 0.005 0.0166 1.1051 -5.536 +# 198412 198402 0.0209403 5.24418 8.58252 0.005 0.0250 6.6243 1.9231 +# 198413 198403 0.0209346 2.5026 5.84094 0.005 0.0525 28.319 0.0002 +# 198414 198403 0.0412845 2.5026 5.84094 0.005 0.1036 95.488 0.0016 +# 198415 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 +# 198416 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 # dQ0[1/Angstrom] = 0 # dQ/Q = 0.0275925 -# Q [1/Angstrom] R dR dQ [FWHM] +# Q [1/Angstrom] R dR dQ [FWHM] 0.0081226081098499 0.9548169246185914 0.0665249567890163 0.0002262313573802 0.0082850602720469 0.8799194292970859 0.0594772200234560 0.0002307559845278 0.0084507614774878 0.9880664878997731 0.0591553597730947 0.0002353711042183 diff --git a/reduction/data/reference_rq_201282.txt b/reduction/data/reference_rq_201282.txt index 8d4b763..1f4e3b6 100644 --- a/reduction/data/reference_rq_201282.txt +++ b/reduction/data/reference_rq_201282.txt @@ -7,16 +7,16 @@ # TOF weighted: False # Bck in Q: False # DataRun NormRun TwoTheta(deg) LambdaMin(A) LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B -# 201282 201043 0.020946 13.055 16.3935 0.005 0.0100 1 0 -# 201283 201044 0.020946 10.4891 13.8275 0.005 0.0125 1 0 -# 201284 201045 0.020946 7.89174 11.2302 0.005 0.0166 1.1671 -4.575 -# 201285 201048 0.0209403 5.24432 8.58274 0.005 0.0250 8.1865 3.7959 -# 201286 201051 0.0209573 2.50266 5.84109 0.005 0.0526 26.686 0.0006 -# 201287 201051 0.0412789 2.50266 5.84109 0.005 0.1036 108.65 0.0028 -# 201288 201051 0.0817804 2.50266 5.84109 0.005 0.2052 374.58 0.0141 +# 201282 201043 0.020946 13.055 16.3935 0.005 0.0100 1 0 +# 201283 201044 0.020946 10.4891 13.8275 0.005 0.0125 1 0 +# 201284 201045 0.020946 7.89174 11.2302 0.005 0.0166 1.1671 -4.575 +# 201285 201048 0.0209403 5.24432 8.58274 0.005 0.0250 8.1865 3.7959 +# 201286 201051 0.0209573 2.50266 5.84109 0.005 0.0526 26.686 0.0006 +# 201287 201051 0.0412789 2.50266 5.84109 0.005 0.1036 108.65 0.0028 +# 201288 201051 0.0817804 2.50266 5.84109 0.005 0.2052 374.58 0.0141 # dQ0[1/Angstrom] = 0 # dQ/Q = 0.0275897 -# Q [1/Angstrom] R dR dQ [FWHM] +# Q [1/Angstrom] R dR dQ [FWHM] 0.0079921268598835 1.0464165998640591 0.0868809413396315 0.0002210220391791 0.0081120087627818 1.0201670196357679 0.0796796914810166 0.0002243373697667 0.0082336888942235 1.0079439046970262 0.0766198987827083 0.0002277024303133 diff --git a/reduction/data/reference_rq_avg_overlap.txt b/reduction/data/reference_rq_avg_overlap.txt index e112f7e..6b8f18d 100644 --- a/reduction/data/reference_rq_avg_overlap.txt +++ 
b/reduction/data/reference_rq_avg_overlap.txt @@ -7,17 +7,17 @@ # TOF weighted: False # Bck in Q: False # DataRun NormRun TwoTheta(deg) LambdaMin(A) LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B -# 198409 198399 0.0209573 13.0547 16.3931 0.005 0.0100 1 0 -# 198410 198400 0.020946 10.4888 13.8272 0.005 0.0125 1 0 -# 198411 198401 0.020946 7.89154 11.2299 0.005 0.0166 1.1051 -5.536 -# 198412 198402 0.0209403 5.24418 8.58252 0.005 0.0250 6.6243 1.9231 -# 198413 198403 0.0209346 2.5026 5.84094 0.005 0.0525 28.319 0.0002 -# 198414 198403 0.0412845 2.5026 5.84094 0.005 0.1036 95.488 0.0016 -# 198415 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 -# 198416 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 +# 198409 198399 0.0209573 13.0547 16.3931 0.005 0.0100 1 0 +# 198410 198400 0.020946 10.4888 13.8272 0.005 0.0125 1 0 +# 198411 198401 0.020946 7.89154 11.2299 0.005 0.0166 1.1051 -5.536 +# 198412 198402 0.0209403 5.24418 8.58252 0.005 0.0250 6.6243 1.9231 +# 198413 198403 0.0209346 2.5026 5.84094 0.005 0.0525 28.319 0.0002 +# 198414 198403 0.0412845 2.5026 5.84094 0.005 0.1036 95.488 0.0016 +# 198415 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 +# 198416 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 # dQ0[1/Angstrom] = 0 # dQ/Q = 0.0275925 -# Q [1/Angstrom] R dR dQ [FWHM] +# Q [1/Angstrom] R dR dQ [FWHM] 0.0081226081098499 0.9548169246185914 0.0665249567890163 0.0002262313573802 0.0082850602720469 0.8799194292970859 0.0594772200234560 0.0002307559845278 0.0084507614774878 0.9880664878997731 0.0591553597730947 0.0002353711042183 diff --git a/reduction/data/reference_short_nobck.txt b/reduction/data/reference_short_nobck.txt index 6df1e43..f185d98 100644 --- a/reduction/data/reference_short_nobck.txt +++ b/reduction/data/reference_short_nobck.txt @@ -7,11 +7,11 @@ # TOF weighted: False # Bck in Q: False # DataRun NormRun TwoTheta(deg) LambdaMin(A) LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B -# 198388 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 -# 198389 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 +# 198388 198403 0.0817804 2.5026 5.84094 0.005 0.2052 332.49 0.0087 +# 198389 198403 0.161934 2.50312 5.84062 0.005 0.4060 1251.1 0.0344 # dQ0[1/Angstrom] = 0 # dQ/Q = 0.0276199 -# Q [1/Angstrom] R dR dQ [FWHM] +# Q [1/Angstrom] R dR dQ [FWHM] 0.0891900245767729 0.0002447540638881 0.0000133962937753 0.0024623432623375 0.0909738250683083 0.0002503773807390 0.0000135922222283 0.0025115901275842 0.0927933015696745 0.0002379958246820 0.0000128307329373 0.0025618219301359 diff --git a/reduction/data/template.xml b/reduction/data/template.xml index 18b611e..a624436 100644 --- a/reduction/data/template.xml +++ b/reduction/data/template.xml @@ -1,7 +1,7 @@ REFL Wednesday, 07. December 2022 09:37AM - 3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) + 3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) [GCC 9.4.0] Linux x86_64 diff --git a/reduction/data/template_201282.xml b/reduction/data/template_201282.xml index ee2d103..7d054f8 100644 --- a/reduction/data/template_201282.xml +++ b/reduction/data/template_201282.xml @@ -1,7 +1,7 @@ REFL Tuesday, 31. 
January 2023 05:39PM - 3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) + 3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) [GCC 9.4.0] Linux x86_64 diff --git a/reduction/lr_reduction/__init__.py b/reduction/lr_reduction/__init__.py index 79d5280..0610187 100644 --- a/reduction/lr_reduction/__init__.py +++ b/reduction/lr_reduction/__init__.py @@ -1 +1 @@ -__version__ = '2.0.13' +__version__ = "2.0.13" diff --git a/reduction/lr_reduction/event_reduction.py b/reduction/lr_reduction/event_reduction.py index f29d02c..148dd39 100644 --- a/reduction/lr_reduction/event_reduction.py +++ b/reduction/lr_reduction/event_reduction.py @@ -1,49 +1,48 @@ """ Event based reduction for the Liquids Reflectometer """ -import sys import time -import mantid import mantid.simpleapi as api - import numpy as np def get_wl_range(ws): """ - Determine TOF range from the data - :param workspace ws: workspace to work with + Determine TOF range from the data + :param workspace ws: workspace to work with """ run_object = ws.getRun() - wl = run_object.getProperty('LambdaRequest').value[0] - chopper_speed = run_object.getProperty('SpeedRequest1').value[0] + wl = run_object.getProperty("LambdaRequest").value[0] + chopper_speed = run_object.getProperty("SpeedRequest1").value[0] # Cut the edges by using a width of 2.6 A - wl_min = (wl - 1.3 * 60.0 / chopper_speed) - wl_max = (wl + 1.3 * 60.0 / chopper_speed) + wl_min = wl - 1.3 * 60.0 / chopper_speed + wl_max = wl + 1.3 * 60.0 / chopper_speed return [wl_min, wl_max] + def get_q_binning(q_min=0.001, q_max=0.15, q_step=-0.02): """ - Determine Q binning + Determine Q binning """ if q_step > 0: - n_steps = int((q_max-q_min)/q_step) + n_steps = int((q_max - q_min) / q_step) return q_min + np.asarray([q_step * i for i in range(n_steps)]) else: - _step = 1.0+np.abs(q_step) - n_steps = int(np.log(q_max/q_min)/np.log(_step)) + _step = 1.0 + np.abs(q_step) + n_steps = int(np.log(q_max / q_min) / np.log(_step)) return q_min * np.asarray([_step**i for i in range(n_steps)]) + def quicknxs_scale(theta, peak, low_res, norm_peak, norm_low_res): """ - Scaling factor to multiply by to be compatible with QuickNXS 1.0. + Scaling factor to multiply by to be compatible with QuickNXS 1.0. """ - quicknxs_scale = (float(norm_peak[1])-float(norm_peak[0])) * (float(norm_low_res[1])-float(norm_low_res[0])) - quicknxs_scale /= (float(peak[1])-float(peak[0])) * (float(low_res[1])-float(low_res[0])) + quicknxs_scale = (float(norm_peak[1]) - float(norm_peak[0])) * (float(norm_low_res[1]) - float(norm_low_res[0])) + quicknxs_scale /= (float(peak[1]) - float(peak[0])) * (float(low_res[1]) - float(low_res[0])) _scale = 0.005 / np.fabs(np.sin(theta)) if theta > 0.0002 else 1.0 quicknxs_scale *= _scale return quicknxs_scale @@ -51,13 +50,14 @@ def quicknxs_scale(theta, peak, low_res, norm_peak, norm_low_res): class EventReflectivity(object): """ - Event based reflectivity calculation. - List of items to be taken care of outside this class: - - Edge points cropping - - Angle offset - - Putting runs together in one R(q) curve - - Scaling factors + Event based reflectivity calculation. 
+ List of items to be taken care of outside this class: + - Edge points cropping + - Angle offset + - Putting runs together in one R(q) curve + - Scaling factors """ + QX_VS_QZ = 0 KZI_VS_KZF = 1 DELTA_KZ_VS_QZ = 3 @@ -67,28 +67,41 @@ class EventReflectivity(object): DEFAULT_4B_SAMPLE_DET_DISTANCE = 1.83 DEFAULT_4B_SOURCE_DET_DISTANCE = 15.75 - def __init__(self, scattering_workspace, direct_workspace, - signal_peak, signal_bck, norm_peak, norm_bck, - specular_pixel, signal_low_res, norm_low_res, - q_min=None, q_step=-0.02, q_max=None, - tof_range=None, theta=1.0, instrument=None): - """ - Pixel ranges include the min and max pixels. - - :param scattering_workspace: Mantid workspace containing the reflected data - :param direct_workspace: Mantid workspace containing the direct beam data [if None, normalization won't be applied] - :param signal_peak: pixel min and max for the specular peak - :param signal_bck: pixel range of the background [if None, the background won't be subtracted] - :param norm_peak: pixel range of the direct beam peak - :param norm_bck: direct background subtraction is not used [deprecated] - :param specular_pixel: pixel of the specular peak - :param signal_low_res: pixel range of the specular peak out of the scattering plane - :param norm_low_res: pixel range of the direct beam out of the scattering plane - :param q_min: value of lowest q point - :param q_step: step size in Q. Enter a negative value to get a log scale - :param q_min: value of largest q point - :param tof_range: TOF range,or None - :param theta: theta scattering angle in radians + def __init__( + self, + scattering_workspace, + direct_workspace, + signal_peak, + signal_bck, + norm_peak, + norm_bck, + specular_pixel, + signal_low_res, + norm_low_res, + q_min=None, + q_step=-0.02, + q_max=None, + tof_range=None, + theta=1.0, + instrument=None, + ): + """ + Pixel ranges include the min and max pixels. + + :param scattering_workspace: Mantid workspace containing the reflected data + :param direct_workspace: Mantid workspace containing the direct beam data [if None, normalization won't be applied] + :param signal_peak: pixel min and max for the specular peak + :param signal_bck: pixel range of the background [if None, the background won't be subtracted] + :param norm_peak: pixel range of the direct beam peak + :param norm_bck: direct background subtraction is not used [deprecated] + :param specular_pixel: pixel of the specular peak + :param signal_low_res: pixel range of the specular peak out of the scattering plane + :param norm_low_res: pixel range of the direct beam out of the scattering plane + :param q_min: value of lowest q point + :param q_step: step size in Q. 
Enter a negative value to get a log scale + :param q_max: value of the largest q point + :param tof_range: TOF range, or None + :param theta: theta scattering angle in radians """ if instrument in [self.INSTRUMENT_4A, self.INSTRUMENT_4B]: self.instrument = instrument @@ -112,13 +125,13 @@ def __init__(self, scattering_workspace, direct_workspace, # Process workspaces if self.tof_range is not None: - self._ws_sc = api.CropWorkspace(InputWorkspace=scattering_workspace, - XMin=tof_range[0], XMax=tof_range[1], - OutputWorkspace='_'+str(scattering_workspace)) + self._ws_sc = api.CropWorkspace( + InputWorkspace=scattering_workspace, XMin=tof_range[0], XMax=tof_range[1], OutputWorkspace="_" + str(scattering_workspace) + ) if direct_workspace is not None: - self._ws_db = api.CropWorkspace(InputWorkspace=direct_workspace, - XMin=tof_range[0], XMax=tof_range[1], - OutputWorkspace='_'+str(direct_workspace)) + self._ws_db = api.CropWorkspace( + InputWorkspace=direct_workspace, XMin=tof_range[0], XMax=tof_range[1], OutputWorkspace="_" + str(direct_workspace) + ) else: self._ws_db = None else: @@ -130,7 +143,7 @@ def __init__(self, scattering_workspace, direct_workspace, def extract_meta_data(self): """ - Extract meta data from the data file. + Extract meta data from the data file. """ # Set up basic data self.n_x = int(self._ws_sc.getInstrument().getNumberParameter("number-of-x-pixels")[0]) @@ -150,41 +163,41 @@ def extract_meta_data(self): if self.tof_range is None: self.wl_range = get_wl_range(self._ws_sc) else: - self.wl_range = [self.tof_range[0] / self.constant, self.tof_range[1] / self.constant] + self.wl_range = [self.tof_range[0] / self.constant, self.tof_range[1] / self.constant] if self.q_min is None: - self.q_min = 4.0*np.pi/self.wl_range[1] * np.fabs(np.sin(self.theta)) + self.q_min = 4.0 * np.pi / self.wl_range[1] * np.fabs(np.sin(self.theta)) if self.q_max is None: - self.q_max = 4.0*np.pi/self.wl_range[0] * np.fabs(np.sin(self.theta)) + self.q_max = 4.0 * np.pi / self.wl_range[0] * np.fabs(np.sin(self.theta)) # Q binning to use self.q_bins = get_q_binning(self.q_min, self.q_max, self.q_step) # Catch options that can be turned off if self.signal_low_res is None: - self.signal_low_res = [1, self.n_x-1] + self.signal_low_res = [1, self.n_x - 1] if self.norm_low_res is None: - self.norm_low_res = [1, self.n_x-1] + self.norm_low_res = [1, self.n_x - 1] def extract_meta_data_4A(self): """ - 4A-specific meta data + 4A-specific meta data """ run_object = self._ws_sc.getRun() - self.det_distance = run_object['SampleDetDis'].getStatistics().mean - source_sample_distance = run_object['ModeratorSamDis'].getStatistics().mean - if not run_object['SampleDetDis'].units in ['m', 'meter']: + self.det_distance = run_object["SampleDetDis"].getStatistics().mean + source_sample_distance = run_object["ModeratorSamDis"].getStatistics().mean + if run_object["SampleDetDis"].units not in ["m", "meter"]: self.det_distance /= 1000.0 - if not run_object['ModeratorSamDis'].units in ['m', 'meter']: + if run_object["ModeratorSamDis"].units not in ["m", "meter"]: source_sample_distance /= 1000.0 self.source_detector_distance = source_sample_distance + self.det_distance def extract_meta_data_4B(self): """ - 4B-specific meta data + 4B-specific meta data - Distance from source to sample was 13.63 meters prior to the source - to detector distance being determined with Bragg edges to be 15.75 m.
+ Distance from source to sample was 13.63 meters prior to the source + to detector distance being determined with Bragg edges to be 15.75 m. """ if self._ws_sc.getInstrument().hasParameter("sample-det-distance"): self.det_distance = self._ws_sc.getInstrument().getNumberParameter("sample-det-distance")[0] @@ -206,12 +219,12 @@ def __repr__(self): def to_dict(self): """ - Returns meta-data to be used/stored. + Returns meta-data to be used/stored. """ if self._ws_sc.getRun().hasProperty("start_time"): start_time = self._ws_sc.getRun().getProperty("start_time").value else: - start_time = 'live' + start_time = "live" experiment = self._ws_sc.getRun().getProperty("experiment_identifier").value run_number = self._ws_sc.getRun().getProperty("run_number").value sequence_number = int(self._ws_sc.getRun().getProperty("sequence_number").value[0]) @@ -225,25 +238,35 @@ def to_dict(self): dq0 = 0 dq_over_q = compute_resolution(self._ws_sc, theta=self.theta) - return dict(wl_min=self.wl_range[0], wl_max=self.wl_range[1], - q_min=self.q_min, q_max=self.q_max, theta=self.theta, - start_time=start_time, experiment=experiment, run_number=run_number, - run_title=run_title, norm_run=norm_run, time=time.ctime(), - dq0=dq0, dq_over_q=dq_over_q, sequence_number=sequence_number, - sequence_id=sequence_id) - - def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False, - clean=False, normalize=True): - """ - Compute specular reflectivity. - - For constant-Q binning, it's preferred to use tof_weighted=True. - - :param q_summing: turns on constant-Q binning - :param tof_weighted: if True, binning will be done by weighting each event to the DB distribution - :param bck_in_q: if True, the background will be estimated in Q space using the constant-Q binning approach - :param clean: if True, and Q summing is True, then leading artifact will be removed - :param normalize: if True, and tof_weighted is False, normalization will be skipped + return dict( + wl_min=self.wl_range[0], + wl_max=self.wl_range[1], + q_min=self.q_min, + q_max=self.q_max, + theta=self.theta, + start_time=start_time, + experiment=experiment, + run_number=run_number, + run_title=run_title, + norm_run=norm_run, + time=time.ctime(), + dq0=dq0, + dq_over_q=dq_over_q, + sequence_number=sequence_number, + sequence_id=sequence_id, + ) + + def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False, clean=False, normalize=True): + """ + Compute specular reflectivity. + + For constant-Q binning, it's preferred to use tof_weighted=True. 
+ + :param q_summing: turns on constant-Q binning + :param tof_weighted: if True, binning will be done by weighting each event to the DB distribution + :param bck_in_q: if True, the background will be estimated in Q space using the constant-Q binning approach + :param clean: if True, and Q summing is True, then leading artifact will be removed + :param normalize: if True, and tof_weighted is False, the data will be normalized to the direct beam """ if tof_weighted: self.specular_weighted(q_summing=q_summing, bck_in_q=bck_in_q) @@ -251,7 +274,7 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False, self.specular_unweighted(q_summing=q_summing, normalize=normalize) # Remove leading zeros - r = np.trim_zeros(self.refl, 'f') + r = np.trim_zeros(self.refl, "f") trim = len(self.refl) - len(r) self.refl = self.refl[trim:] self.d_refl = self.d_refl[trim:] @@ -259,8 +282,8 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False, # Dead time correction # dead_time = 4e-6 - #self.refl = self.refl * t_corr_sc / t_corr_db - #self.d_refl = self.d_refl * t_corr_sc / t_corr_db + # self.refl = self.refl * t_corr_sc / t_corr_db + # self.d_refl = self.d_refl * t_corr_sc / t_corr_db # Remove leading artifact from the wavelength coverage # Remember that q_bins is longer than refl by 1 because @@ -276,15 +299,20 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False, def specular_unweighted(self, q_summing=False, normalize=True): """ - Simple specular reflectivity calculation. This is the same approach as the - original LR reduction, which sums up pixels without constant-Q binning. - The original approach bins in TOF, then rebins the final results after - transformation to Q. This approach bins directly to Q. + Simple specular reflectivity calculation. This is the same approach as the + original LR reduction, which sums up pixels without constant-Q binning. + The original approach bins in TOF, then rebins the final results after + transformation to Q. This approach bins directly to Q. """ # Scattering data - refl, d_refl = self._reflectivity(self._ws_sc, peak_position=self.specular_pixel, - peak=self.signal_peak, low_res=self.signal_low_res, - theta=self.theta, q_summing=q_summing) + refl, d_refl = self._reflectivity( + self._ws_sc, + peak_position=self.specular_pixel, + peak=self.signal_peak, + low_res=self.signal_low_res, + theta=self.theta, + q_summing=q_summing, + ) # Remove background if self.signal_bck is not None: @@ -298,19 +326,21 @@ def specular_unweighted(self, q_summing=False, normalize=True): # we can bin the DB according to the same transform instead of binning and dividing in TOF. # This is mathematically equivalent and convenient in terms of abstraction for later # use for the constant-Q calculation elsewhere in the code. - norm, d_norm = self._reflectivity(self._ws_db, peak_position=0, - peak=self.norm_peak, low_res=self.norm_low_res, - theta=self.theta, q_summing=False) + norm, d_norm = self._reflectivity( + self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, theta=self.theta, q_summing=False + ) # Direct beam background could be added here. The effect will be negligible.
if self.norm_bck is not None: norm_bck, d_norm_bck = self.norm_bck_subtraction() norm -= norm_bck d_norm = np.sqrt(d_norm**2 + d_norm_bck**2) - db_bins = norm>0 + db_bins = norm > 0 - refl[db_bins] = refl[db_bins]/norm[db_bins] - d_refl[db_bins] = np.sqrt(d_refl[db_bins]**2 / norm[db_bins]**2 + refl[db_bins]**2 * d_norm[db_bins]**2 / norm[db_bins]**4) + refl[db_bins] = refl[db_bins] / norm[db_bins] + d_refl[db_bins] = np.sqrt( + d_refl[db_bins] ** 2 / norm[db_bins] ** 2 + refl[db_bins] ** 2 * d_norm[db_bins] ** 2 / norm[db_bins] ** 4 + ) # Clean up points where we have no direct beam zero_db = [not v for v in db_bins] @@ -323,24 +353,30 @@ def specular_unweighted(self, q_summing=False, normalize=True): def specular_weighted(self, q_summing=True, bck_in_q=False): """ - Compute reflectivity by weighting each event by flux. - This allows for summing in Q and to estimate the background in either Q - or pixels next to the peak. + Compute reflectivity by weighting each event by flux. + This allows for summing in Q and to estimate the background in either Q + or pixels next to the peak. """ # Event weights for normalization db_charge = self._ws_db.getRun().getProtonCharge() wl_events = self._get_events(self._ws_db, self.norm_peak, self.norm_low_res) wl_dist, wl_bins = np.histogram(wl_events, bins=60) - wl_dist = wl_dist/db_charge/(wl_bins[1]-wl_bins[0]) - wl_middle = [(wl_bins[i+1]+wl_bins[i])/2.0 for i in range(len(wl_bins)-1)] - - refl, d_refl = self._reflectivity(self._ws_sc, peak_position=self.specular_pixel, - peak=self.signal_peak, low_res=self.signal_low_res, - theta=self.theta, q_summing=q_summing, wl_dist=wl_dist, wl_bins=wl_middle) + wl_dist = wl_dist / db_charge / (wl_bins[1] - wl_bins[0]) + wl_middle = [(wl_bins[i + 1] + wl_bins[i]) / 2.0 for i in range(len(wl_bins) - 1)] + + refl, d_refl = self._reflectivity( + self._ws_sc, + peak_position=self.specular_pixel, + peak=self.signal_peak, + low_res=self.signal_low_res, + theta=self.theta, + q_summing=q_summing, + wl_dist=wl_dist, + wl_bins=wl_middle, + ) if self.signal_bck is not None: - refl_bck, d_refl_bck = self.bck_subtraction(wl_dist=wl_dist, wl_bins=wl_middle, - q_summing=bck_in_q) + refl_bck, d_refl_bck = self.bck_subtraction(wl_dist=wl_dist, wl_bins=wl_middle, q_summing=bck_in_q) refl -= refl_bck d_refl = np.sqrt(d_refl**2 + d_refl_bck**2) @@ -350,135 +386,148 @@ def specular_weighted(self, q_summing=True, bck_in_q=False): def _roi_integration(self, ws, peak, low_res, q_bins=None, wl_dist=None, wl_bins=None, q_summing=False): """ - Integrate a region of interest and normalize by the number of included pixels. + Integrate a region of interest and normalize by the number of included pixels. - The options are the same as for the reflectivity calculation. - If wl_dist and wl_bins are supplied, the events will be weighted by flux. - If q_summing is True, the angle of each neutron will be recalculated according to - their position on the detector and place in the proper Q bin. + The options are the same as for the reflectivity calculation. + If wl_dist and wl_bins are supplied, the events will be weighted by flux. + If q_summing is True, the angle of each neutron will be recalculated according to + their position on the detector and place in the proper Q bin. 
""" q_bins = self.q_bins if q_bins is None else q_bins - refl_bck, d_refl_bck = self._reflectivity(ws, peak_position=0, q_bins=q_bins, - peak=peak, low_res=low_res, - theta=self.theta, q_summing=q_summing, - wl_dist=wl_dist, wl_bins=wl_bins) - - _pixel_area = (peak[1]-peak[0]+1.0) + refl_bck, d_refl_bck = self._reflectivity( + ws, + peak_position=0, + q_bins=q_bins, + peak=peak, + low_res=low_res, + theta=self.theta, + q_summing=q_summing, + wl_dist=wl_dist, + wl_bins=wl_bins, + ) + + _pixel_area = peak[1] - peak[0] + 1.0 refl_bck /= _pixel_area d_refl_bck /= _pixel_area return refl_bck, d_refl_bck - def _bck_subtraction(self, ws, peak, bck, low_res, normalize_to_single_pixel=False, - q_bins=None, wl_dist=None, wl_bins=None, q_summing=False): + def _bck_subtraction( + self, ws, peak, bck, low_res, normalize_to_single_pixel=False, q_bins=None, wl_dist=None, wl_bins=None, q_summing=False + ): """ - Abstracted out background subtraction process. + Abstracted out background subtraction process. - The options are the same as for the reflectivity calculation. - If wl_dist and wl_bins are supplied, the events will be weighted by flux. - If q_summing is True, the angle of each neutron will be recalculated according to - their position on the detector and place in the proper Q bin. + The options are the same as for the reflectivity calculation. + If wl_dist and wl_bins are supplied, the events will be weighted by flux. + If q_summing is True, the angle of each neutron will be recalculated according to + their position on the detector and place in the proper Q bin. """ q_bins = self.q_bins if q_bins is None else q_bins # Background on the left of the peak only. We allow the user to overlap the peak on the right, # but only use the part left of the peak. - if bck[0] < peak[0]-1 and bck[1] < peak[1]+1: - right_side = min(bck[1], peak[0]-1) + if bck[0] < peak[0] - 1 and bck[1] < peak[1] + 1: + right_side = min(bck[1], peak[0] - 1) _left = [bck[0], right_side] print("Left side background: [%s, %s]" % (_left[0], _left[1])) - refl_bck, d_refl_bck = self._roi_integration(ws, peak=_left, low_res=low_res, - q_bins=q_bins, wl_dist=wl_dist, - wl_bins=wl_bins, q_summing=q_summing) + refl_bck, d_refl_bck = self._roi_integration( + ws, peak=_left, low_res=low_res, q_bins=q_bins, wl_dist=wl_dist, wl_bins=wl_bins, q_summing=q_summing + ) # Background on the right of the peak only. We allow the user to overlap the peak on the left, # but only use the part right of the peak. 
- elif bck[0] > peak[0]-1 and bck[1] > peak[1]+1: - left_side = max(bck[0], peak[1]+1) + elif bck[0] > peak[0] - 1 and bck[1] > peak[1] + 1: + left_side = max(bck[0], peak[1] + 1) _right = [left_side, bck[1]] print("Right side background: [%s, %s]" % (_right[0], _right[1])) - refl_bck, d_refl_bck = self._roi_integration(ws, peak=_right, low_res=low_res, - q_bins=q_bins, wl_dist=wl_dist, - wl_bins=wl_bins, q_summing=q_summing) + refl_bck, d_refl_bck = self._roi_integration( + ws, peak=_right, low_res=low_res, q_bins=q_bins, wl_dist=wl_dist, wl_bins=wl_bins, q_summing=q_summing + ) # Background on both sides - elif bck[0] < peak[0]-1 and bck[1] > peak[1]+1: - _left = [bck[0], peak[0]-1] - refl_bck, d_refl_bck = self._roi_integration(ws, peak=_left, low_res=low_res, - q_bins=q_bins, wl_dist=wl_dist, - wl_bins=wl_bins, q_summing=q_summing) - _right = [peak[1]+1, bck[1]] - _refl_bck, _d_refl_bck = self._roi_integration(ws, peak=_right, low_res=low_res, - q_bins=q_bins, wl_dist=wl_dist, - wl_bins=wl_bins, q_summing=q_summing) + elif bck[0] < peak[0] - 1 and bck[1] > peak[1] + 1: + _left = [bck[0], peak[0] - 1] + refl_bck, d_refl_bck = self._roi_integration( + ws, peak=_left, low_res=low_res, q_bins=q_bins, wl_dist=wl_dist, wl_bins=wl_bins, q_summing=q_summing + ) + _right = [peak[1] + 1, bck[1]] + _refl_bck, _d_refl_bck = self._roi_integration( + ws, peak=_right, low_res=low_res, q_bins=q_bins, wl_dist=wl_dist, wl_bins=wl_bins, q_summing=q_summing + ) print("Background on both sides: [%s %s] [%s %s]" % (_left[0], _left[1], _right[0], _right[1])) - refl_bck = (refl_bck + _refl_bck)/2.0 - d_refl_bck = np.sqrt(d_refl_bck**2 + _d_refl_bck**2)/2.0 + refl_bck = (refl_bck + _refl_bck) / 2.0 + d_refl_bck = np.sqrt(d_refl_bck**2 + _d_refl_bck**2) / 2.0 else: print("Invalid background: [%s %s]" % (bck[0], bck[1])) - refl_bck = np.zeros(q_bins.shape[0]-1) + refl_bck = np.zeros(q_bins.shape[0] - 1) d_refl_bck = refl_bck # At this point we have integrated the region of interest and obtain the average per # pixel, so unless that's what we want we need to multiply by the number of pixels # used to integrate the signal. if not normalize_to_single_pixel: - _pixel_area = peak[1] - peak[0]+1.0 + _pixel_area = peak[1] - peak[0] + 1.0 refl_bck *= _pixel_area d_refl_bck *= _pixel_area return refl_bck, d_refl_bck - def bck_subtraction(self, normalize_to_single_pixel=False, q_bins=None, wl_dist=None, wl_bins=None, - q_summing=False): + def bck_subtraction(self, normalize_to_single_pixel=False, q_bins=None, wl_dist=None, wl_bins=None, q_summing=False): """ - Higher-level call for background subtraction. Hides the ranges needed to define the ROI. + Higher-level call for background subtraction. Hides the ranges needed to define the ROI. """ - return self._bck_subtraction(self._ws_sc, self.signal_peak, self.signal_bck, self.signal_low_res, - normalize_to_single_pixel=normalize_to_single_pixel, q_bins=q_bins, - wl_dist=wl_dist, wl_bins=wl_bins, q_summing=q_summing) + return self._bck_subtraction( + self._ws_sc, + self.signal_peak, + self.signal_bck, + self.signal_low_res, + normalize_to_single_pixel=normalize_to_single_pixel, + q_bins=q_bins, + wl_dist=wl_dist, + wl_bins=wl_bins, + q_summing=q_summing, + ) def norm_bck_subtraction(self): """ - Higher-level call for background subtraction for the normalization run. + Higher-level call for background subtraction for the normalization run. 
""" - return self._bck_subtraction(self._ws_db, self.norm_peak, self.norm_bck, self.norm_low_res, - normalize_to_single_pixel=False) + return self._bck_subtraction(self._ws_db, self.norm_peak, self.norm_bck, self.norm_low_res, normalize_to_single_pixel=False) - def slice(self, x_min=0.002, x_max=0.004, x_bins=None, z_bins=None, - refl=None, d_refl=None, normalize=False): + def slice(self, x_min=0.002, x_max=0.004, x_bins=None, z_bins=None, refl=None, d_refl=None, normalize=False): """ - Retrieve a slice from the off-specular data. + Retrieve a slice from the off-specular data. """ x_bins = self._offspec_x_bins if x_bins is None else x_bins z_bins = self._offspec_z_bins if z_bins is None else z_bins refl = self._offspec_refl if refl is None else refl d_refl = self._offspec_d_refl if d_refl is None else d_refl - i_min = len(x_bins[x_bins 0 - d_refl_sq[non_zero] = refl[non_zero] / np.sqrt(counts[non_zero]) / charge / bin_size[non_zero] + d_refl_sq[non_zero] = refl[non_zero] / np.sqrt(counts[non_zero]) / charge / bin_size[non_zero] refl[non_zero] = refl[non_zero] / charge / bin_size[non_zero] else: d_refl_sq = np.sqrt(np.fabs(refl)) / charge - refl /= charge + refl /= charge return refl, d_refl_sq def _get_events(self, ws, peak, low_res): """ - Return an array of wavelengths for a given workspace. + Return an array of wavelengths for a given workspace. """ wl_events = np.asarray([]) - for i in range(low_res[0], int(low_res[1]+1)): - for j in range(peak[0], int(peak[1]+1)): + for i in range(low_res[0], int(low_res[1] + 1)): + for j in range(peak[0], int(peak[1] + 1)): if self.instrument == self.INSTRUMENT_4A: pixel = j * self.n_y + i else: @@ -551,17 +600,16 @@ def _get_events(self, ws, peak, low_res): return wl_events - def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50, - z_min=None, z_max=None, z_npts=-120, bck_in_q=None): + def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50, z_min=None, z_max=None, z_npts=-120, bck_in_q=None): """ - Compute off-specular - :param x_axis: Axis selection - :param x_min: Min value on x-axis - :param x_max: Max value on x-axis - :param x_npts: Number of points in x (negative will produce a log scale) - :param z_min: Min value on z-axis (if none, default Qz will be used) - :param z_max: Max value on z-axis (if none, default Qz will be used) - :param z_npts: Number of points in z (negative will produce a log scale) + Compute off-specular + :param x_axis: Axis selection + :param x_min: Min value on x-axis + :param x_max: Max value on x-axis + :param x_npts: Number of points in x (negative will produce a log scale) + :param z_min: Min value on z-axis (if none, default Qz will be used) + :param z_max: Max value on z-axis (if none, default Qz will be used) + :param z_npts: Number of points in z (negative will produce a log scale) """ # Z axis binning qz_bins = self.q_bins @@ -579,23 +627,23 @@ def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50, wl_events = self._get_events(self._ws_db, self.norm_peak, self.norm_low_res) wl_dist, wl_bins = np.histogram(wl_events, bins=60) - wl_middle = [(wl_bins[i+1]+wl_bins[i])/2.0 for i in range(len(wl_bins)-1)] + wl_middle = [(wl_bins[i + 1] + wl_bins[i]) / 2.0 for i in range(len(wl_bins) - 1)] - _refl, _d_refl = self._off_specular(self._ws_sc, wl_dist, wl_middle, qx_bins, qz_bins, - self.specular_pixel, self.theta, x_axis=x_axis) + _refl, _d_refl = self._off_specular( + self._ws_sc, wl_dist, wl_middle, qx_bins, qz_bins, self.specular_pixel, self.theta, x_axis=x_axis 
+ ) db_charge = self._ws_db.getRun().getProtonCharge() - _refl *= db_charge * (wl_bins[1]-wl_bins[0]) - _d_refl *= db_charge * (wl_bins[1]-wl_bins[0]) + _refl *= db_charge * (wl_bins[1] - wl_bins[0]) + _d_refl *= db_charge * (wl_bins[1] - wl_bins[0]) # Background if self.signal_bck: if bck_in_q is None: print("Not implemented") else: - _, refl_bck, d_refl_bck = self.slice(bck_in_q[0], bck_in_q[1], - x_bins=qx_bins, z_bins=qz_bins, - refl=_refl, d_refl=_d_refl, - normalize=True) + _, refl_bck, d_refl_bck = self.slice( + bck_in_q[0], bck_in_q[1], x_bins=qx_bins, z_bins=qz_bins, refl=_refl, d_refl=_d_refl, normalize=True + ) _refl -= refl_bck _d_refl = np.sqrt(_d_refl**2 + d_refl_bck**2) @@ -608,12 +656,12 @@ def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50, def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, theta, x_axis=None): charge = ws.getRun().getProtonCharge() - refl = np.zeros([len(x_bins)-1, len(z_bins)-1]) - counts = np.zeros([len(x_bins)-1, len(z_bins)-1]) + refl = np.zeros([len(x_bins) - 1, len(z_bins) - 1]) + counts = np.zeros([len(x_bins) - 1, len(z_bins) - 1]) for j in range(0, self.n_x): wl_list = np.asarray([]) - for i in range(self.signal_low_res[0], int(self.signal_low_res[1]+1)): + for i in range(self.signal_low_res[0], int(self.signal_low_res[1] + 1)): if self.instrument == self.INSTRUMENT_4A: pixel = j * self.n_y + i else: @@ -623,10 +671,10 @@ def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, the wl_list = np.concatenate((wl_events, wl_list)) k = 2.0 * np.pi / wl_list - wl_weights = 1.0/np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf) + wl_weights = 1.0 / np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf) - #TODO: Sign with depend on reflect up or down - x_distance = float(j-peak_position) * self.pixel_width + # TODO: Sign with depend on reflect up or down + x_distance = float(j - peak_position) * self.pixel_width delta_theta_f = np.arctan(x_distance / self.det_distance) theta_f = theta + delta_theta_f @@ -639,13 +687,13 @@ def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, the _x = qx _z = qz if x_axis == EventReflectivity.DELTA_KZ_VS_QZ: - _x = (ki_z - kf_z) + _x = ki_z - kf_z elif x_axis == EventReflectivity.KZI_VS_KZF: _x = ki_z _z = kf_z elif x_axis == EventReflectivity.THETAF_VS_WL: _x = wl_list - _z = np.ones(len(wl_list))*theta_f + _z = np.ones(len(wl_list)) * theta_f histo_weigths = wl_weights * _z / wl_list _counts, _, _ = np.histogram2d(_x, _z, bins=[x_bins, z_bins], weights=histo_weigths) @@ -654,14 +702,14 @@ def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, the counts += _counts bin_size = z_bins[1] - z_bins[0] - d_refl_sq = refl / np.sqrt(counts) / charge / bin_size + d_refl_sq = refl / np.sqrt(counts) / charge / bin_size refl /= charge * bin_size return refl, d_refl_sq def gravity_correction(self, ws, wl_list): """ - Gravity correction for each event + Gravity correction for each event """ # Xi reference would be the position of xi if the si slit were to be positioned # at the sample. The distance from the sample to si is then xi_reference - xi. 
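For reference, the `x_axis` options used by `off_specular()`/`_off_specular()` above (QX_VS_QZ, KZI_VS_KZF, DELTA_KZ_VS_QZ) correspond to the usual off-specular coordinates built from the incident and final angles of each event. The sketch below spells those quantities out for a single event; the sign conventions and the `qx` expression are assumptions based on standard reflectometry definitions, since only the `ki_z`/`kf_z` combinations appear explicitly in the hunk above.

```python
import numpy as np

def off_specular_axes(wl, theta_i, theta_f):
    """Illustrative sketch of the off-specular axis choices for one event.

    wl is the neutron wavelength in Angstroms; theta_i and theta_f are the
    incident and final angles in radians.
    """
    k = 2.0 * np.pi / wl          # wavevector magnitude, as in _off_specular()
    ki_z = k * np.sin(theta_i)    # incident wavevector, z component
    kf_z = k * np.sin(theta_f)    # scattered wavevector, z component
    qz = ki_z + kf_z              # momentum transfer along the surface normal
    qx = k * (np.cos(theta_f) - np.cos(theta_i))  # in-plane momentum transfer (assumed sign)
    return dict(QX_VS_QZ=(qx, qz), KZI_VS_KZF=(ki_z, kf_z), DELTA_KZ_VS_QZ=(ki_z - kf_z, qz))
```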
@@ -672,7 +720,7 @@ def gravity_correction(self, ws, wl_list): # Distance between the s1 and the sample s1_sample_distance = 1485 if ws.getInstrument().hasParameter("s1-sample-distance"): - s1_sample_distance = ws.getInstrument().getNumberParameter("s1-sample-distance")[0]*1000 + s1_sample_distance = ws.getInstrument().getNumberParameter("s1-sample-distance")[0] * 1000 xi = 310 if ws.getInstrument().hasParameter("BL4B:Mot:xi.RBV"): @@ -682,42 +730,42 @@ def gravity_correction(self, ws, wl_list): slit_distance = s1_sample_distance - sample_si_distance # Angle of the incident beam on a horizontal sample - theta_in=-4.0 + theta_in = -4.0 # Calculation from the ILL paper. This works for inclined beams. # Calculated theta is the angle on the sample - g = 9.8067 # m/s^2 - h = 6.6260715e-34 # Js=kg m^2/s - mn = 1.67492749804e-27 # kg + g = 9.8067 # m/s^2 + h = 6.6260715e-34 # Js=kg m^2/s + mn = 1.67492749804e-27 # kg - v = h/(mn*wl_list*1e-10) - k = g/(2*v**2) + v = h / (mn * wl_list * 1e-10) + k = g / (2 * v**2) # Define the sample position as x=0, y=0. increasing x is towards moderator - xs=0 + xs = 0 # positions of slits x1 = sample_si_distance / 1000 x2 = (sample_si_distance + slit_distance) / 1000 - #height of slits determined by incident theta, y=0 is the sample height - y1=x1*np.tan(theta_in*np.pi/180) - y2=x2*np.tan(theta_in*np.pi/180) + # height of slits determined by incident theta, y=0 is the sample height + y1 = x1 * np.tan(theta_in * np.pi / 180) + y2 = x2 * np.tan(theta_in * np.pi / 180) # This is the location of the top of the parabola - x0=(y1-y2+k*(x1**2-x2**2))/(2*k*(x1-x2)) + x0 = (y1 - y2 + k * (x1**2 - x2**2)) / (2 * k * (x1 - x2)) # Angle is arctan(dy/dx) at sample - theta_sample = np.arctan(2*k*(x0-xs)) * 180/np.pi + theta_sample = np.arctan(2 * k * (x0 - xs)) * 180 / np.pi - return (theta_sample-theta_in) * np.pi / 180.0 + return (theta_sample - theta_in) * np.pi / 180.0 def compute_resolution(ws, default_dq=0.027, theta=None): """ - Compute the Q resolution from the meta data. - :param theta: scattering angle in radians + Compute the Q resolution from the meta data. + :param theta: scattering angle in radians """ # We can't compute the resolution if the value of xi is not in the logs. # Since it was not always logged, check for it here. @@ -736,11 +784,11 @@ def compute_resolution(ws, default_dq=0.027, theta=None): # Distance between the s1 and the sample s1_sample_distance = 1485 if ws.getInstrument().hasParameter("s1-sample-distance"): - s1_sample_distance = ws.getInstrument().getNumberParameter("s1-sample-distance")[0]*1000 + s1_sample_distance = ws.getInstrument().getNumberParameter("s1-sample-distance")[0] * 1000 s1h = abs(ws.getRun().getProperty("S1VHeight").value[0]) if theta is None: - theta = abs(ws.getRun().getProperty("ths").value[0]) * np.pi / 180. + theta = abs(ws.getRun().getProperty("ths").value[0]) * np.pi / 180.0 xi = abs(ws.getRun().getProperty("BL4B:Mot:xi.RBV").value[0]) sample_si_distance = xi_reference - xi slit_distance = s1_sample_distance - sample_si_distance diff --git a/reduction/lr_reduction/output.py b/reduction/lr_reduction/output.py index fb35332..49b8f9a 100644 --- a/reduction/lr_reduction/output.py +++ b/reduction/lr_reduction/output.py @@ -2,14 +2,17 @@ Write R(q) output """ import json + import numpy as np from . 
import __version__ as VERSION -class RunCollection(): + +class RunCollection: """ - A collection of runs to assemble into a single R(Q) + A collection of runs to assemble into a single R(Q) """ + def __init__(self, average_overlap=False): self.collection = [] self.average_overlap = average_overlap @@ -20,16 +23,16 @@ def __init__(self, average_overlap=False): def add(self, q, r, dr, meta_data, dq=None): """ - Add a partial R(q) to the collection + Add a partial R(q) to the collection """ if dq is None: - resolution = meta_data['dq_over_q'] + resolution = meta_data["dq_over_q"] dq = resolution * q self.collection.append(dict(q=q, r=r, dr=dr, dq=dq, info=meta_data)) def merge(self): """ - Merge the collection of runs + Merge the collection of runs """ qz_all = [] refl_all = [] @@ -37,11 +40,11 @@ def merge(self): d_qz_all = [] for item in self.collection: - for i in range(len(item['q'])): - qz_all.append(item['q'][i]) - refl_all.append(item['r'][i]) - d_refl_all.append(item['dr'][i]) - d_qz_all.append(item['dq'][i]) + for i in range(len(item["q"])): + qz_all.append(item["q"][i]) + refl_all.append(item["r"][i]) + d_refl_all.append(item["dr"][i]) + d_qz_all.append(item["dq"][i]) qz_all = np.asarray(qz_all) refl_all = np.asarray(refl_all) @@ -64,28 +67,28 @@ def merge(self): # Average information for groups of points qz = self.qz_all[0] total = self.refl_all[0] - err2 = self.d_refl_all[0]**2 + err2 = self.d_refl_all[0] ** 2 dq = self.d_qz_all[0] - npts = 1. + npts = 1.0 for i in range(1, len(self.qz_all)): - if (self.qz_all[i] - qz)/qz > 0.000001: + if (self.qz_all[i] - qz) / qz > 0.000001: # Store the previous point qz_all.append(qz) - refl_all.append(total/npts) - d_refl_all.append(np.sqrt(err2)/npts) - d_qz_all .append(dq) + refl_all.append(total / npts) + d_refl_all.append(np.sqrt(err2) / npts) + d_qz_all.append(dq) # Start a new point qz = self.qz_all[i] total = self.refl_all[i] - err2 = self.d_refl_all[i]**2 + err2 = self.d_refl_all[i] ** 2 dq = self.d_qz_all[i] - npts = 1. + npts = 1.0 else: total += self.refl_all[i] - err2 += self.d_refl_all[i]**2 - npts += 1. 
+ err2 += self.d_refl_all[i] ** 2 + npts += 1.0 self.qz_all = np.asarray(qz_all) self.refl_all = np.asarray(refl_all) self.d_refl_all = np.asarray(d_refl_all) @@ -93,54 +96,62 @@ def merge(self): def save_ascii(self, file_path, meta_as_json=False): """ - Save R(Q) in ascii format + Save R(Q) in ascii format """ self.merge() - with open(file_path, 'w') as fd: + with open(file_path, "w") as fd: # Write meta data initial_entry_written = False for item in self.collection: - _meta = item['info'] + _meta = item["info"] if not initial_entry_written: - fd.write("# Experiment %s Run %s\n" % (_meta['experiment'], _meta['run_number'])) + fd.write("# Experiment %s Run %s\n" % (_meta["experiment"], _meta["run_number"])) fd.write("# Reduction %s\n" % VERSION) - fd.write("# Run title: %s\n" % _meta['run_title']) - fd.write("# Run start time: %s\n" % _meta['start_time']) - fd.write("# Reduction time: %s\n" % _meta['time']) - if 'q_summing' in _meta: - fd.write("# Q summing: %s\n" % _meta['q_summing']) - if 'tof_weighted' in _meta: - fd.write("# TOF weighted: %s\n" % _meta['tof_weighted']) - if 'bck_in_q' in _meta: - fd.write("# Bck in Q: %s\n" % _meta['bck_in_q']) + fd.write("# Run title: %s\n" % _meta["run_title"]) + fd.write("# Run start time: %s\n" % _meta["start_time"]) + fd.write("# Reduction time: %s\n" % _meta["time"]) + if "q_summing" in _meta: + fd.write("# Q summing: %s\n" % _meta["q_summing"]) + if "tof_weighted" in _meta: + fd.write("# TOF weighted: %s\n" % _meta["tof_weighted"]) + if "bck_in_q" in _meta: + fd.write("# Bck in Q: %s\n" % _meta["bck_in_q"]) if meta_as_json: fd.write("# Meta:%s\n" % json.dumps(_meta)) fd.write("# DataRun NormRun TwoTheta(deg) LambdaMin(A) ") fd.write("LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B\n") fd.write("") - if 'scaling_factors' in _meta: - a = _meta['scaling_factors']['a'] - b = _meta['scaling_factors']['b'] + if "scaling_factors" in _meta: + a = _meta["scaling_factors"]["a"] + b = _meta["scaling_factors"]["b"] else: a = 1 b = 0 - value_list = (_meta['run_number'], _meta['norm_run'], _meta['theta']*2.0, - _meta['wl_min'], _meta['wl_max'], _meta['q_min'], _meta['q_max'], - a, b) + value_list = ( + _meta["run_number"], + _meta["norm_run"], + _meta["theta"] * 2.0, + _meta["wl_min"], + _meta["wl_max"], + _meta["q_min"], + _meta["q_max"], + a, + b, + ) fd.write("# %-9s %-9s %-14.6g %-14.6g %-12.6g %-12.6s %-12.6s %-12.6s %-12.6s\n" % value_list) initial_entry_written = True # Write R(q) - fd.write('# dQ0[1/Angstrom] = %g\n' % _meta['dq0']) - fd.write('# dQ/Q = %g\n' % _meta['dq_over_q']) - fd.write('# %-21s %-21s %-21s %-21s\n' % ('Q [1/Angstrom]', 'R', 'dR', 'dQ [FWHM]')) + fd.write("# dQ0[1/Angstrom] = %g\n" % _meta["dq0"]) + fd.write("# dQ/Q = %g\n" % _meta["dq_over_q"]) + fd.write("# %-21s %-21s %-21s %-21s\n" % ("Q [1/Angstrom]", "R", "dR", "dQ [FWHM]")) for i in range(len(self.qz_all)): - fd.write('%20.16f %20.16f %20.16f %20.16f\n' % (self.qz_all[i], self.refl_all[i], self.d_refl_all[i], self.d_qz_all[i])) + fd.write("%20.16f %20.16f %20.16f %20.16f\n" % (self.qz_all[i], self.refl_all[i], self.d_refl_all[i], self.d_qz_all[i])) def add_from_file(self, file_path): """ - Read a partial result file and add it to the collection + Read a partial result file and add it to the collection """ _q, _r, _dr, _dq, _meta = read_file(file_path) self.add(_q, _r, _dr, _meta, dq=_dq) @@ -148,13 +159,13 @@ def add_from_file(self, file_path): def read_file(file_path): """ - Read a data file and extract meta data + Read a data file and extract meta data """ _meta = dict() - 
with open(file_path, 'r') as fd: + with open(file_path, "r") as fd: for l in fd.readlines(): if l.startswith("# Meta:"): - _meta = json.loads(l[len("# Meta:"):-1]) + _meta = json.loads(l[len("# Meta:") : -1]) try: _q, _r, _dr, _dq = np.loadtxt(file_path).T except: diff --git a/reduction/lr_reduction/peak_finding.py b/reduction/lr_reduction/peak_finding.py index 8e1b785..818f00a 100644 --- a/reduction/lr_reduction/peak_finding.py +++ b/reduction/lr_reduction/peak_finding.py @@ -1,25 +1,24 @@ -import numpy as np import warnings -warnings.filterwarnings('ignore', module='numpy') -warnings.filterwarnings('ignore') -from lmfit.models import GaussianModel, LinearModel, ConstantModel, RectangleModel, QuadraticModel +import numpy as np + +warnings.filterwarnings("ignore", module="numpy") +warnings.filterwarnings("ignore") -import mantid import mantid.simpleapi as api +from lmfit.models import GaussianModel, LinearModel, QuadraticModel, RectangleModel def process_data(workspace, summed=True, tof_step=200): tof_min = workspace.getTofMin() tof_max = workspace.getTofMax() _ws = api.Rebin(InputWorkspace=workspace, Params="%s,%s,%s" % (tof_min, tof_step, tof_max)) - y=_ws.extractY() + y = _ws.extractY() y = np.reshape(y, (256, 304, y.shape[1])) - tof=_ws.extractX()[0] - tof = (tof[:-1]+tof[1:])/2.0 + tof = _ws.extractX()[0] + tof = (tof[:-1] + tof[1:]) / 2.0 - if summed: y = np.sum(y, axis=2) @@ -29,10 +28,10 @@ def process_data(workspace, summed=True, tof_step=200): def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None): - gauss = GaussianModel(prefix='g_') - linear = LinearModel(prefix='l_') - quadratic = QuadraticModel(prefix='q_') - rectangular = RectangleModel(prefix='r_') + gauss = GaussianModel(prefix="g_") + linear = LinearModel(prefix="l_") + QuadraticModel(prefix="q_") + RectangleModel(prefix="r_") amplitude_guess = np.max(y[x_min:x_max]) @@ -42,29 +41,27 @@ def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None): _center = center if sigma is not None: _sigma = sigma - + pars = gauss.make_params(amplitude=amplitude_guess, center=_center, sigma=_sigma) pars.update(linear.make_params(a=0, b=0)) - #if center is not None: + # if center is not None: # pars['g_center'].vary=False if sigma is not None: - pars['g_sigma'].vary=False - pars['g_amplitude'].min=0 - pars['g_center'].min=_center-2 - pars['g_center'].max=_center+2 - - weights=1/np.sqrt(y) - weights[y<1]=1 - + pars["g_sigma"].vary = False + pars["g_amplitude"].min = 0 + pars["g_center"].min = _center - 2 + pars["g_center"].max = _center + 2 + + weights = 1 / np.sqrt(y) + weights[y < 1] = 1 + model = gauss + linear - fit = model.fit(y[x_min:x_max], pars, method='leastsq', - x=x[x_min:x_max], - weights=1/weights[x_min:x_max]) - #print(fit.fit_report()) - - a=fit.params['g_amplitude'] - c=fit.params['g_center'] - width=fit.params['g_sigma'] - #print("Gaussian: \t %5.4g +- %5.4g \t %3.3g +- %3.3g \t %3.3g +- %3.3g" % (a.value, a.stderr, c.value, c.stderr, width.value, width.stderr)) - return c, width, fit \ No newline at end of file + fit = model.fit(y[x_min:x_max], pars, method="leastsq", x=x[x_min:x_max], weights=1 / weights[x_min:x_max]) + # print(fit.fit_report()) + + fit.params["g_amplitude"] + c = fit.params["g_center"] + width = fit.params["g_sigma"] + # print("Gaussian: \t %5.4g +- %5.4g \t %3.3g +- %3.3g \t %3.3g +- %3.3g" % (a.value, a.stderr, c.value, c.stderr, width.value, width.stderr)) + return c, width, fit diff --git a/reduction/lr_reduction/reduction_template_reader.py 
b/reduction/lr_reduction/reduction_template_reader.py index 786fc5c..a05db9d 100644 --- a/reduction/lr_reduction/reduction_template_reader.py +++ b/reduction/lr_reduction/reduction_template_reader.py @@ -10,23 +10,23 @@ # Get the mantid version being used, if available try: import mantid + MANTID_VERSION = mantid.__version__ except: MANTID_VERSION = "None" class ReductionParameters(object): - def __init__(self): # Signal selection self.data_peak_range = [140, 150] self.subtract_background = True - self.background_roi = [137, 153,100, 200] - self.tof_range = [9600., 21600.] + self.background_roi = [137, 153, 100, 200] + self.tof_range = [9600.0, 21600.0] self.select_tof_range = True self.data_x_range_flag = True - self.data_x_range = [115,210] + self.data_x_range = [115, 210] # Normalization self.apply_normalization = True @@ -34,7 +34,7 @@ def __init__(self): self.subtract_norm_background = True self.norm_background_roi = [137, 153] self.norm_x_range_flag = True - self.norm_x_range = [115,210] + self.norm_x_range = [115, 210] # Data files self.data_files = [0] @@ -54,14 +54,14 @@ def __init__(self): self.ths_value = 0 self.angle_offset = 0.0 self.angle_offset_error = 0.0 - + # Scaling factor file - self.scaling_factor_file = '' + self.scaling_factor_file = "" self.scaling_factor_flag = True self.slits_width_flag = True # Incident medium list and selected value - self.incident_medium_list = ['air'] + self.incident_medium_list = ["air"] self.incident_medium_index_selected = 0 def from_dict(self, data_dict): @@ -70,9 +70,9 @@ def from_dict(self, data_dict): def to_xml(self): """ - Create XML from the current data. + Create XML from the current data. """ - _xml = "\n" + _xml = "\n" _xml += "narrow\n" _xml += "%s\n" % str(self.data_peak_range[0]) _xml += "%s\n" % str(self.data_peak_range[1]) @@ -85,7 +85,7 @@ def to_xml(self): _xml += "%s\n" % str(self.select_tof_range) _xml += "%s\n" % str(self.tof_range[0]) _xml += "%s\n" % str(self.tof_range[1]) - _xml += "%s\n" % ','.join([str(i) for i in self.data_files]) + _xml += "%s\n" % ",".join([str(i) for i in self.data_files]) _xml += "%s\n" % str(self.data_x_range[0]) _xml += "%s\n" % str(self.data_x_range[1]) _xml += "%s\n" % str(self.data_x_range_flag) @@ -131,68 +131,64 @@ def to_xml(self): def from_xml_element(self, instrument_dom): """ - Read in data from XML - @param xml_str: text to read the data from + Read in data from XML + @param xml_str: text to read the data from """ - #Peak from/to pixels - self.data_peak_range = [getIntElement(instrument_dom, "from_peak_pixels"), - getIntElement(instrument_dom, "to_peak_pixels")] + # Peak from/to pixels + self.data_peak_range = [getIntElement(instrument_dom, "from_peak_pixels"), getIntElement(instrument_dom, "to_peak_pixels")] - #data metadata + # data metadata _tthd_value = getStringElement(instrument_dom, "tthd_value") - if _tthd_value == '': - _tthd_value = 'N/A' + if _tthd_value == "": + _tthd_value = "N/A" self.tthd_value = _tthd_value _ths_value = getStringElement(instrument_dom, "ths_value") - if _ths_value == '': - _ths_value = 'N/A' + if _ths_value == "": + _ths_value = "N/A" self.ths_value = _ths_value - #low resolution range - self.data_x_range_flag = getBoolElement(instrument_dom, "x_range_flag", - default=self.data_x_range_flag) + # low resolution range + self.data_x_range_flag = getBoolElement(instrument_dom, "x_range_flag", default=self.data_x_range_flag) - self.data_x_range = [getIntElement(instrument_dom, "x_min_pixel"), - getIntElement(instrument_dom, "x_max_pixel")] + 
self.data_x_range = [getIntElement(instrument_dom, "x_min_pixel"), getIntElement(instrument_dom, "x_max_pixel")] - self.norm_x_range_flag = getBoolElement(instrument_dom, "norm_x_range_flag", - default=self.norm_x_range_flag) + self.norm_x_range_flag = getBoolElement(instrument_dom, "norm_x_range_flag", default=self.norm_x_range_flag) - self.norm_x_range = [getIntElement(instrument_dom, "norm_x_min"), - getIntElement(instrument_dom, "norm_x_max")] + self.norm_x_range = [getIntElement(instrument_dom, "norm_x_min"), getIntElement(instrument_dom, "norm_x_max")] - #background flag - self.subtract_background = getBoolElement(instrument_dom, "background_flag", - default=self.subtract_background) + # background flag + self.subtract_background = getBoolElement(instrument_dom, "background_flag", default=self.subtract_background) - #background from/to pixels - self.background_roi = [getIntElement(instrument_dom, "back_roi1_from"), - getIntElement(instrument_dom, "back_roi1_to"), - getIntElement(instrument_dom, "back_roi2_from"), - getIntElement(instrument_dom, "back_roi2_to")] + # background from/to pixels + self.background_roi = [ + getIntElement(instrument_dom, "back_roi1_from"), + getIntElement(instrument_dom, "back_roi1_to"), + getIntElement(instrument_dom, "back_roi2_from"), + getIntElement(instrument_dom, "back_roi2_to"), + ] # TOF range - self.select_tof_range = getBoolElement(instrument_dom, "tof_range_flag", - default=self.select_tof_range) - self.tof_range = [getFloatElement(instrument_dom, "from_tof_range"), - getFloatElement(instrument_dom, "to_tof_range")] + self.select_tof_range = getBoolElement(instrument_dom, "tof_range_flag", default=self.select_tof_range) + self.tof_range = [getFloatElement(instrument_dom, "from_tof_range"), getFloatElement(instrument_dom, "to_tof_range")] self.data_files = getIntList(instrument_dom, "data_sets") - #with or without norm - self.apply_normalization = getBoolElement(instrument_dom, "norm_flag", - default=self.apply_normalization) + # with or without norm + self.apply_normalization = getBoolElement(instrument_dom, "norm_flag", default=self.apply_normalization) - #Peak from/to pixels - self.norm_peak_range = [getIntElement(instrument_dom, "norm_from_peak_pixels"), - getIntElement(instrument_dom, "norm_to_peak_pixels")] + # Peak from/to pixels + self.norm_peak_range = [ + getIntElement(instrument_dom, "norm_from_peak_pixels"), + getIntElement(instrument_dom, "norm_to_peak_pixels"), + ] # Background subtraction option - self.subtract_norm_background = getBoolElement(instrument_dom, "norm_background_flag", - default=self.subtract_norm_background) - self.norm_background_roi = [getIntElement(instrument_dom, "norm_from_back_pixels"), - getIntElement(instrument_dom, "norm_to_back_pixels")] + self.subtract_norm_background = getBoolElement(instrument_dom, "norm_background_flag", default=self.subtract_norm_background) + self.norm_background_roi = [ + getIntElement(instrument_dom, "norm_from_back_pixels"), + getIntElement(instrument_dom, "norm_to_back_pixels"), + ] self.norm_file = getIntElement(instrument_dom, "norm_dataset") @@ -205,8 +201,7 @@ def from_xml_element(self, instrument_dom): # Angle offset self.angle_offset = getFloatElement(instrument_dom, "angle_offset", default=self.angle_offset) - self.angle_offset_error = getFloatElement(instrument_dom, "angle_offset_error", - default=self.angle_offset_error) + self.angle_offset_error = getFloatElement(instrument_dom, "angle_offset_error", default=self.angle_offset_error) # Scaling factor file and options 
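The getIntElement, getFloatElement, getBoolElement and getStringElement helpers used throughout from_xml_element are defined further down in this file and all follow the same minidom lookup pattern. A minimal self-contained sketch of that pattern, using a made-up template fragment rather than a real template file:

    import xml.dom.minidom

    def get_text(nodes):
        # Concatenate the text nodes of an element, as getText() does
        return "".join(node.data for node in nodes if node.nodeType == node.TEXT_NODE)

    def get_int(dom, tag, default=None):
        # Return the integer content of the first <tag> element, or the default
        elements = dom.getElementsByTagName(tag)
        value = get_text(elements[0].childNodes) if elements else None
        return int(value) if value is not None else default

    # Hypothetical fragment mimicking part of a <RefLData> entry in a template
    fragment = "<RefLData><from_peak_pixels>140</from_peak_pixels><to_peak_pixels>150</to_peak_pixels></RefLData>"
    dom = xml.dom.minidom.parseString(fragment)
    print([get_int(dom, "from_peak_pixels"), get_int(dom, "to_peak_pixels")])  # [140, 150]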
self.scaling_factor_file = getStringElement(instrument_dom, "scaling_factor_file") @@ -218,14 +213,14 @@ def from_xml_element(self, instrument_dom): self.incident_medium_list = getStringList(instrument_dom, "incident_medium_list") self.incident_medium_index_selected = getIntElement(instrument_dom, "incident_medium_index_selected") else: - self.incident_medium_list = ['H2O'] + self.incident_medium_list = ["H2O"] self.incident_medium_index_selected = 0 ###### Utility functions to read XML content ######################## def getText(nodelist): """ - Utility method to extract text out of an XML node + Utility method to extract text out of an XML node """ rc = "" for node in nodelist: @@ -233,36 +228,43 @@ def getText(nodelist): rc = rc + node.data return rc + def getContent(dom, tag): element_list = dom.getElementsByTagName(tag) return getText(element_list[0].childNodes) if len(element_list) > 0 else None + def getIntElement(dom, tag, default=None): value = getContent(dom, tag) return int(value) if value is not None else default + def getIntList(dom, tag, default=[]): value = getContent(dom, tag) if value is not None and len(value.strip()) > 0: - return list(map(int, value.split(','))) + return list(map(int, value.split(","))) else: return default + def getFloatElement(dom, tag, default=None): value = getContent(dom, tag) return float(value) if value is not None else default + def getFloatList(dom, tag, default=[]): value = getContent(dom, tag) if value is not None and len(value.strip()) > 0: - return list(map(float, value.split(','))) + return list(map(float, value.split(","))) else: return default -def getStringElement(dom, tag, default=''): + +def getStringElement(dom, tag, default=""): value = getContent(dom, tag) return value if value is not None else default + def getStringList(dom, tag, _default=[]): elem_list = [] element_list = dom.getElementsByTagName(tag) @@ -271,7 +273,8 @@ def getStringList(dom, tag, _default=[]): elem_list.append(getText(l.childNodes).strip()) return elem_list -def getBoolElement(dom, tag, true_tag='true', default=False): + +def getBoolElement(dom, tag, true_tag="true", default=False): value = getContent(dom, tag) return value.lower() == true_tag.lower() if value is not None else default @@ -279,7 +282,7 @@ def getBoolElement(dom, tag, true_tag='true', default=False): ###### Functions to read/write a template file ###################### def to_xml(data_sets): """ - Create XML from the current data. + Create XML from the current data. 
""" _xml = "\n" _xml += " REFL\n" @@ -287,32 +290,33 @@ def to_xml(data_sets): _xml += " %s\n" % VERSION _xml += " %s\n" % MANTID_VERSION _xml += " lr_reduction-%s\n" % VERSION - _xml += "\n" + _xml += "\n" for item in data_sets: _xml += item.to_xml() _xml += "\n" _xml += "\n" return _xml + def from_xml(xml_str): """ - Read in data from XML string + Read in data from XML string """ data_sets = [] dom = xml.dom.minidom.parseString(xml_str) element_list = dom.getElementsByTagName("Data") - if len(element_list)==0: + if len(element_list) == 0: element_list = dom.getElementsByTagName("RefLData") - if len(element_list)>0: + if len(element_list) > 0: for item in element_list: if item is not None: data_set = ReductionParameters() data_set.from_xml_element(item) data_sets.append(data_set) - if len(data_sets)==0: + if len(data_sets) == 0: data_sets = [ReductionParameters()] return data_sets diff --git a/reduction/lr_reduction/template.py b/reduction/lr_reduction/template.py index 747b8ca..e4130fb 100644 --- a/reduction/lr_reduction/template.py +++ b/reduction/lr_reduction/template.py @@ -1,27 +1,23 @@ """ Reduce a data run using a template generated by RefRed """ -import sys import os -import numpy as np +from functools import reduce -import mantid -from mantid.api import * import mantid.simpleapi as api +import numpy as np +from mantid.api import * from mantid.kernel import * -from functools import reduce - -from . import event_reduction -from . import reduction_template_reader +from . import event_reduction, reduction_template_reader TOLERANCE = 0.02 def read_template(template_file, sequence_number): """ - Read template from file. - @param sequence_number: the ID of the data set within the sequence of runs + Read template from file. + @param sequence_number: the ID of the data set within the sequence of runs """ fd = open(template_file, "r") xml_str = fd.read() @@ -37,15 +33,15 @@ def read_template(template_file, sequence_number): def scaling_factor(scaling_factor_file, workspace, match_slit_width=True): """ - Apply scaling factor from reference scaling data - @param workspace: Mantid workspace + Apply scaling factor from reference scaling data + @param workspace: Mantid workspace """ if not os.path.isfile(scaling_factor_file): print("Could not find scaling factor file: %s" % scaling_factor_file) return workspace # Get the wavelength - lr = workspace.getRun().getProperty('LambdaRequest').value[0] + lr = workspace.getRun().getProperty("LambdaRequest").value[0] lr_value = float("{0:.2f}".format(lr)) s1h = abs(workspace.getRun().getProperty("S1VHeight").value[0]) @@ -55,33 +51,32 @@ def scaling_factor(scaling_factor_file, workspace, match_slit_width=True): def _reduce(accumulation, item): """ - Reduce function that accumulates values in a dictionary + Reduce function that accumulates values in a dictionary """ - toks_item = item.split('=') - if len(toks_item)!=2: + toks_item = item.split("=") + if len(toks_item) != 2: return accumulation if isinstance(accumulation, dict): accumulation[toks_item[0].strip()] = toks_item[1].strip() else: - toks_accum = accumulation.split('=') - accumulation = {toks_item[0].strip(): toks_item[1].strip(), - toks_accum[0].strip(): toks_accum[1].strip()} + toks_accum = accumulation.split("=") + accumulation = {toks_item[0].strip(): toks_item[1].strip(), toks_accum[0].strip(): toks_accum[1].strip()} return accumulation def _value_check(key, data, reference): """ - Check an entry against a reference value + Check an entry against a reference value """ if key in data: return 
abs(abs(float(data[key])) - abs(float(reference))) <= TOLERANCE return False - with open(scaling_factor_file, 'r') as fd: + with open(scaling_factor_file, "r") as fd: file_content = fd.read() data_found = None - for line in file_content.split('\n'): - if line.startswith('#'): + for line in file_content.split("\n"): + if line.startswith("#"): continue # Parse the line of data and produce a dict @@ -91,65 +86,74 @@ def _value_check(key, data, reference): # Get ordered list of keys keys = [] for token in toks: - key_value = token.split('=') - if len(key_value)==2: + key_value = token.split("=") + if len(key_value) == 2: keys.append(key_value[0].strip()) # Skip empty lines - if len(keys)==0: + if len(keys) == 0: continue # Complain if the format is non-standard - elif len(keys)<10: + elif len(keys) < 10: print("Bad scaling factor entry\n %s" % line) continue # Sanity check - if keys[0] != 'IncidentMedium' and keys[1] != 'LambdaRequested' \ - and keys[2] != 'S1H': + if keys[0] != "IncidentMedium" and keys[1] != "LambdaRequested" and keys[2] != "S1H": print("The scaling factor file isn't standard: bad keywords") # The S2H key has been changing in the earlier version of REFL reduction. # Get the key from the data to make sure we are backward compatible. s2h_key = keys[3] s2w_key = keys[5] - if 'IncidentMedium' in data_dict \ - and _value_check('LambdaRequested', data_dict, lr_value) \ - and _value_check('S1H', data_dict, s1h) \ - and _value_check(s2h_key, data_dict, s2h): - - if not match_slit_width or (_value_check('S1W', data_dict, s1w) - and _value_check(s2w_key, data_dict, s2w)): + if ( + "IncidentMedium" in data_dict + and _value_check("LambdaRequested", data_dict, lr_value) + and _value_check("S1H", data_dict, s1h) + and _value_check(s2h_key, data_dict, s2h) + ): + if not match_slit_width or (_value_check("S1W", data_dict, s1w) and _value_check(s2w_key, data_dict, s2w)): data_found = data_dict break if data_found is not None: - a = float(data_found['a']) - b = float(data_found['b']) - a_error = float(data_found['error_a']) - b_error = float(data_found['error_b']) + a = float(data_found["a"]) + b = float(data_found["b"]) + a_error = float(data_found["error_a"]) + b_error = float(data_found["error_b"]) else: return 1, 0, 0, 0 return a, b, a_error, b_error -def process_from_template(run_number, template_path, q_summing=False, normalize=True, - tof_weighted=False, bck_in_q=False, clean=False, info=False): +def process_from_template( + run_number, template_path, q_summing=False, normalize=True, tof_weighted=False, bck_in_q=False, clean=False, info=False +): """ - The clean option removes leading zeros and the drop when doing q-summing + The clean option removes leading zeros and the drop when doing q-summing """ # For backward compatibility, consider the case of a list of run numbers to be added - if ',' in str(run_number): - list_of_runs = str(run_number).split(',') - run_number = '+'.join(list_of_runs) + if "," in str(run_number): + list_of_runs = str(run_number).split(",") + run_number = "+".join(list_of_runs) # Load data ws_sc = api.Load("REF_L_%s" % run_number) - return process_from_template_ws(ws_sc, template_path, q_summing=q_summing, - tof_weighted=tof_weighted, bck_in_q=bck_in_q, - clean=clean, info=info, normalize=normalize) - - -def process_from_template_ws(ws_sc, template_data, q_summing=False, - tof_weighted=False, bck_in_q=False, clean=False, - info=False, normalize=True, theta_value=None, ws_db=None): + return process_from_template_ws( + ws_sc, template_path, q_summing=q_summing, 
tof_weighted=tof_weighted, bck_in_q=bck_in_q, clean=clean, info=info, normalize=normalize + ) + + +def process_from_template_ws( + ws_sc, + template_data, + q_summing=False, + tof_weighted=False, + bck_in_q=False, + clean=False, + info=False, + normalize=True, + theta_value=None, + ws_db=None, +): # Get the sequence number sequence_number = 1 if ws_sc.getRun().hasProperty("sequence_number"): @@ -165,22 +169,22 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, ws_db = api.LoadEventNexus("REF_L_%s" % template_data.norm_file) # If we run in theta-theta geometry, we'll need thi - thi_value = ws_sc.getRun()['thi'].value[0] - ths_value = ws_sc.getRun()['ths'].value[0] + thi_value = ws_sc.getRun()["thi"].value[0] + ths_value = ws_sc.getRun()["ths"].value[0] # NOTE: An offset is no longer used be default. To use itm we can use # the EventReflectivity directly. - _wl = ws_sc.getRun()['LambdaRequest'].value[0] - print('wl=%g; ths=%g; thi=%g; No offset' % (_wl, ths_value, thi_value)) + _wl = ws_sc.getRun()["LambdaRequest"].value[0] + print("wl=%g; ths=%g; thi=%g; No offset" % (_wl, ths_value, thi_value)) if theta_value is not None: - theta = theta_value * np.pi / 180. + theta = theta_value * np.pi / 180.0 else: - if ws_sc.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': - theta = thi_value * np.pi / 180. + if ws_sc.getRun().getProperty("BL4B:CS:ExpPl:OperatingMode").value[0] == "Free Liquid": + theta = thi_value * np.pi / 180.0 else: - theta = ths_value * np.pi / 180. + theta = ths_value * np.pi / 180.0 # Get the reduction parameters from the template peak = template_data.data_peak_range @@ -189,8 +193,8 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, else: peak_bck = None - #TODO: Fit this peak - peak_center = (peak[0]+peak[1])/2.0 + # TODO: Fit this peak + peak_center = (peak[0] + peak[1]) / 2.0 if template_data.data_x_range_flag: low_res = template_data.data_x_range @@ -214,33 +218,42 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, q_step = -template_data.q_step # Perform the reduction - event_refl = event_reduction.EventReflectivity(ws_sc, ws_db, - signal_peak=peak, signal_bck=peak_bck, - norm_peak=norm_peak, norm_bck=norm_bck, - specular_pixel=peak_center, - signal_low_res=low_res, norm_low_res=norm_low_res, - q_min=q_min, q_step=q_step, q_max=None, - tof_range=[tof_min, tof_max], - theta=np.abs(theta), - instrument=event_reduction.EventReflectivity.INSTRUMENT_4B) + event_refl = event_reduction.EventReflectivity( + ws_sc, + ws_db, + signal_peak=peak, + signal_bck=peak_bck, + norm_peak=norm_peak, + norm_bck=norm_bck, + specular_pixel=peak_center, + signal_low_res=low_res, + norm_low_res=norm_low_res, + q_min=q_min, + q_step=q_step, + q_max=None, + tof_range=[tof_min, tof_max], + theta=np.abs(theta), + instrument=event_reduction.EventReflectivity.INSTRUMENT_4B, + ) # R(Q) - qz, refl, d_refl = event_refl.specular(q_summing=q_summing, tof_weighted=tof_weighted, - bck_in_q=bck_in_q, clean=clean, normalize=normalize) - qz_mid = (qz[:-1] + qz[1:])/2.0 + qz, refl, d_refl = event_refl.specular( + q_summing=q_summing, tof_weighted=tof_weighted, bck_in_q=bck_in_q, clean=clean, normalize=normalize + ) + qz_mid = (qz[:-1] + qz[1:]) / 2.0 print("Normalization options: %s %s" % (normalize, template_data.scaling_factor_flag)) if normalize and template_data.scaling_factor_flag: # Get the scaling factors a, b, err_a, err_b = scaling_factor(template_data.scaling_factor_file, ws_sc) - - _tof = 
4*np.pi*np.sin(event_refl.theta)*event_refl.constant/qz - _tof_mid = (_tof[1:] + _tof[:-1])/2.0 - - a_q = _tof_mid*b + a + + _tof = 4 * np.pi * np.sin(event_refl.theta) * event_refl.constant / qz + _tof_mid = (_tof[1:] + _tof[:-1]) / 2.0 + + a_q = _tof_mid * b + a d_a_q = np.sqrt(_tof_mid**2 * err_b**2 + err_a**2) - - d_refl = np.sqrt(d_refl**2/a_q**2 + refl**2*d_a_q**2/a_q**4) + + d_refl = np.sqrt(d_refl**2 / a_q**2 + refl**2 * d_a_q**2 / a_q**4) refl /= a_q else: a = b = 1 @@ -248,17 +261,17 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, # Trim ends as needed npts = len(qz_mid) - qz_mid = qz_mid[template_data.pre_cut:npts-template_data.post_cut] - refl = refl[template_data.pre_cut:npts-template_data.post_cut] - d_refl = d_refl[template_data.pre_cut:npts-template_data.post_cut] + qz_mid = qz_mid[template_data.pre_cut : npts - template_data.post_cut] + refl = refl[template_data.pre_cut : npts - template_data.post_cut] + d_refl = d_refl[template_data.pre_cut : npts - template_data.post_cut] # We can optionally return details about the reduction process if info: meta_data = event_refl.to_dict() - meta_data['scaling_factors'] = dict(a=a, err_a=err_a, b=b, err_b=err_b) - meta_data['q_summing'] = q_summing - meta_data['tof_weighted'] = tof_weighted - meta_data['bck_in_q'] = bck_in_q + meta_data["scaling_factors"] = dict(a=a, err_a=err_a, b=b, err_b=err_b) + meta_data["q_summing"] = q_summing + meta_data["tof_weighted"] = tof_weighted + meta_data["bck_in_q"] = bck_in_q return qz_mid, refl, d_refl, meta_data return qz_mid, refl, d_refl diff --git a/reduction/lr_reduction/time_resolved.py b/reduction/lr_reduction/time_resolved.py index 95d8d7d..1539c41 100644 --- a/reduction/lr_reduction/time_resolved.py +++ b/reduction/lr_reduction/time_resolved.py @@ -1,17 +1,17 @@ """ Time-resolved data reduction """ -import sys -import os -import numpy as np import json - -from matplotlib import pyplot as plt +import os +import sys import mantid -from mantid.api import * import mantid.simpleapi as api +import numpy as np +from mantid.api import * from mantid.kernel import * +from matplotlib import pyplot as plt + mantid.kernel.config.setLogLevel(3) from . import template @@ -20,12 +20,12 @@ def reduce_30Hz(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, scan_index=1, template_reference=None): """ - Perform 30Hz reduction - @param meas_run_30Hz: run number of the data we want to reduce - @param ref_run_30Hz: run number of the reference data, take with the same config - @param ref_data_60Hz: file path of the reduce data file at 60Hz - @param template_30Hz: file path of the template file for 30Hz - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_run_30Hz: run number of the data we want to reduce + @param ref_run_30Hz: run number of the reference data, taken with the same config + @param ref_data_60Hz: file path of the reduced data file at 60Hz + @param template_30Hz: file path of the template file for 30Hz + @param scan_index: scan index to use within the template.
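A usage sketch for this entry point, assuming the lr_reduction package is importable and Mantid can locate the REF_L event files; the run numbers and file paths are placeholders, not values from the repository:

    from lr_reduction import time_resolved

    # Placeholder inputs: a 30Hz run to reduce, a 30Hz reference taken in the same
    # configuration, the reduced 60Hz R(Q) of that reference, and the 30Hz template
    r_q = time_resolved.reduce_30Hz(
        meas_run_30Hz=198409,                              # placeholder run number
        ref_run_30Hz=198400,                               # placeholder run number
        ref_data_60Hz="/path/to/REFL_60Hz_reference.txt",  # placeholder path
        template_30Hz="/path/to/template_30Hz.xml",        # placeholder path
        scan_index=1,
    )
    # The returned array stacks the Q, R, dR and dQ arrays of the ratio-scaled
    # 30Hz measurement, matching the return value of reduce_30Hz_from_ws()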
""" # Load the template template_data = template.read_template(template_30Hz, scan_index) @@ -39,66 +39,65 @@ def reduce_30Hz(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, scan_ # Load the 60Hz reference data data_60Hz = np.loadtxt(ref_data_60Hz).T - return reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, - scan_index=scan_index, template_reference=template_reference) + return reduce_30Hz_from_ws( + meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=scan_index, template_reference=template_reference + ) -def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=1, - template_reference=None, q_summing=False): +def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=1, template_reference=None, q_summing=False): """ - Perform 30Hz reduction - @param meas_ws_30Hz: Mantid workspace of the data we want to reduce - @param ref_ws_30Hz: Mantid workspace of the reference data, take with the same config - @param data_60Hz: reduced reference data at 60Hz - @param template_data: template data object (for 30Hz) - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws_30Hz: Mantid workspace of the data we want to reduce + @param ref_ws_30Hz: Mantid workspace of the reference data, take with the same config + @param data_60Hz: reduced reference data at 60Hz + @param template_data: template data object (for 30Hz) + @param scan_index: scan index to use within the template. """ # Reduce the reference at 30Hz if template_reference is None: - r_ref = template.process_from_template_ws(ref_ws_30Hz, template_data, - q_summing=q_summing, normalize=False) + r_ref = template.process_from_template_ws(ref_ws_30Hz, template_data, q_summing=q_summing, normalize=False) else: - r_ref = template.process_from_template_ws(ref_ws_30Hz, template_reference, - q_summing=q_summing, normalize=False) + r_ref = template.process_from_template_ws(ref_ws_30Hz, template_reference, q_summing=q_summing, normalize=False) # Reduce the sample data at 30Hz - r_meas = template.process_from_template_ws(meas_ws_30Hz, template_data, - q_summing=q_summing, normalize=False) + r_meas = template.process_from_template_ws(meas_ws_30Hz, template_data, q_summing=q_summing, normalize=False) # Identify the bins we need to overlap with the 30Hz measurement # The assumption is that the binning is the same _max_q = min(r_ref[0].max(), r_meas[0].max()) _min_q = max(r_ref[0].min(), r_meas[0].min()) - _binning_60hz = (data_60Hz[0][1]-data_60Hz[0][0])/data_60Hz[0][0] - _binning_ref = (r_ref[0][1]-r_ref[0][0])/r_ref[0][0] - _binning_meas = (r_meas[0][1]-r_meas[0][0])/r_meas[0][0] + _binning_60hz = (data_60Hz[0][1] - data_60Hz[0][0]) / data_60Hz[0][0] + _binning_ref = (r_ref[0][1] - r_ref[0][0]) / r_ref[0][0] + _binning_meas = (r_meas[0][1] - r_meas[0][0]) / r_meas[0][0] if not np.fabs(_binning_60hz - _binning_ref) < 1e-5: print("ERROR: The binning of the 60 Hz reference is not the same as the dynamic template") print(" %s <> %s" % (_binning_60hz, _binning_ref)) # The tolerance will be half a bin - _tolerance = _binning_ref/2.0 + _tolerance = _binning_ref / 2.0 print("60Hz: %g %g [%g]" % (data_60Hz[0].min(), data_60Hz[0].max(), _binning_60hz)) print("Ref 30Hz: %g %g [%g]" % (r_meas[0].min(), r_meas[0].max(), _binning_ref)) print("Meas 30Hz: %g %g [%g]" % (r_ref[0].min(), r_ref[0].max(), _binning_meas)) - _q_idx_60 = np.asarray(np.where((data_60Hz[0] > _min_q*(1-_tolerance)) & (data_60Hz[0] < 
_max_q*(1+_tolerance))))[0] - _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q*(1-_tolerance)) & (r_meas[0] < _max_q*(1+_tolerance))))[0] - _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q*(1-_tolerance)) & (r_ref[0] < _max_q*(1+_tolerance))))[0] + _q_idx_60 = np.asarray(np.where((data_60Hz[0] > _min_q * (1 - _tolerance)) & (data_60Hz[0] < _max_q * (1 + _tolerance))))[0] + _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q * (1 - _tolerance)) & (r_meas[0] < _max_q * (1 + _tolerance))))[0] + _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q * (1 - _tolerance)) & (r_ref[0] < _max_q * (1 + _tolerance))))[0] if not data_60Hz[0][_q_idx_60].shape[0] == r_ref[0][_q_idx_ref30].shape[0]: print("\n\n60Hz reference may have been reduced with different binning!") print("Array sizes: %g %g %g" % (len(_q_idx_60), len(_q_idx_meas30), len(_q_idx_ref30))) print("Remember to average overlapping points!\n\n") - r_q_final = r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60] + r_q_final = r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60] - dr_q_final = np.sqrt((r_meas[2][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[2][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]**2*data_60Hz[1][_q_idx_60]*r_ref[2][_q_idx_ref30])**2) + dr_q_final = np.sqrt( + (r_meas[2][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[2][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] ** 2 * data_60Hz[1][_q_idx_60] * r_ref[2][_q_idx_ref30]) ** 2 + ) print("Q range: %s - %s" % (r_meas[0][0], r_meas[0][_q_idx_meas30][-1])) print("Constant-Q binning: %s" % str(q_summing)) @@ -118,43 +117,74 @@ def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, sca return np.asarray([q[_idx], r_q_final[_idx], dr_q_final[_idx], dq]) -def reduce_30Hz_slices(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, - template_reference=None, q_summing=False): - +def reduce_30Hz_slices( + meas_run_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, + q_summing=False, +): meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) - return reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=scan_index, create_plot=create_plot, - template_reference=template_reference, q_summing=False) - -def reduce_60Hz_slices(meas_run, template_file, - time_interval, output_dir, scan_index=1, create_plot=True): - + return reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=scan_index, + create_plot=create_plot, + template_reference=template_reference, + q_summing=False, + ) + + +def reduce_60Hz_slices(meas_run, template_file, time_interval, output_dir, scan_index=1, create_plot=True): meas_ws = api.LoadEventNexus("REF_L_%s" % meas_run) - return reduce_60Hz_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=scan_index, create_plot=create_plot) - -def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, - template_reference=None, q_summing=False): + return 
reduce_60Hz_slices_ws(meas_ws, template_file, time_interval, output_dir, scan_index=scan_index, create_plot=create_plot) + + +def reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, + q_summing=False, +): """ - Perform 30Hz reduction - @param meas_ws_30Hz: workspace of the data we want to reduce - @param ref_ws_30Hz: workspace of the reference data, take with the same config - @param ref_data_60Hz: file path of the reduce data file at 60Hz - @param template_30Hz: file path of the template file for 30Hz - @param time_interval: time step in seconds - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws_30Hz: workspace of the data we want to reduce + @param ref_ws_30Hz: workspace of the reference data, take with the same config + @param ref_data_60Hz: file path of the reduce data file at 60Hz + @param template_30Hz: file path of the template file for 30Hz + @param time_interval: time step in seconds + @param scan_index: scan index to use within the template. """ # Save options - options = dict(meas_run_30Hz=meas_ws_30Hz.getRun()['run_number'].value, - ref_run_30Hz=ref_run_30Hz, ref_data_60Hz=ref_data_60Hz, - template_30Hz=template_30Hz, time_interval=time_interval, - output_dir=output_dir, scan_index=scan_index, - template_reference=template_reference, q_summing=q_summing) - with open(os.path.join(output_dir, 'options.json'), 'w') as fp: + options = dict( + meas_run_30Hz=meas_ws_30Hz.getRun()["run_number"].value, + ref_run_30Hz=ref_run_30Hz, + ref_data_60Hz=ref_data_60Hz, + template_30Hz=template_30Hz, + time_interval=time_interval, + output_dir=output_dir, + scan_index=scan_index, + template_reference=template_reference, + q_summing=q_summing, + ) + with open(os.path.join(output_dir, "options.json"), "w") as fp: json.dump(options, fp) # Load the template @@ -170,16 +200,16 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 # Reduce the sample data at 30Hz print("Reading sample data at 30Hz") - #meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) + # meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws_30Hz.getRun()['duration'].value + duration = meas_ws_30Hz.getRun()["duration"].value except: duration = 0 try: - meas_run_30Hz = meas_ws_30Hz.getRun()['run_number'].value + meas_run_30Hz = meas_ws_30Hz.getRun()["run_number"].value except: meas_run_30Hz = 0 @@ -187,17 +217,19 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws_30Hz, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws_30Hz, + api.FilterEvents( + InputWorkspace=meas_ws_30Hz, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, - OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = 
wsgroup.getNames() @@ -211,37 +243,46 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 tmpws = api.mtd[name] print("workspace %s has %d events" % (name, tmpws.getNumberEvents())) try: - _reduced = reduce_30Hz_from_ws(tmpws, ref_ws_30Hz, data_60Hz, template_data, - scan_index=scan_index, template_reference=template_reference, - q_summing=q_summing) + _reduced = reduce_30Hz_from_ws( + tmpws, + ref_ws_30Hz, + data_60Hz, + template_data, + scan_index=scan_index, + template_reference=template_reference, + q_summing=q_summing, + ) # Remove first point reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run_30Hz, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run_30Hz, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print("reduce_30Hz_slices_ws: %s" % sys.exc_info()[0]) total_time += time_interval # Save output - output_file = os.path.join(output_dir, 'r%s-time-resolved.json' % meas_run_30Hz) + output_file = os.path.join(output_dir, "r%s-time-resolved.json" % meas_run_30Hz) print("Saving t-NR to %s" % output_file) package_json_data(meas_run_30Hz, output_dir, out_array=output_file) if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run_30Hz)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run_30Hz), + ) return reduced -def reduce_60Hz_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=1, create_plot=True): + +def reduce_60Hz_slices_ws(meas_ws, template_file, time_interval, output_dir, scan_index=1, create_plot=True): """ - Perform 30Hz reduction - @param meas_ws: workspace of the data we want to reduce - @param template_file: autoreduction template file - @param time_interval: time step in seconds - @param scan_index: scan index to use within the template. + Perform 60Hz reduction + @param meas_ws: workspace of the data we want to reduce + @param template_file: autoreduction template file + @param time_interval: time step in seconds + @param scan_index: scan index to use within the template.
""" # Load the template @@ -251,11 +292,11 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws.getRun()['duration'].value + duration = meas_ws.getRun()["duration"].value except: duration = 0 try: - meas_run = meas_ws.getRun()['run_number'].value + meas_run = meas_ws.getRun()["run_number"].value except: meas_run = 0 @@ -263,17 +304,19 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws, + api.FilterEvents( + InputWorkspace=meas_ws, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, - OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = wsgroup.getNames() @@ -285,64 +328,65 @@ def reduce_60Hz_slices_ws(meas_ws, template_file, try: _reduced = template.process_from_template_ws(tmpws, template_data) reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print("reduce_60Hz_slices_ws: %s" % sys.exc_info()[0]) total_time += time_interval if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run), + ) return reduced def plot_slices(reduced, title, time_interval, file_path, offset=10): - fig, ax = plt.subplots(figsize=(6,6)) + fig, ax = plt.subplots(figsize=(6, 6)) total_time = 0 - _running_offset = 1. 
+ _running_offset = 1.0 for _data in reduced: qz, refl, d_refl, _ = _data - plt.errorbar(qz, refl*_running_offset, yerr=d_refl*_running_offset, markersize=4, marker='o', - label='T=%g s' % total_time) + plt.errorbar(qz, refl * _running_offset, yerr=d_refl * _running_offset, markersize=4, marker="o", label="T=%g s" % total_time) total_time += time_interval _running_offset *= offset plt.legend() plt.title(title) - plt.xlabel('q [$1/\AA$]') - plt.ylabel('R(q)') - ax.set_yscale('log') - ax.set_xscale('log') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + ax.set_yscale("log") + ax.set_xscale("log") plt.show() plt.savefig(file_path) def package_json_data(dynamic_run, dyn_data_dir, out_array=None): - compiled_array = [] compiled_times = [] _file_list = sorted(os.listdir(dyn_data_dir)) # Get only the files for the run we're interested in - _good_files = [_f for _f in _file_list if _f.startswith('r%s_t' % dynamic_run)] + _good_files = [_f for _f in _file_list if _f.startswith("r%s_t" % dynamic_run)] for i, _file in enumerate(_good_files): - if _file.startswith('r%s_t' % dynamic_run): + if _file.startswith("r%s_t" % dynamic_run): _data = np.loadtxt(os.path.join(dyn_data_dir, _file)).T _data_name, _ = os.path.splitext(_file) - _time = int(_data_name.replace('r%s_t' % dynamic_run, '')) + _time = int(_data_name.replace("r%s_t" % dynamic_run, "")) compiled_array.append(_data.tolist()) compiled_times.append(_time) if out_array: - with open(out_array, 'w') as fp: + with open(out_array, "w") as fp: json.dump(dict(times=compiled_times, data=compiled_array), fp) return compiled_times, compiled_array diff --git a/reduction/lr_reduction/workflow.py b/reduction/lr_reduction/workflow.py index 472e0e4..9e585be 100644 --- a/reduction/lr_reduction/workflow.py +++ b/reduction/lr_reduction/workflow.py @@ -1,40 +1,32 @@ """ Autoreduction process for the Liquids Reflectometer """ -import sys -import os import json -import numpy as np +import os -import mantid import mantid.simpleapi as mtd_api +import numpy as np -from . import template -from . import reduction_template_reader -from . import output -from . import event_reduction +from . import event_reduction, output, reduction_template_reader, template -def reduce(ws, template_file, output_dir, average_overlap=False, - q_summing=False, bck_in_q=False, is_live=False): +def reduce(ws, template_file, output_dir, average_overlap=False, q_summing=False, bck_in_q=False, is_live=False): """ - Function called by reduce_REFL.py, which lives in /SNS/REF_L/shared/autoreduce - and is called by the automated reduction workflow. + Function called by reduce_REFL.py, which lives in /SNS/REF_L/shared/autoreduce + and is called by the automated reduction workflow. - If average_overlap is used, overlapping points will be averaged, otherwise they - will be left in the final data file. + If average_overlap is used, overlapping points will be averaged, otherwise they + will be left in the final data file. - :param average_overlap: if True, the overlapping points will be averaged - :param q_summing: if True, constant-Q binning will be used - :param bck_in_q: if True, and constant-Q binning is used, the background will be estimated - along constant-Q lines rather than along TOF/pixel boundaries. + :param average_overlap: if True, the overlapping points will be averaged + :param q_summing: if True, constant-Q binning will be used + :param bck_in_q: if True, and constant-Q binning is used, the background will be estimated + along constant-Q lines rather than along TOF/pixel boundaries. 
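A sketch of how this entry point is typically driven from an autoreduction script; the run number, template path and output directory are placeholders, and loading by run number follows the LoadEventNexus pattern used elsewhere in this package:

    import mantid.simpleapi as mtd_api
    from lr_reduction import workflow

    ws = mtd_api.LoadEventNexus("REF_L_198409")  # placeholder run number
    workflow.reduce(
        ws,
        "/path/to/autoreduce/template.xml",      # placeholder template path
        "/tmp/reduced",                          # output_dir
        average_overlap=False,
        q_summing=False,
        bck_in_q=False,
        is_live=False,
    )
    # Writes REFL_<sequence_id>_<sequence_number>_<run>_partial.txt for this run and
    # re-assembles the sequence into REFL_<sequence_id>_combined_data_auto.txt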
""" # Call the reduction using the template - qz_mid, refl, d_refl, meta_data = template.process_from_template_ws(ws, template_file, - q_summing=q_summing, - tof_weighted=q_summing, - clean=q_summing, - bck_in_q=bck_in_q, info=True) + qz_mid, refl, d_refl, meta_data = template.process_from_template_ws( + ws, template_file, q_summing=q_summing, tof_weighted=q_summing, clean=q_summing, bck_in_q=bck_in_q, info=True + ) # Save partial results coll = output.RunCollection() @@ -42,16 +34,15 @@ def reduce(ws, template_file, output_dir, average_overlap=False, # If this is live data, put it in a separate file to avoid conflict with auto-reduction if is_live: - reduced_file = os.path.join(output_dir, 'REFL_live_partial.txt') + reduced_file = os.path.join(output_dir, "REFL_live_partial.txt") else: - reduced_file = os.path.join(output_dir, 'REFL_%s_%s_%s_partial.txt' % (meta_data['sequence_id'], - meta_data['sequence_number'], - meta_data['run_number'])) + reduced_file = os.path.join( + output_dir, "REFL_%s_%s_%s_partial.txt" % (meta_data["sequence_id"], meta_data["sequence_number"], meta_data["run_number"]) + ) coll.save_ascii(reduced_file, meta_as_json=True) # Assemble partial results into a single R(q) - seq_list, run_list = assemble_results(meta_data['sequence_id'], output_dir, - average_overlap, is_live=is_live) + seq_list, run_list = assemble_results(meta_data["sequence_id"], output_dir, average_overlap, is_live=is_live) # Save template write_template(seq_list, run_list, template_file, output_dir) @@ -62,7 +53,7 @@ def reduce(ws, template_file, output_dir, average_overlap=False, def assemble_results(first_run, output_dir, average_overlap=False, is_live=False): """ - Find related runs and assemble them in one R(q) data set + Find related runs and assemble them in one R(q) data set """ # Keep track of sequence IDs and run numbers so we can make a new template seq_list = [] @@ -71,8 +62,8 @@ def assemble_results(first_run, output_dir, average_overlap=False, is_live=False file_list = sorted(os.listdir(output_dir)) for item in file_list: - if item.startswith("REFL_%s" % first_run) and item.endswith('partial.txt'): - toks = item.split('_') + if item.startswith("REFL_%s" % first_run) and item.endswith("partial.txt"): + toks = item.split("_") if not len(toks) == 5 or int(toks[2]) == 0: continue seq_list.append(int(toks[2])) @@ -80,14 +71,14 @@ def assemble_results(first_run, output_dir, average_overlap=False, is_live=False # Read the partial data and add to a collection _, _, _, _, _meta = output.read_file(os.path.join(output_dir, item)) - if is_live or not _meta['start_time'] == "live": + if is_live or not _meta["start_time"] == "live": coll.add_from_file(os.path.join(output_dir, item)) elif item == "REFL_live_partial.txt": coll.add_from_file(os.path.join(output_dir, item)) - output_file_name = 'REFL_%s_combined_data_auto.txt' % first_run + output_file_name = "REFL_%s_combined_data_auto.txt" % first_run if is_live: - output_file_name = 'REFL_%s_live_estimate.txt' % first_run + output_file_name = "REFL_%s_live_estimate.txt" % first_run coll.save_ascii(os.path.join(output_dir, output_file_name)) return seq_list, run_list @@ -95,8 +86,8 @@ def assemble_results(first_run, output_dir, average_overlap=False, is_live=False def write_template(seq_list, run_list, template_file, output_dir): """ - Read the appropriate entry in a template file and save an updated - copy with the updated run number. + Read the appropriate entry in a template file and save an updated + copy with the updated run number. 
""" with open(template_file, "r") as fd: xml_str = fd.read() @@ -105,21 +96,21 @@ def write_template(seq_list, run_list, template_file, output_dir): new_data_sets = [] for i in range(len(seq_list)): if len(data_sets) >= seq_list[i]: - data_sets[seq_list[i]-1].data_files = [run_list[i]] - new_data_sets.append(data_sets[seq_list[i]-1]) + data_sets[seq_list[i] - 1].data_files = [run_list[i]] + new_data_sets.append(data_sets[seq_list[i] - 1]) else: print("Too few entries [%s] in template for sequence number %s" % (len(data_sets), seq_list[i])) # Save the template that was used xml_str = reduction_template_reader.to_xml(new_data_sets) - with open(os.path.join(output_dir, 'REF_L_%s_auto_template.xml' % run_list[0]), 'w') as fd: + with open(os.path.join(output_dir, "REF_L_%s_auto_template.xml" % run_list[0]), "w") as fd: fd.write(xml_str) -def reduce_fixed_two_theta(ws, template_file, output_dir, average_overlap=False, - q_summing=False, bck_in_q=False, peak_width=10, offset_from_first=True): - """ - """ +def reduce_fixed_two_theta( + ws, template_file, output_dir, average_overlap=False, q_summing=False, bck_in_q=False, peak_width=10, offset_from_first=True +): + """ """ from . import peak_finding print("\nProcessing: %s" % ws.getRunNumber()) @@ -131,90 +122,92 @@ def reduce_fixed_two_theta(ws, template_file, output_dir, average_overlap=False, sequence_id = ws.getRun().getProperty("sequence_id").value[0] # Theta value that we are aiming for - ths_value = ws.getRun()['ths'].value[0] + ths_value = ws.getRun()["ths"].value[0] # Read template so we can load the direct beam run template_data = template.read_template(template_file, sequence_number) # Load normalization run ws_db = mtd_api.LoadEventNexus("REF_L_%s" % template_data.norm_file) - tthd_value = ws.getRun()['tthd'].value[0] + tthd_value = ws.getRun()["tthd"].value[0] # Look for parameters that might have been determined earlier for this measurement - options_file = os.path.join(output_dir, 'REFL_%s_options.json' % sequence_id) + options_file = os.path.join(output_dir, "REFL_%s_options.json" % sequence_id) if offset_from_first and sequence_number > 1 and os.path.isfile(options_file): - with open(options_file, 'r') as fd: + with open(options_file, "r") as fd: options = json.load(fd) - pixel_offset = options['pixel_offset'] - tthd_calibration = options['tthd_db'] - twotheta = 2*ths_value + options['twotheta_offset'] + pixel_offset = options["pixel_offset"] + options["tthd_db"] + twotheta = 2 * ths_value + options["twotheta_offset"] else: # Fit direct beam position - x_min=template_data.norm_peak_range[0]-25 - x_max=template_data.norm_peak_range[1]+25 + x_min = template_data.norm_peak_range[0] - 25 + x_max = template_data.norm_peak_range[1] + 25 tof, _x, _y = peak_finding.process_data(ws_db, summed=True, tof_step=200) peak_center = np.argmax(_y) db_center, db_width, _ = peak_finding.fit_signal_flat_bck(_x, _y, x_min=x_min, x_max=x_max, center=peak_center) - print(" DB center: %g\t Width: %g from [%g %g]" % (db_center, db_width, - template_data.norm_peak_range[0], - template_data.norm_peak_range[1])) + print( + " DB center: %g\t Width: %g from [%g %g]" + % (db_center, db_width, template_data.norm_peak_range[0], template_data.norm_peak_range[1]) + ) # Fit the reflected beam position - x_min=template_data.data_peak_range[0]-peak_width - x_max=template_data.data_peak_range[1]+peak_width + x_min = template_data.data_peak_range[0] - peak_width + x_max = template_data.data_peak_range[1] + peak_width tof, _x, _y = peak_finding.process_data(ws, 
summed=True, tof_step=200) peak_center = np.argmax(_y) sc_center, sc_width, _ = peak_finding.fit_signal_flat_bck(_x, _y, x_min=x_min, x_max=x_max, center=peak_center) - pixel_offset = sc_center - (template_data.data_peak_range[1] + template_data.data_peak_range[0])/2.0 + pixel_offset = sc_center - (template_data.data_peak_range[1] + template_data.data_peak_range[0]) / 2.0 print(" SC center: %g\t Width: %g" % (sc_center, sc_width)) pixel_width = float(ws.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0 sample_det_distance = event_reduction.EventReflectivity.DEFAULT_4B_SAMPLE_DET_DISTANCE - twotheta = np.arctan((db_center-sc_center)*pixel_width / sample_det_distance) / 2.0 * 180 / np.pi + twotheta = np.arctan((db_center - sc_center) * pixel_width / sample_det_distance) / 2.0 * 180 / np.pi # Store the tthd of the direct beam and account for the fact that it may be # different from our reflected beam for this calibration data. # This will allow us to be compatible with both fixed and moving detector arm. - tthd_db = ws_db.getRun()['tthd'].value[0] + tthd_db = ws_db.getRun()["tthd"].value[0] twotheta = twotheta + tthd_value - tthd_db # If this is the first angle, keep the value for later - options = dict(twotheta_offset = twotheta - 2*ths_value, - pixel_offset = pixel_offset, - tthd_db = tthd_db, tthd_sc = tthd_value) - with open(options_file, 'w') as fp: + options = dict(twotheta_offset=twotheta - 2 * ths_value, pixel_offset=pixel_offset, tthd_db=tthd_db, tthd_sc=tthd_value) + with open(options_file, "w") as fp: json.dump(options, fp) print(" Two-theta = %g" % twotheta) # Modify the template with the fitted results - print(" Template peak: [%g %g]" % (template_data.data_peak_range[0], - template_data.data_peak_range[1])) + print(" Template peak: [%g %g]" % (template_data.data_peak_range[0], template_data.data_peak_range[1])) template_data.data_peak_range = np.rint(np.asarray(template_data.data_peak_range) + pixel_offset).astype(int) template_data.background_roi = np.rint(np.asarray(template_data.background_roi) + pixel_offset).astype(int) - print(" New peak: [%g %g]" % (template_data.data_peak_range[0], - template_data.data_peak_range[1])) - print(" New bck: [%g %g]" % (template_data.background_roi[0], - template_data.background_roi[1])) + print(" New peak: [%g %g]" % (template_data.data_peak_range[0], template_data.data_peak_range[1])) + print(" New bck: [%g %g]" % (template_data.background_roi[0], template_data.background_roi[1])) # Call the reduction using the template - qz_mid, refl, d_refl, meta_data = template.process_from_template_ws(ws, template_data, - q_summing=q_summing, - tof_weighted=q_summing, - bck_in_q=bck_in_q, info=True, - theta_value = twotheta/2.0, - ws_db = ws_db) + qz_mid, refl, d_refl, meta_data = template.process_from_template_ws( + ws, + template_data, + q_summing=q_summing, + tof_weighted=q_summing, + bck_in_q=bck_in_q, + info=True, + theta_value=twotheta / 2.0, + ws_db=ws_db, + ) # Save partial results coll = output.RunCollection() coll.add(qz_mid, refl, d_refl, meta_data=meta_data) - coll.save_ascii(os.path.join(output_dir, 'REFL_%s_%s_%s_partial.txt' % (meta_data['sequence_id'], - meta_data['sequence_number'], - meta_data['run_number'])), - meta_as_json=True) + coll.save_ascii( + os.path.join( + output_dir, "REFL_%s_%s_%s_partial.txt" % (meta_data["sequence_id"], meta_data["sequence_number"], meta_data["run_number"]) + ), + meta_as_json=True, + ) # Assemble partial results into a single R(q) - seq_list, run_list = 
assemble_results(meta_data['sequence_id'], output_dir, average_overlap) + seq_list, run_list = assemble_results(meta_data["sequence_id"], output_dir, average_overlap) # Save template write_template(seq_list, run_list, template_file, output_dir) @@ -222,23 +215,23 @@ def reduce_fixed_two_theta(ws, template_file, output_dir, average_overlap=False, # Return the sequence identifier return run_list[0] + def reduce_explorer(ws, ws_db, theta_pv=None, center_pixel=145, db_center_pixel=145, peak_width=10): - """ - """ + """ """ from . import peak_finding if theta_pv is None: - if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': - theta_pv = 'thi' + if ws.getRun().getProperty("BL4B:CS:ExpPl:OperatingMode").value[0] == "Free Liquid": + theta_pv = "thi" else: - theta_pv = 'ths' + theta_pv = "ths" print("\nProcessing: %s" % ws.getRunNumber()) # Theta value that we are aiming for theta_value = np.fabs(ws.getRun()[theta_pv].value[0]) # Load normalization run - tthd_value = ws.getRun()['tthd'].value[0] + tthd_value = ws.getRun()["tthd"].value[0] # Fit direct beam position x_min = center_pixel - 25 @@ -249,8 +242,8 @@ def reduce_explorer(ws, ws_db, theta_pv=None, center_pixel=145, db_center_pixel= print(" DB center: %g\t Width: %g" % (db_center, db_width)) # Fit the reflected beam position - x_min=db_center_pixel-peak_width - x_max=db_center_pixel+peak_width + x_min = db_center_pixel - peak_width + x_max = db_center_pixel + peak_width tof, _x, _y = peak_finding.process_data(ws, summed=True, tof_step=200) peak_center = np.argmax(_y) sc_center, sc_width, _ = peak_finding.fit_signal_flat_bck(_x, _y, x_min=x_min, x_max=x_max, center=peak_center) @@ -258,40 +251,46 @@ def reduce_explorer(ws, ws_db, theta_pv=None, center_pixel=145, db_center_pixel= pixel_width = float(ws.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0 sample_det_distance = event_reduction.EventReflectivity.DEFAULT_4B_SAMPLE_DET_DISTANCE - twotheta = np.arctan((db_center-sc_center)*pixel_width / sample_det_distance) / 2.0 * 180 / np.pi + twotheta = np.arctan((db_center - sc_center) * pixel_width / sample_det_distance) / 2.0 * 180 / np.pi # Store the tthd of the direct beam and account for the fact that it may be # different from our reflected beam for this calibration data. # This will allow us to be compatible with both fixed and moving detector arm. - tthd_db = ws_db.getRun()['tthd'].value[0] + tthd_db = ws_db.getRun()["tthd"].value[0] twotheta = twotheta + tthd_value - tthd_db print(" Theta = %g Two-theta = %g" % (theta_value, twotheta)) # Perform the reduction width_mult = 2.5 - peak = [np.rint(sc_center - width_mult*sc_width).astype(int), np.rint(sc_center + width_mult*sc_width).astype(int)] - norm_peak = [np.rint(db_center - width_mult*db_width).astype(int), np.rint(db_center + width_mult*db_width).astype(int)] - peak_bck = [peak[0]-3, peak[1]+3] - norm_bck = [norm_peak[0]-3, norm_peak[1]+3] + peak = [np.rint(sc_center - width_mult * sc_width).astype(int), np.rint(sc_center + width_mult * sc_width).astype(int)] + norm_peak = [np.rint(db_center - width_mult * db_width).astype(int), np.rint(db_center + width_mult * db_width).astype(int)] + peak_bck = [peak[0] - 3, peak[1] + 3] + norm_bck = [norm_peak[0] - 3, norm_peak[1] + 3] tof_min = ws.getTofMin() tof_max = ws.getTofMax() - theta = theta_value * np.pi / 180. 
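To make the geometry above explicit, the same two-theta estimate can be written as a small standalone helper (a sketch only; the default pixel width and sample-detector distance below are placeholder values, not the instrument parameters read at run time):

```python
import numpy as np

def twotheta_from_centers(db_center, sc_center, tthd_sc, tthd_db,
                          pixel_width=0.0007, sample_det_distance=1.355):
    """Convert the pixel offset between the fitted direct-beam and reflected-beam
    centers into a scattering angle, then correct for any difference in the
    detector arm position (tthd) between the two measurements."""
    twotheta = np.arctan((db_center - sc_center) * pixel_width / sample_det_distance) / 2.0 * 180 / np.pi
    return twotheta + tthd_sc - tthd_db

# Example with made-up fitted centers (pixels) and arm positions (degrees)
print(twotheta_from_centers(db_center=145.0, sc_center=155.0, tthd_sc=0.6, tthd_db=0.0))
```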
- - event_refl = event_reduction.EventReflectivity(ws, ws_db, - signal_peak=peak, signal_bck=peak_bck, - norm_peak=norm_peak, norm_bck=norm_bck, - specular_pixel=sc_center.value, - signal_low_res=[65,180], norm_low_res=[65,180], - q_min=None, q_max=None, - tof_range = [tof_min, tof_max], - theta=theta) + theta = theta_value * np.pi / 180.0 + + event_refl = event_reduction.EventReflectivity( + ws, + ws_db, + signal_peak=peak, + signal_bck=peak_bck, + norm_peak=norm_peak, + norm_bck=norm_bck, + specular_pixel=sc_center.value, + signal_low_res=[65, 180], + norm_low_res=[65, 180], + q_min=None, + q_max=None, + tof_range=[tof_min, tof_max], + theta=theta, + ) # R(Q) - qz, refl, d_refl = event_refl.specular(q_summing=False, tof_weighted=False, - bck_in_q=False, clean=False, normalize=True) - qz_mid = (qz[:-1] + qz[1:])/2.0 + qz, refl, d_refl = event_refl.specular(q_summing=False, tof_weighted=False, bck_in_q=False, clean=False, normalize=True) + qz_mid = (qz[:-1] + qz[1:]) / 2.0 return qz_mid, refl, d_refl diff --git a/reduction/notebooks/divergent-beam.ipynb b/reduction/notebooks/divergent-beam.ipynb index 22bf689..af5e363 100644 --- a/reduction/notebooks/divergent-beam.ipynb +++ b/reduction/notebooks/divergent-beam.ipynb @@ -10,6 +10,7 @@ { "cell_type": "code", "execution_count": 1, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:00:28.458625Z", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": 4, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:00:33.706114Z", @@ -64,12 +66,14 @@ "source": [ "import mantid\n", "import mantid.simpleapi as api\n", + "\n", "mantid.kernel.config.setLogLevel(3)" ] }, { "cell_type": "code", "execution_count": 5, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:00:35.975239Z", @@ -99,6 +103,7 @@ { "cell_type": "code", "execution_count": 6, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:00:36.538855Z", @@ -112,13 +117,14 @@ "outputs": [], "source": [ "import importlib\n", - "from lr_reduction import event_reduction\n", - "from lr_reduction import template\n" + "\n", + "from lr_reduction import event_reduction, template\n" ] }, { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] @@ -126,6 +132,7 @@ { "cell_type": "code", "execution_count": 8, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:01:44.413450Z", @@ -228,6 +235,7 @@ { "cell_type": "code", "execution_count": 9, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:01:55.047230Z", @@ -240,8 +248,8 @@ }, "outputs": [], "source": [ - "import lmfit\n", - "from lmfit.models import GaussianModel, LinearModel, ConstantModel, RectangleModel, QuadraticModel\n", + "from lmfit.models import GaussianModel, LinearModel, QuadraticModel, RectangleModel\n", + "\n", "\n", "def process_data(workspace, summed=True, tof_step=200):\n", " tof_min = workspace.getTofMin()\n", @@ -253,7 +261,7 @@ " tof=_ws.extractX()[0]\n", " tof = (tof[:-1]+tof[1:])/2.0\n", "\n", - " \n", + "\n", " if summed:\n", " y = np.sum(y, axis=2)\n", "\n", @@ -264,8 +272,8 @@ "def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None):\n", " gauss = GaussianModel(prefix='g_')\n", " linear = LinearModel(prefix='l_')\n", - " quadratic = QuadraticModel(prefix='q_')\n", - " rectangular = RectangleModel(prefix='r_')\n", + " QuadraticModel(prefix='q_')\n", + " RectangleModel(prefix='r_')\n", "\n", 
" amplitude_guess = np.max(y[x_min:x_max])\n", "\n", @@ -275,7 +283,7 @@ " _center = center\n", " if sigma is not None:\n", " _sigma = sigma\n", - " \n", + "\n", " pars = gauss.make_params(amplitude=amplitude_guess, center=_center, sigma=_sigma)\n", " pars.update(linear.make_params(a=0, b=0))\n", "\n", @@ -286,17 +294,17 @@ " pars['g_amplitude'].min=0\n", " pars['g_center'].min=_center-2\n", " pars['g_center'].max=_center+2\n", - " \n", + "\n", " weights=1/np.sqrt(y)\n", " weights[y<1]=1\n", - " \n", + "\n", " model = gauss + linear\n", " fit = model.fit(y[x_min:x_max], pars, method='leastsq',\n", - " x=x[x_min:x_max], \n", + " x=x[x_min:x_max],\n", " weights=1/weights[x_min:x_max])\n", " #print(fit.fit_report())\n", "\n", - " a=fit.params['g_amplitude']\n", + " fit.params['g_amplitude']\n", " c=fit.params['g_center']\n", " width=fit.params['g_sigma']\n", " #print(\"Gaussian: \\t %5.4g +- %5.4g \\t %3.3g +- %3.3g \\t %3.3g +- %3.3g\" % (a.value, a.stderr, c.value, c.stderr, width.value, width.stderr))\n", @@ -308,6 +316,7 @@ { "cell_type": "code", "execution_count": 10, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:02:02.929372Z", @@ -379,6 +388,7 @@ { "cell_type": "code", "execution_count": 19, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:15:10.910539Z", @@ -468,7 +478,7 @@ "\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -479,6 +489,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] diff --git a/reduction/notebooks/example.ipynb b/reduction/notebooks/example.ipynb index 7205488..c01564f 100644 --- a/reduction/notebooks/example.ipynb +++ b/reduction/notebooks/example.ipynb @@ -10,6 +10,7 @@ { "cell_type": "code", "execution_count": 1, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:45:58.324725Z", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": 3, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:46:02.805840Z", @@ -64,12 +66,14 @@ "source": [ "import mantid\n", "import mantid.simpleapi as api\n", + "\n", "mantid.kernel.config.setLogLevel(3)" ] }, { "cell_type": "code", "execution_count": 4, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:46:03.530155Z", @@ -99,6 +103,7 @@ { "cell_type": "code", "execution_count": 5, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:46:03.994077Z", @@ -112,8 +117,8 @@ "outputs": [], "source": [ "import importlib\n", - "from lr_reduction import event_reduction\n", - "from lr_reduction import template\n" + "\n", + "from lr_reduction import event_reduction, template\n" ] }, { @@ -126,6 +131,7 @@ { "cell_type": "code", "execution_count": 6, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:46:08.610432Z", @@ -245,9 +251,9 @@ " qz_mid = qz_mid[idx][pre_cut:-post_cut]\n", " refl = refl[idx][pre_cut:-post_cut]\n", " d_refl = d_refl[idx][pre_cut:-post_cut]\n", - " \n", + "\n", " plt.errorbar(qz_mid, refl, yerr=d_refl, markersize=4, marker='.', linestyle='', label=run_number)\n", - " \n", + "\n", " for i in range(len(qz_mid)-1, -1, -1):\n", " qz_all.append(qz_mid[i])\n", " refl_all.append(refl[i])\n", @@ -264,7 +270,7 @@ "\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", 
"ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -278,6 +284,7 @@ { "cell_type": "code", "execution_count": 7, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:46:37.527755Z", @@ -361,7 +368,7 @@ "plt.errorbar(_data[0], _data[1], yerr=_data[2], markersize=4, marker='', linestyle='-', label='reference')\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -372,8 +379,8 @@ " fig, ax = plt.subplots(figsize=(10,5))\n", " plt.plot(qz_all, delta)\n", "\n", - " plt.xlabel('q [$1/\\AA$]')\n", - " plt.ylabel('$\\Delta$R/R [%]')\n", + " plt.xlabel(r'q [$1/\\AA$]')\n", + " plt.ylabel(r'$\\Delta$R/R [%]')\n", " ax.set_yscale('linear')\n", " ax.set_xscale('log')\n", " plt.show()\n", @@ -387,6 +394,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] diff --git a/reduction/notebooks/time-resolved.ipynb b/reduction/notebooks/time-resolved.ipynb index c071e94..0939128 100644 --- a/reduction/notebooks/time-resolved.ipynb +++ b/reduction/notebooks/time-resolved.ipynb @@ -10,6 +10,7 @@ { "cell_type": "code", "execution_count": 1, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-10T13:56:17.335713Z", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": 3, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-10T13:56:24.934094Z", @@ -63,13 +65,14 @@ ], "source": [ "import mantid\n", - "import mantid.simpleapi as api\n", + "\n", "mantid.kernel.config.setLogLevel(3)" ] }, { "cell_type": "code", "execution_count": 4, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-10T13:56:26.198398Z", @@ -99,6 +102,7 @@ { "cell_type": "code", "execution_count": 5, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-10T13:56:29.042078Z", @@ -110,18 +114,12 @@ "tags": [] }, "outputs": [], - "source": [ - "import importlib\n", - "from lr_reduction import workflow\n", - "from lr_reduction import template\n", - "from lr_reduction import output\n", - "from lr_reduction import event_reduction\n", - "from lr_reduction import reduction_template_reader" - ] + "source": [] }, { "cell_type": "code", "execution_count": 25, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-11T17:06:01.723461Z", @@ -278,7 +276,7 @@ " plt.errorbar(data_old[0], data_old[1], yerr=data_old[2], markersize=4, marker='', linestyle='-', label=f)\n", "\n", " plt.legend()\n", - " plt.xlabel('q [$1/\\AA$]')\n", + " plt.xlabel(r'q [$1/\\AA$]')\n", " plt.ylabel('R(q)')\n", " ax.set_yscale('log')\n", " ax.set_xscale('log')\n", @@ -288,6 +286,7 @@ { "cell_type": "code", "execution_count": 35, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-01-12T15:46:21.754507Z", diff --git a/reduction/notebooks/workflow-fixed-tthd.ipynb b/reduction/notebooks/workflow-fixed-tthd.ipynb index 0f31ae9..75a6a15 100644 --- a/reduction/notebooks/workflow-fixed-tthd.ipynb +++ b/reduction/notebooks/workflow-fixed-tthd.ipynb @@ -10,6 +10,7 @@ { "cell_type": "code", "execution_count": 1, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:18:48.078572Z", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": 3, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:18:51.168792Z", @@ -64,12 +66,14 @@ "source": [ "import mantid\n", "import mantid.simpleapi as api\n", + "\n", 
"mantid.kernel.config.setLogLevel(3)" ] }, { "cell_type": "code", "execution_count": 4, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:18:51.758890Z", @@ -99,6 +103,7 @@ { "cell_type": "code", "execution_count": 5, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:18:52.225802Z", @@ -112,17 +117,14 @@ "outputs": [], "source": [ "import importlib\n", - "from lr_reduction import workflow\n", - "from lr_reduction import template\n", - "from lr_reduction import output\n", - "from lr_reduction import event_reduction\n", - "from lr_reduction import reduction_template_reader\n", - "from lr_reduction import peak_finding" + "\n", + "from lr_reduction import event_reduction, output, peak_finding, template, workflow" ] }, { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] @@ -130,6 +132,7 @@ { "cell_type": "code", "execution_count": 6, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:18:53.365861Z", @@ -264,7 +267,7 @@ "\n", " seq += 1\n", "\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -274,6 +277,7 @@ { "cell_type": "code", "execution_count": 7, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-14T21:19:17.943373Z", @@ -325,7 +329,7 @@ "plt.errorbar(_data[0], _data[1]*_data[0]**4, yerr=_data[2]*_data[0]**4, markersize=4, marker='', linestyle='-', label='reference')\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -336,6 +340,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] @@ -343,6 +348,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] diff --git a/reduction/notebooks/workflow.ipynb b/reduction/notebooks/workflow.ipynb index 6c6bf06..b1ee6f2 100644 --- a/reduction/notebooks/workflow.ipynb +++ b/reduction/notebooks/workflow.ipynb @@ -10,6 +10,7 @@ { "cell_type": "code", "execution_count": 1, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:43:50.969336Z", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": 3, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:43:57.913360Z", @@ -64,12 +66,14 @@ "source": [ "import mantid\n", "import mantid.simpleapi as api\n", + "\n", "mantid.kernel.config.setLogLevel(3)" ] }, { "cell_type": "code", "execution_count": 4, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:43:58.743926Z", @@ -99,6 +103,7 @@ { "cell_type": "code", "execution_count": 5, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:44:00.048247Z", @@ -112,16 +117,14 @@ "outputs": [], "source": [ "import importlib\n", - "from lr_reduction import workflow\n", - "from lr_reduction import template\n", - "from lr_reduction import output\n", - "from lr_reduction import event_reduction\n", - "from lr_reduction import reduction_template_reader" + "\n", + "from lr_reduction import event_reduction, output, reduction_template_reader, template, workflow" ] }, { "cell_type": "code", "execution_count": 6, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:44:00.713235Z", @@ -172,6 +175,7 @@ { "cell_type": "code", "execution_count": 
7, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:44:50.040080Z", @@ -258,7 +262,7 @@ "plt.errorbar(_data[0], _data[1]*_data[0]**4, yerr=_data[2]*_data[0]**4, markersize=4, marker='', linestyle='-', label='reference')\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -270,8 +274,8 @@ " plt.plot(_refl[0], _refl[3]/_refl[0], label=\"new_reduction\")\n", " plt.plot(_data[0], _data[3]/_data[0], label=\"reference\")\n", "\n", - " plt.xlabel('q [$1/\\AA$]')\n", - " plt.ylabel('$\\Delta q$')\n", + " plt.xlabel(r'q [$1/\\AA$]')\n", + " plt.ylabel(r'$\\Delta q$')\n", " ax.set_yscale('linear')\n", " ax.set_xscale('log')\n", " plt.show()\n", @@ -285,6 +289,7 @@ { "cell_type": "code", "execution_count": 8, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-23T14:44:56.719174Z", @@ -347,7 +352,7 @@ " data_sets = reduction_template_reader.from_xml(xml_str)\n", "\n", " # Left background\n", - " for i in range(8): \n", + " for i in range(8):\n", " print(\"Peak: %s\" % data_sets[i].data_peak_range)\n", " data_sets[i].background_roi[0] = data_sets[i].data_peak_range[0]-5\n", " data_sets[i].background_roi[1] = data_sets[i].data_peak_range[0]\n", @@ -357,7 +362,7 @@ " fd.write(xml_str)\n", "\n", " # Right background\n", - " for i in range(8): \n", + " for i in range(8):\n", " data_sets[i].background_roi[0] = data_sets[i].data_peak_range[1]\n", " data_sets[i].background_roi[1] = data_sets[i].data_peak_range[1]+5\n", "\n", @@ -366,7 +371,7 @@ " fd.write(xml_str)\n", "\n", " # Invalid background\n", - " for i in range(8): \n", + " for i in range(8):\n", " data_sets[i].background_roi[0] = data_sets[i].data_peak_range[0]+1\n", " data_sets[i].background_roi[1] = data_sets[i].data_peak_range[1]-1\n", "\n", @@ -389,6 +394,7 @@ { "cell_type": "code", "execution_count": 12, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-03T20:56:59.629456Z", @@ -465,6 +471,7 @@ { "cell_type": "code", "execution_count": 13, + "id": null, "metadata": { "execution": { "iopub.execute_input": "2023-02-03T20:57:28.354196Z", @@ -517,7 +524,7 @@ "plt.errorbar(_data[0], _data[1], yerr=_data[2], markersize=4, marker='', linestyle='-', label='old')\n", "\n", "plt.legend()\n", - "plt.xlabel('q [$1/\\AA$]')\n", + "plt.xlabel(r'q [$1/\\AA$]')\n", "plt.ylabel('R(q)')\n", "ax.set_yscale('log')\n", "ax.set_xscale('log')\n", @@ -527,6 +534,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] @@ -534,6 +542,7 @@ { "cell_type": "code", "execution_count": null, + "id": null, "metadata": {}, "outputs": [], "source": [] diff --git a/reduction/test/test_reduction.py b/reduction/test/test_reduction.py index d1465c2..2d9cd0a 100644 --- a/reduction/test/test_reduction.py +++ b/reduction/test/test_reduction.py @@ -1,30 +1,27 @@ import os -import pytest -import warnings import sys -import numpy as np import mantid import mantid.simpleapi as mtd_api +import numpy as np + mantid.kernel.config.setLogLevel(3) -from lr_reduction import template -from lr_reduction import event_reduction -from lr_reduction import workflow +from lr_reduction import event_reduction, template, workflow def test_full_reduction(): """ - Test the fill reduction chain + Test the fill reduction chain """ - template_path = 'reduction/data/template.xml' + template_path = "reduction/data/template.xml" qz_all = [] refl_all = [] 
d_refl_all = [] first_run = None print(os.getcwd()) - #os.chdir('reduction/') + # os.chdir('reduction/') print(sys.path) for run_number in range(198409, 198417): ws_sc = mtd_api.Load("REF_L_%s" % run_number) @@ -48,25 +45,24 @@ def test_full_reduction(): refl_all = np.take_along_axis(refl_all, idx, axis=None) d_refl_all = np.take_along_axis(d_refl_all, idx, axis=None) - #assert(resolution == 0.02785205863936946) - ref_data = np.loadtxt('data/reference_rq.txt').T - assert(len(ref_data[1]) == len(refl_all)) - assert(np.fabs(np.sum(ref_data[1]-refl_all)) < 1e-10) + # assert(resolution == 0.02785205863936946) + ref_data = np.loadtxt("data/reference_rq.txt").T + assert len(ref_data[1]) == len(refl_all) + assert np.fabs(np.sum(ref_data[1] - refl_all)) < 1e-10 def test_reduce_workflow(): - template_path = 'reduction/data/template.xml' - output_dir = '/tmp' - reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt') + template_path = "reduction/data/template.xml" + output_dir = "/tmp" + reduced_path = os.path.join(output_dir, "REFL_198409_combined_data_auto.txt") if os.path.exists(reduced_path): os.remove(reduced_path) for i in range(198409, 198417): ws = mtd_api.Load("REF_L_%s" % i) - workflow.reduce(ws, template_path, output_dir=output_dir, - average_overlap=False) + workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False) - reference_path = 'data/reference_rq.txt' + reference_path = "data/reference_rq.txt" if os.path.isfile(reference_path): _data = np.loadtxt(reference_path).T @@ -74,29 +70,28 @@ def test_reduce_workflow(): _refl = np.loadtxt(reduced_path).T for i in range(3): - assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10) + assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10 # The reference was computed with a constant dq/q but our approach recalculates # it for each run, so we expect a small discrepancy within 1%. - assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01) + assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01 def test_reduce_workflow_201282(): """ - Test to reproduce autoreduction output + Test to reproduce autoreduction output """ - template_path = 'reduction/data/template_201282.xml' - output_dir = '/tmp' - reduced_path = os.path.join(output_dir, 'REFL_201282_combined_data_auto.txt') - if os.path.exists(reduced_path): + template_path = "reduction/data/template_201282.xml" + output_dir = "/tmp" + reduced_path = os.path.join(output_dir, "REFL_201282_combined_data_auto.txt") + if os.path.exists(reduced_path): os.remove(reduced_path) for i in range(201282, 201289): ws = mtd_api.Load("REF_L_%s" % i) - workflow.reduce(ws, template_path, output_dir=output_dir, - average_overlap=False) + workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False) - reference_path = 'reduction/data/reference_rq_201282.txt' + reference_path = "reduction/data/reference_rq_201282.txt" if os.path.isfile(reference_path): _data = np.loadtxt(reference_path).T @@ -104,29 +99,28 @@ def test_reduce_workflow_201282(): _refl = np.loadtxt(reduced_path).T for i in range(3): - assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10) + assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10 # The reference was computed with a constant dq/q but our approach recalculates # it for each run, so we expect a small discrepancy within 1%. 
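The reference comparison just above is repeated verbatim in the other workflow tests below; a possible shared helper, sketched here for illustration only (it is not part of the repository):

```python
import numpy as np

def assert_matches_reference(reduced_path, reference_path):
    """q, R and dR must match the reference to numerical precision; dq/q only
    has to agree on average to within 1%, since it is recomputed per run."""
    ref = np.loadtxt(reference_path).T
    new = np.loadtxt(reduced_path).T
    for i in range(3):
        assert np.fabs(np.sum(ref[i] - new[i])) < 1e-10
    assert np.sum((ref[3] - new[3]) / new[3]) / len(new[3]) < 0.01
```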
- assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01) + assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01 def test_background_subtraction(): """ - Test with background subtraction off for the data and on for the normalization + Test with background subtraction off for the data and on for the normalization """ - template_path = 'reduction/data/template_short_nobck.xml' - output_dir = '/tmp' - reduced_path = os.path.join(output_dir, 'REFL_198382_combined_data_auto.txt') + template_path = "reduction/data/template_short_nobck.xml" + output_dir = "/tmp" + reduced_path = os.path.join(output_dir, "REFL_198382_combined_data_auto.txt") if os.path.exists(reduced_path): os.remove(reduced_path) for i in range(198388, 198390): ws = mtd_api.Load("REF_L_%s" % i) - workflow.reduce(ws, template_path, output_dir=output_dir, - average_overlap=False) + workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False) - reference_path = 'reduction/data/reference_short_nobck.txt' + reference_path = "reduction/data/reference_short_nobck.txt" if os.path.isfile(reference_path): _data = np.loadtxt(reference_path).T @@ -134,8 +128,8 @@ def test_background_subtraction(): _refl = np.loadtxt(reduced_path).T for i in range(3): - assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10) + assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10 # The reference was computed with a constant dq/q but our approach recalculates # it for each run, so we expect a small discrepancy within 1%. - assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01) + assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01 diff --git a/reduction/test/test_time_resolved.py b/reduction/test/test_time_resolved.py index a453d8a..fe39650 100644 --- a/reduction/test/test_time_resolved.py +++ b/reduction/test/test_time_resolved.py @@ -1,23 +1,27 @@ import numpy as np - -from lr_reduction import workflow from lr_reduction import time_resolved def test_reduce_workflow(): """ - Test the time-resolved reduction that uses a measured reference. - It is generally used at 30 Hz but it also works at 60 Hz. + Test the time-resolved reduction that uses a measured reference. + It is generally used at 30 Hz but it also works at 60 Hz. 
""" - template_path = 'data/template.xml' - output_dir = '/tmp' - reduced_path = 'data/reference_rq_avg_overlap.txt' + template_path = "data/template.xml" + output_dir = "/tmp" + reduced_path = "data/reference_rq_avg_overlap.txt" ref_data = np.loadtxt(reduced_path).T - reduced = time_resolved.reduce_30Hz_slices(198413, 198413, ref_data_60Hz=reduced_path, - template_30Hz=template_path, - time_interval=300, output_dir=output_dir, - scan_index=5, create_plot=False) + reduced = time_resolved.reduce_30Hz_slices( + 198413, + 198413, + ref_data_60Hz=reduced_path, + template_30Hz=template_path, + time_interval=300, + output_dir=output_dir, + scan_index=5, + create_plot=False, + ) q_long = len(ref_data[0]) q_short = len(reduced[0][0]) @@ -28,7 +32,6 @@ def test_reduce_workflow(): n_pts += 1 for k in range(q_short): if np.fabs(reduced[0][0][k] - ref_data[0][i]) < 0.0001: - assert(np.fabs(reduced[0][1][k] - ref_data[1][i]) < 1e-10) + assert np.fabs(reduced[0][1][k] - ref_data[1][i]) < 1e-10 n_match += 1 - assert(n_pts == n_match) - + assert n_pts == n_match diff --git a/scripts/autoreduce/peak_finding.py b/scripts/autoreduce/peak_finding.py index cdb59fb..aa241d6 100644 --- a/scripts/autoreduce/peak_finding.py +++ b/scripts/autoreduce/peak_finding.py @@ -12,18 +12,19 @@ Copyright (c) 2003-2017 SciPy Developers. All rights reserved. """ -from __future__ import division, print_function, absolute_import +from __future__ import absolute_import, division, print_function import math + import numpy as np -__all__ = ['peak_prominences', 'peak_widths', 'find_peaks'] +__all__ = ["peak_prominences", "peak_widths", "find_peaks"] def peak_prominences(x, peaks, wlen=None): """ Calculate the prominence of each peak in a signal. - + .. versionadded:: 1.1.0 References @@ -32,9 +33,9 @@ def peak_prominences(x, peaks, wlen=None): https://en.wikipedia.org/wiki/Topographic_prominence """ # Inner function expects `x` to be C-contiguous - x = np.asarray(x, order='C', dtype=np.float64) + x = np.asarray(x, order="C", dtype=np.float64) if x.ndim != 1: - raise ValueError('`x` must have exactly one dimension') + raise ValueError("`x` must have exactly one dimension") peaks = np.asarray(peaks) if peaks.ndim == 2: @@ -43,7 +44,7 @@ def peak_prominences(x, peaks, wlen=None): # Empty arrays default to np.float64 but are valid input peaks = np.array([], dtype=np.intp) if peaks.ndim != 1: - raise ValueError('`peaks` must have exactly one dimension') + raise ValueError("`peaks` must have exactly one dimension") if wlen is None: wlen = -1 # Inner function expects int -> None == -1 @@ -53,7 +54,7 @@ def peak_prominences(x, peaks, wlen=None): wlen = int(math.ceil(wlen)) else: # Give feedback if wlen has unexpected value - raise ValueError('`wlen` must be at larger than 1, was ' + str(wlen)) + raise ValueError("`wlen` must be at larger than 1, was " + str(wlen)) return _peak_prominences(x, peaks, wlen) @@ -64,9 +65,9 @@ def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): .. 
versionadded:: 1.1.0 """ # Inner function expects `x` to be C-contiguous - x = np.asarray(x, order='C', dtype=np.float64) + x = np.asarray(x, order="C", dtype=np.float64) if x.ndim != 1: - raise ValueError('`x` must have exactly one dimension') + raise ValueError("`x` must have exactly one dimension") peaks = np.asarray(peaks) if peaks.size == 0: @@ -74,10 +75,10 @@ def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): peaks = np.array([], dtype=np.intp) if peaks.ndim != 1: print(peaks.ndim) - raise ValueError('`peaks` must have exactly one dimension') + raise ValueError("`peaks` must have exactly one dimension") if rel_height < 0.0: - raise ValueError('`rel_height` must be greater or equal to 0.0') + raise ValueError("`rel_height` must be greater or equal to 0.0") if prominence_data is None: # Calculate prominence if not supplied and use wlen if supplied. @@ -99,11 +100,11 @@ def _unpack_condition_args(interval, x, peaks): # Reduce arrays if arrays if isinstance(imin, np.ndarray): if imin.size != x.size: - raise ValueError('array size of lower interval border must match x') + raise ValueError("array size of lower interval border must match x") imin = imin[peaks] if isinstance(imax, np.ndarray): if imax.size != x.size: - raise ValueError('array size of upper interval border must match x') + raise ValueError("array size of upper interval border must match x") imax = imax[peaks] return imin, imax @@ -116,9 +117,9 @@ def _select_by_property(peak_properties, pmin, pmax): """ keep = np.ones(peak_properties.size, dtype=bool) if pmin is not None: - keep &= (pmin <= peak_properties) + keep &= pmin <= peak_properties if pmax is not None: - keep &= (peak_properties <= pmax) + keep &= peak_properties <= pmax return keep @@ -130,21 +131,19 @@ def _select_by_peak_threshold(x, peaks, tmin, tmax): # Stack thresholds on both sides to make min / max operations easier: # tmin is compared with the smaller, and tmax with the greater thresold to # each peak's side - stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], - x[peaks] - x[peaks + 1]]) + stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], x[peaks] - x[peaks + 1]]) keep = np.ones(peaks.size, dtype=bool) if tmin is not None: min_thresholds = np.min(stacked_thresholds, axis=0) - keep &= (tmin <= min_thresholds) + keep &= tmin <= min_thresholds if tmax is not None: max_thresholds = np.max(stacked_thresholds, axis=0) - keep &= (max_thresholds <= tmax) + keep &= max_thresholds <= tmax return keep, stacked_thresholds[0], stacked_thresholds[1] -def find_peaks(x, height=None, threshold=None, distance=None, - prominence=None, width=None, wlen=None, rel_height=0.5): +def find_peaks(x, height=None, threshold=None, distance=None, prominence=None, width=None, wlen=None, rel_height=0.5): """ Find peaks inside a signal based on peak properties. .. 
versionadded:: 1.1.0 @@ -152,9 +151,9 @@ def find_peaks(x, height=None, threshold=None, distance=None, # _argmaxima1d expects array of dtype 'float64' x = np.asarray(x, dtype=np.float64) if x.ndim != 1: - raise ValueError('`x` must have exactly one dimension') + raise ValueError("`x` must have exactly one dimension") if distance is not None and distance < 1: - raise ValueError('`distance` must be greater or equal to 1') + raise ValueError("`distance` must be greater or equal to 1") peaks = _argmaxima1d(x) properties = {} @@ -170,8 +169,7 @@ def find_peaks(x, height=None, threshold=None, distance=None, if threshold is not None: # Evaluate threshold condition tmin, tmax = _unpack_condition_args(threshold, x, peaks) - keep, left_thresholds, right_thresholds = _select_by_peak_threshold( - x, peaks, tmin, tmax) + keep, left_thresholds, right_thresholds = _select_by_peak_threshold(x, peaks, tmin, tmax) peaks = peaks[keep] properties["left_thresholds"] = left_thresholds properties["right_thresholds"] = right_thresholds @@ -182,37 +180,35 @@ def find_peaks(x, height=None, threshold=None, distance=None, if prominence is not None or width is not None: # Calculate prominence (required for both conditions) - properties.update(zip( - ['prominences', 'left_bases', 'right_bases'], - peak_prominences(x, peaks, wlen=wlen) - )) + properties.update(zip(["prominences", "left_bases", "right_bases"], peak_prominences(x, peaks, wlen=wlen))) if prominence is not None: # Evaluate prominence condition pmin, pmax = _unpack_condition_args(prominence, x, peaks) - keep = _select_by_property(properties['prominences'], pmin, pmax) + keep = _select_by_property(properties["prominences"], pmin, pmax) peaks = peaks[keep] properties = {key: array[keep] for key, array in properties.items()} if width is not None: # Calculate widths - properties.update(zip( - ['widths', 'width_heights', 'left_ips', 'right_ips'], - peak_widths(x, peaks, rel_height, (properties['prominences'], - properties['left_bases'], - properties['right_bases'])) - )) + properties.update( + zip( + ["widths", "width_heights", "left_ips", "right_ips"], + peak_widths(x, peaks, rel_height, (properties["prominences"], properties["left_bases"], properties["right_bases"])), + ) + ) # Evaluate width condition wmin, wmax = _unpack_condition_args(width, x, peaks) - keep = _select_by_property(properties['widths'], wmin, wmax) + keep = _select_by_property(properties["widths"], wmin, wmax) peaks = peaks[keep] properties = {key: array[keep] for key, array in properties.items()} - return np.asarray(peaks), properties + # The following code belongs in _peak_finding_utils ##################################### + def _argmaxima1d(x): """ Find local maxima in a 1D array. @@ -250,12 +246,10 @@ def _argmaxima1d(x): left_edges.resize(m, refcheck=False) right_edges.resize(m, refcheck=False) - return midpoints#, left_edges, right_edges + return midpoints # , left_edges, right_edges -def _peak_prominences(x, - peaks, - wlen): +def _peak_prominences(x, peaks, wlen): """ Calculate the prominence of each peak in a signal. 
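As a usage note, the vendored `find_peaks()` above mirrors SciPy's interface; a small sketch on synthetic data (the signal and the import path are assumptions; the module would need to be importable from `scripts/autoreduce`):

```python
import numpy as np
from peak_finding import find_peaks, peak_widths  # vendored module shown above

# Synthetic detector profile: flat background plus a single Gaussian peak
x = np.arange(304, dtype=float)
counts = 5.0 + 100.0 * np.exp(-0.5 * ((x - 150.0) / 4.0) ** 2)

# Keep only peaks that stand out from the average background level
peaks, props = find_peaks(counts, width=3, prominence=0.5 * np.average(counts))
widths = peak_widths(counts, peaks, rel_height=0.05)
print(peaks, props["prominences"], widths[0])
```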
""" @@ -269,8 +263,7 @@ def _peak_prominences(x, i_min = 0 i_max = x.shape[0] - 1 if not i_min <= peak <= i_max: - raise ValueError("peak {} is not a valid index for `x`" - .format(peak)) + raise ValueError("peak {} is not a valid index for `x`".format(peak)) if 2 <= wlen: # Adjust window around the evaluated peak (within bounds); @@ -307,22 +300,15 @@ def _peak_prominences(x, return np.asarray(prominences), left_bases, right_bases -def _peak_widths(x, - peaks, - rel_height, - prominences, - left_bases, - right_bases): +def _peak_widths(x, peaks, rel_height, prominences, left_bases, right_bases): """ Calculate the width of each each peak in a signal. """ if rel_height < 0: - raise ValueError('`rel_height` must be greater or equal to 0.0') - if not (peaks.shape[0] == prominences.shape[0] == left_bases.shape[0] - == right_bases.shape[0]): - raise ValueError("arrays in `prominence_data` must have the same shape " - "as `peaks`") + raise ValueError("`rel_height` must be greater or equal to 0.0") + if not (peaks.shape[0] == prominences.shape[0] == left_bases.shape[0] == right_bases.shape[0]): + raise ValueError("arrays in `prominence_data` must have the same shape " "as `peaks`") show_warning = False widths = np.empty(peaks.shape[0], dtype=np.float64) @@ -336,8 +322,7 @@ def _peak_widths(x, peak = peaks[p] # Validate bounds and order if not 0 <= i_min <= peak <= i_max < x.shape[0]: - raise ValueError("prominence data is invalid for peak {}" - .format(peak)) + raise ValueError("prominence data is invalid for peak {}".format(peak)) height = width_heights[p] = x[peak] - prominences[p] * rel_height # Find intersection point on left side @@ -354,7 +339,7 @@ def _peak_widths(x, while i < i_max and height < x[i]: i += 1 right_ip = i - if x[i] < height: + if x[i] < height: # Interpolate if true intersection height is between samples right_ip -= (height - x[i]) / (x[i - 1] - x[i]) diff --git a/scripts/autoreduce/rebin_ml.py b/scripts/autoreduce/rebin_ml.py index fcf1132..3f1b31b 100644 --- a/scripts/autoreduce/rebin_ml.py +++ b/scripts/autoreduce/rebin_ml.py @@ -1,12 +1,14 @@ # Rebin and smooth the reflectivity curve to prepare it for ML -import sys import os +import sys + from mantid.simpleapi import * + def smooth_and_rebin(file_path, reference, output_file): """ - Pre-process reflectivity data to be used as input to an ML - process to predict a structure. + Pre-process reflectivity data to be used as input to an ML + process to predict a structure. 
""" # Check that the output directory exists _dir = os.path.dirname(output_file) @@ -15,25 +17,20 @@ def smooth_and_rebin(file_path, reference, output_file): ws = Load(file_path) ws_ref = Load(reference) - + ws_ref = CropWorkspace(InputWorkspace=ws_ref, XMin=0.0085, XMax=0.13) - + ws_smoothed = SmoothData(InputWorkspace=ws, NPoints=8) - + ws_ref = ConvertToHistogram(ws_ref) ws_smoothed = ConvertToHistogram(ws_smoothed) - - - ws_final = RebinToWorkspace(WorkspaceToRebin=ws_smoothed, - WorkspaceToMatch=ws_ref) - + + ws_final = RebinToWorkspace(WorkspaceToRebin=ws_smoothed, WorkspaceToMatch=ws_ref) + ws_final = ConvertToPointData(ws_final) - - SaveAscii(InputWorkspace=ws_final, - Filename=output_file, - WriteXError=False, - WriteSpectrumID=False, - Separator="Space") + + SaveAscii(InputWorkspace=ws_final, Filename=output_file, WriteXError=False, WriteSpectrumID=False, Separator="Space") + if __name__ == "__main__": - smooth_and_rebin(sys.argv[1], sys.argv[2], sys.argv[3]) \ No newline at end of file + smooth_and_rebin(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/scripts/autoreduce/reduce_REF_L.py b/scripts/autoreduce/reduce_REF_L.py index 7c1e299..d85149d 100644 --- a/scripts/autoreduce/reduce_REF_L.py +++ b/scripts/autoreduce/reduce_REF_L.py @@ -6,23 +6,23 @@ Type 2: Zero-attenuator direct beams Type 3: Data that we don't need to treat """ -import sys import os -import time -import numpy as np +import sys import warnings -warnings.simplefilter('ignore') + +import numpy as np + +warnings.simplefilter("ignore") # New reduction code sys.path.append("/SNS/REF_L/shared/reduction") -CONDA_ENV = 'mantid' +CONDA_ENV = "mantid" -import mantid from mantid.simpleapi import * -event_file_path=sys.argv[1] -output_dir=sys.argv[2] +event_file_path = sys.argv[1] +output_dir = sys.argv[2] template_file = None if len(sys.argv) > 4: @@ -30,21 +30,21 @@ avg_overlap = False if len(sys.argv) > 5: - avg_overlap = sys.argv[5].lower() == 'true' + avg_overlap = sys.argv[5].lower() == "true" const_q = False if len(sys.argv) > 6: - const_q = sys.argv[6].lower() == 'true' + const_q = sys.argv[6].lower() == "true" event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 -run_number = event_file.split('_')[2] -run_number = int(run_number.replace('.nxs.h5', '')) +run_number = event_file.split("_")[2] +run_number = int(run_number.replace(".nxs.h5", "")) # The new reduction will be used by default starting in June 2023 old_version = False -if len(sys.argv) > 3 and sys.argv[3] == 'old': +if len(sys.argv) > 3 and sys.argv[3] == "old": old_version = True # Load data for auto-reduction @@ -60,7 +60,7 @@ template_path = os.path.join(output_dir, "template_up.xml") else: template_path = os.path.join(output_dir, "template_down.xml") - + if os.path.isfile(template_path): template_file = template_path elif os.path.isfile(default_template_path): @@ -69,7 +69,7 @@ print("Using template: %s" % template_file) # Check the measurement geometry. 
This will be useful later -#if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': +# if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': # Determine whether we have to go through the legacy reduction to # process direct beams @@ -79,44 +79,44 @@ if old_version: SetInstrumentParameter(ws, ParameterName="dq-constant", Value="0.0", ParameterType="Number") - output = LRAutoReduction(#Filename=event_file_path, - InputWorkspace=ws, - ScaleToUnity=False, - ScalingWavelengthCutoff=10, - OutputDirectory=output_dir, - SlitTolerance=0.07, - ReadSequenceFromFile=True, - OrderDirectBeamsByRunNumber=True, - TemplateFile=template_file) - first_run_of_set=int(output[1]) + output = LRAutoReduction( # Filename=event_file_path, + InputWorkspace=ws, + ScaleToUnity=False, + ScalingWavelengthCutoff=10, + OutputDirectory=output_dir, + SlitTolerance=0.07, + ReadSequenceFromFile=True, + OrderDirectBeamsByRunNumber=True, + TemplateFile=template_file, + ) + first_run_of_set = int(output[1]) else: print("Average overlap: %s" % avg_overlap) print("Constant-Q binning: %s" % const_q) from lr_reduction import workflow + if True: - first_run_of_set = workflow.reduce(ws, template_file, output_dir, - average_overlap=avg_overlap, - q_summing=const_q, bck_in_q=False) + first_run_of_set = workflow.reduce(ws, template_file, output_dir, average_overlap=avg_overlap, q_summing=const_q, bck_in_q=False) else: - first_run_of_set = workflow.reduce_fixed_two_theta(ws, template_file, output_dir, - average_overlap=avg_overlap, - q_summing=const_q, bck_in_q=False) + first_run_of_set = workflow.reduce_fixed_two_theta( + ws, template_file, output_dir, average_overlap=avg_overlap, q_summing=const_q, bck_in_q=False + ) -#------------------------------------------------------------------------- +# ------------------------------------------------------------------------- # Produce plot for the web monitor # Wait 30 seconds in order to avoid race condition with live reduction -#time.sleep(30) +# time.sleep(30) sequence_id = int(ws.getRun().getProperty("sequence_id").value[0]) sequence_number = int(ws.getRun().getProperty("sequence_number").value[0]) -default_file_name = 'REFL_%s_combined_data_auto.txt' % sequence_id +default_file_name = "REFL_%s_combined_data_auto.txt" % sequence_id default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): # Set flag to announce that the data is available try: ipts = ws.getRun().getProperty("experiment_identifier").value - ipts_number = ipts.split('-')[1] + ipts_number = ipts.split("-")[1] os.system("/SNS/software/nses/bin/confirm-data -s Yes BL-4B %s 1 Auto" % ipts_number) except: logger.notice("Could not set data availability") @@ -128,7 +128,8 @@ try: from postprocessing.publish_plot import plot1d except ImportError: - from finddata.publish_plot import plot1d, _determine_config_file, publish_plot + from finddata.publish_plot import _determine_config_file, plot1d, publish_plot + if _determine_config_file(None) is None: plotting_ready = False @@ -138,23 +139,44 @@ for i in range(0, 10): _id = i + offset _run = sequence_id + i - reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (sequence_id, _id+1, _run) + reduced_file_name = "REFL_%s_%s_%s_auto.nxs" % (sequence_id, _id + 1, _run) reduced_file_path = os.path.join(output_dir, reduced_file_name) - reduced_file_name2 = 'REFL_%s_%s_%s_partial.txt' % (sequence_id, _id+1, _run) + reduced_file_name2 = "REFL_%s_%s_%s_partial.txt" % (sequence_id, _id + 1, _run) 
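Putting the argument parsing above together, a manual invocation of the autoreduction script would look roughly like this (run number, paths and interpreter are placeholders; the template argument position follows the `len(sys.argv) > 4` check above):

```python
import subprocess

# Positional arguments, as parsed above:
#   <event file> <output dir> <old|new> <template> <avg_overlap> <const_q>
subprocess.run(
    [
        "python3",
        "reduce_REF_L.py",
        "/SNS/REF_L/IPTS-00000/nexus/REF_L_198409.nxs.h5",
        "/SNS/REF_L/IPTS-00000/shared/autoreduce",
        "new",
        "/SNS/REF_L/IPTS-00000/shared/autoreduce/template.xml",
        "false",  # avg_overlap
        "false",  # const_q
    ],
    check=True,
)
```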
reduced_file_path2 = os.path.join(output_dir, reduced_file_name2) if os.path.isfile(reduced_file_path) or os.path.isfile(reduced_file_path2): # Look to see whether submitting the plot is enabled if plotting_ready: - plot1d(_run, [[x, y, dy, dx]], instrument='REF_L', - x_title=u"Q (1/A)", x_log=True, - y_title="Reflectivity", y_log=True, show_dx=False) + plot1d( + _run, + [[x, y, dy, dx]], + instrument="REF_L", + x_title="Q (1/A)", + x_log=True, + y_title="Reflectivity", + y_log=True, + show_dx=False, + ) else: - plot_div = plot1d(_run, [[x, y, dy, dx]], instrument='REF_L', - x_title=u"q (1/A)", x_log=True, - y_title="Reflectivity", y_log=True, show_dx=False, publish=False) - publish_plot('REF_L', _run, files={'file': plot_div}, - config="/SNS/REF_L/shared/.livedata.conf") + plot_div = plot1d( + _run, + [[x, y, dy, dx]], + instrument="REF_L", + x_title="q (1/A)", + x_log=True, + y_title="Reflectivity", + y_log=True, + show_dx=False, + publish=False, + ) + publish_plot("REF_L", _run, files={"file": plot_div}, config="/SNS/REF_L/shared/.livedata.conf") else: - plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', - x_title=u"Q (1/A)", x_log=True, - y_title="Reflectivity", y_log=True, show_dx=False) + plot1d( + run_number, + [[x, y, dy, dx]], + instrument="REF_L", + x_title="Q (1/A)", + x_log=True, + y_title="Reflectivity", + y_log=True, + show_dx=False, + ) diff --git a/scripts/autoreduce/sf_calculator.py b/scripts/autoreduce/sf_calculator.py index 24bdcf7..3743d0c 100644 --- a/scripts/autoreduce/sf_calculator.py +++ b/scripts/autoreduce/sf_calculator.py @@ -1,27 +1,27 @@ """ Scaling factor calculation for automated reduction """ -from mantid.simpleapi import * -import numpy as np +import functools +import numpy as np from mantid.api import * from mantid.kernel import * -import functools - +from mantid.simpleapi import * THI_TOLERANCE = 0.002 class CompareTwoNXSDataForSFcalculator(object): """ - will return -1, 0 or 1 according to the position of the nexusToPosition in relation to the - nexusToCompareWith based on the following criteria - #1: number of attenuators (ascending order) - #2: lambda requested (descending order) - #3: S2W (ascending order) - #4: S2H (descending order) - #5 if everything up to this point is identical, return 0 + will return -1, 0 or 1 according to the position of the nexusToPosition in relation to the + nexusToCompareWith based on the following criteria + #1: number of attenuators (ascending order) + #2: lambda requested (descending order) + #3: S2W (ascending order) + #4: S2H (descending order) + #5 if everything up to this point is identical, return 0 """ + nexusToCompareWithRun = None nexusToPositionRun = None resultComparison = 0 @@ -30,32 +30,32 @@ def __init__(self, nxsdataToCompareWith, nxsdataToPosition): self.nexusToCompareWithRun = nxsdataToCompareWith.getRun() self.nexusToPositionRun = nxsdataToPosition.getRun() - compare = self.compareParameter('LambdaRequest', 'descending') + compare = self.compareParameter("LambdaRequest", "descending") if compare != 0: self.resultComparison = compare return - compare = self.compareParameter('thi', 'descending', tolerance=THI_TOLERANCE) + compare = self.compareParameter("thi", "descending", tolerance=THI_TOLERANCE) if compare != 0: self.resultComparison = compare return - compare = self.compareParameter('vAtt', 'ascending') + compare = self.compareParameter("vAtt", "ascending") if compare != 0: self.resultComparison = compare return - pcharge1 = 
self.nexusToCompareWithRun.getProperty('gd_prtn_chrg').value/nxsdataToCompareWith.getNEvents() - pcharge2 = self.nexusToPositionRun.getProperty('gd_prtn_chrg').value/nxsdataToPosition.getNEvents() + pcharge1 = self.nexusToCompareWithRun.getProperty("gd_prtn_chrg").value / nxsdataToCompareWith.getNEvents() + pcharge2 = self.nexusToPositionRun.getProperty("gd_prtn_chrg").value / nxsdataToPosition.getNEvents() self.resultComparison = -1 if pcharge1 < pcharge2 else 1 def compareParameter(self, param, order, tolerance=0.0): """ - Compare parameters for the two runs - :param string param: name of the parameter to compare - :param string order: ascending or descending - :param float tolerance: tolerance to apply to the comparison [optional] + Compare parameters for the two runs + :param string param: name of the parameter to compare + :param string order: ascending or descending + :param float tolerance: tolerance to apply to the comparison [optional] """ _nexusToCompareWithRun = self.nexusToCompareWithRun _nexusToPositionRun = self.nexusToPositionRun @@ -66,7 +66,7 @@ def compareParameter(self, param, order, tolerance=0.0): if abs(_paramNexusToPosition - _paramNexusToCompareWith) <= tolerance: return 0 - if order == 'ascending': + if order == "ascending": resultLessThan = -1 resultMoreThan = 1 else: @@ -86,15 +86,13 @@ def result(self): def sorter_function(r1, r2): """ - Sorter function used by with the 'sorted' call to sort the direct beams. + Sorter function used by with the 'sorted' call to sort the direct beams. """ return CompareTwoNXSDataForSFcalculator(r2, r1).result() class ScalingFactor(object): - - def __init__(self, run_list, sort_by_runs=True, sf_file='test.cfg', tof_step=200, - medium='Si', slit_tolerance=0.06, dry_run=False): + def __init__(self, run_list, sort_by_runs=True, sf_file="test.cfg", tof_step=200, medium="Si", slit_tolerance=0.06, dry_run=False): self._run_list = run_list self._sort_by_runs = sort_by_runs self._sf_file = sf_file @@ -122,9 +120,9 @@ def execute(self): # Compute the scaling factors if requested self._compute_scaling_factors(lr_data_sorted) - def find_peak(self, ws, crop=25, factor=1.): + def find_peak(self, ws, crop=25, factor=1.0): """ - Find peak in y using Mantid's peak finder + Find peak in y using Mantid's peak finder """ # Sum detector counts into 1D y = ws.extractY() @@ -143,15 +141,17 @@ def find_peak(self, ws, crop=25, factor=1.): peak_ws_name = "__peaks" # FitPeaks returns [OutputWorkspace, FittedPeaksWorkspace, OutputPeakParametersWorkspace] - _peak_ws = FitPeaks(InputWorkspace=_data_ws, - OutputWorkspace=peak_ws_name, - PeakCenters=f'{max_index}', - FitWindowBoundaryList=f'{crop},{signal.shape[0]-crop}', - HighBackground=False, - ConstrainPeakPositions=False, - FittedPeaksWorkspace=model_ws_name, - OutputPeakParametersWorkspace=param_ws_name, - RawPeakParameters=False) + _peak_ws = FitPeaks( + InputWorkspace=_data_ws, + OutputWorkspace=peak_ws_name, + PeakCenters=f"{max_index}", + FitWindowBoundaryList=f"{crop},{signal.shape[0]-crop}", + HighBackground=False, + ConstrainPeakPositions=False, + FittedPeaksWorkspace=model_ws_name, + OutputPeakParametersWorkspace=param_ws_name, + RawPeakParameters=False, + ) # Retrieve value peak_width = mtd[param_ws_name].cell(0, 3) @@ -166,12 +166,12 @@ def find_peak(self, ws, crop=25, factor=1.): def find_peak_scipy(self, ws): """ - Find the peak in y - TODO: find peak in x + Find the peak in y + TODO: find peak in x """ from scipy.signal import find_peaks, peak_widths - y=ws.extractY() + y = ws.extractY() y = 
np.reshape(y, (256, 304, y.shape[1])) p_vs_t = np.sum(y, axis=0) @@ -180,24 +180,23 @@ def find_peak_scipy(self, ws): avg = np.average(counts) # Crop pixels on each side where background can create a peak - _crop=25 - peaks, props = find_peaks(counts[_crop:-_crop], - threshold=None, - width=3, - prominence=0.5*avg) + _crop = 25 + peaks, props = find_peaks(counts[_crop:-_crop], threshold=None, width=3, prominence=0.5 * avg) width = peak_widths(counts[_crop:-_crop], peaks, rel_height=0.05) _peak_index = 0 _peak_max = 0 - if len(peaks)>0: + if len(peaks) > 0: for i in range(len(peaks)): - if counts[peaks[i]+_crop] > _peak_max: + if counts[peaks[i] + _crop] > _peak_max: _peak_index = i - _peak_max = counts[peaks[i]+_crop] + _peak_max = counts[peaks[i] + _crop] try: - peak = [np.int(np.floor(peaks[_peak_index]+_crop-2.0*width[0][_peak_index])), - np.int(np.floor(peaks[_peak_index]+_crop+2.0*width[0][_peak_index]))] + peak = [ + np.int(np.floor(peaks[_peak_index] + _crop - 2.0 * width[0][_peak_index])), + np.int(np.floor(peaks[_peak_index] + _crop + 2.0 * width[0][_peak_index])), + ] except: print(counts) print(avg) @@ -206,25 +205,25 @@ def find_peak_scipy(self, ws): raise return peak, [0, 255] - + def _compute_scaling_factors(self, lr_data_sorted): """ - If we need to compute the scaling factors, group the runs by their wavelength request - @param lr_data_sorted: ordered list of workspaces + If we need to compute the scaling factors, group the runs by their wavelength request + @param lr_data_sorted: ordered list of workspaces """ group_list = [] current_group = [] _current_wl = None _current_thi = None for r in lr_data_sorted: - wl_ = r.getRun().getProperty('LambdaRequest').value[0] - thi = r.getRun().getProperty('thi').value[0] + wl_ = r.getRun().getProperty("LambdaRequest").value[0] + thi = r.getRun().getProperty("thi").value[0] - if _current_thi is None or abs(thi-_current_thi)>THI_TOLERANCE or not _current_wl == wl_: + if _current_thi is None or abs(thi - _current_thi) > THI_TOLERANCE or not _current_wl == wl_: # New group _current_wl = wl_ _current_thi = thi - if len(current_group)>0: + if len(current_group) > 0: group_list.append(current_group) current_group = [] @@ -247,19 +246,27 @@ def _compute_scaling_factors(self, lr_data_sorted): print("processing: %g" % run.getRunNumber()) peak, low_res = self.find_peak(run) - att = run.getRun().getProperty('vAtt').value[0]-1 - wl = run.getRun().getProperty('LambdaRequest').value[0] - thi = run.getRun().getProperty('thi').value[0] + att = run.getRun().getProperty("vAtt").value[0] - 1 + wl = run.getRun().getProperty("LambdaRequest").value[0] + thi = run.getRun().getProperty("thi").value[0] direct_beam_runs.append(run.getRunNumber()) peak_ranges.append(int(peak[0])) peak_ranges.append(int(peak[1])) x_ranges.append(int(low_res[0])) x_ranges.append(int(low_res[1])) - bck_ranges.append(int(peak[0])-3) - bck_ranges.append(int(peak[1])+3) - - summary += "%10s wl=%5s thi=%5s att=%s %5s,%5s %5s,%5s\n" % \ - (run.getRunNumber(), wl, thi, att, peak[0], peak[1], low_res[0], low_res[1]) + bck_ranges.append(int(peak[0]) - 3) + bck_ranges.append(int(peak[1]) + 3) + + summary += "%10s wl=%5s thi=%5s att=%s %5s,%5s %5s,%5s\n" % ( + run.getRunNumber(), + wl, + thi, + att, + peak[0], + peak[1], + low_res[0], + low_res[1], + ) # Determine TOF range from first file sample = g[0].getInstrument().getSample() @@ -270,24 +277,27 @@ def _compute_scaling_factors(self, lr_data_sorted): source_detector_distance = source_sample_distance + sample_detector_distance h = 
6.626e-34 # m^2 kg s^-1 m = 1.675e-27 # kg - wl = g[0].getRun().getProperty('LambdaRequest').value[0] - chopper_speed = g[0].getRun().getProperty('SpeedRequest1').value[0] + wl = g[0].getRun().getProperty("LambdaRequest").value[0] + chopper_speed = g[0].getRun().getProperty("SpeedRequest1").value[0] wl_offset = 0.0 - tof_min = source_detector_distance / h * m * (wl + wl_offset*60.0/chopper_speed - 1.7*60.0/chopper_speed) * 1e-4 - tof_max = source_detector_distance / h * m * (wl + wl_offset*60.0/chopper_speed + 1.7*60.0/chopper_speed) * 1e-4 + tof_min = source_detector_distance / h * m * (wl + wl_offset * 60.0 / chopper_speed - 1.7 * 60.0 / chopper_speed) * 1e-4 + tof_max = source_detector_distance / h * m * (wl + wl_offset * 60.0 / chopper_speed + 1.7 * 60.0 / chopper_speed) * 1e-4 tof_range = [tof_min, tof_max] summary += " TOF: %s\n\n" % tof_range # Compute the scaling factors if not self._dry_run: - LRScalingFactors(DirectBeamRuns=direct_beam_runs, - TOFRange=tof_range, TOFSteps=self._tof_steps, - SignalPeakPixelRange=peak_ranges, - SignalBackgroundPixelRange=bck_ranges, - LowResolutionPixelRange=x_ranges, - IncidentMedium=self._incident_medium, - SlitTolerance=self._slit_tolerance, - ScalingFactorFile=self._sf_file) + LRScalingFactors( + DirectBeamRuns=direct_beam_runs, + TOFRange=tof_range, + TOFSteps=self._tof_steps, + SignalPeakPixelRange=peak_ranges, + SignalBackgroundPixelRange=bck_ranges, + LowResolutionPixelRange=x_ranges, + IncidentMedium=self._incident_medium, + SlitTolerance=self._slit_tolerance, + ScalingFactorFile=self._sf_file, + ) print(summary) diff --git a/scripts/livereduce/reduce_REF_L_live_post_proc.py b/scripts/livereduce/reduce_REF_L_live_post_proc.py index d8dd9d5..b6baf54 100644 --- a/scripts/livereduce/reduce_REF_L_live_post_proc.py +++ b/scripts/livereduce/reduce_REF_L_live_post_proc.py @@ -1,27 +1,27 @@ -import sys -import os import json +import os +import sys +import time import mantid.simpleapi as mtd_api - import numpy as np -import time sys.path.append("/SNS/REF_L/shared/reduction") from lr_reduction import workflow - DEBUG = True if DEBUG: - logfile = open("/SNS/REF_L/shared/livereduce/LR_live_outer.log", 'a') + logfile = open("/SNS/REF_L/shared/livereduce/LR_live_outer.log", "a") logfile.write("\nStarting post-proc: %s\n" % time.ctime()) + def logthis(msg): if DEBUG: logfile.write(msg) + plotting_ready = True -LIVE_DATA_WS = 'accumulation' +LIVE_DATA_WS = "accumulation" try: from finddata.publish_plot import plot1d, publish_plot @@ -31,16 +31,16 @@ def logthis(msg): def reduction(): """ - Perform reduction on live data + Perform reduction on live data """ ws = mtd_api.mtd[LIVE_DATA_WS] run_number = ws.getRunNumber() ws.getRun().integrateProtonCharge() # Find the template to use - expt = ws.getRun().getProperty('experiment_identifier').value - if len(expt)>0: - output_dir = '/SNS/REF_L/%s/shared/autoreduce' % expt + expt = ws.getRun().getProperty("experiment_identifier").value + if len(expt) > 0: + output_dir = "/SNS/REF_L/%s/shared/autoreduce" % expt logthis("IPTS %s [%s]\n" % (expt, output_dir)) default_template_path = os.path.join(output_dir, "template.xml") @@ -57,23 +57,30 @@ def reduction(): template_file = default_template_path else: logthis("No template found\n") - return '' + return "" - first_run_of_set = workflow.reduce(ws, template_file, - output_dir, - average_overlap=False, q_summing=False, - bck_in_q=False, is_live=True) + first_run_of_set = workflow.reduce( + ws, template_file, output_dir, average_overlap=False, q_summing=False, 
bck_in_q=False, is_live=True + ) - reduced_data = os.path.join(output_dir, 'REFL_%s_live_estimate.txt' % first_run_of_set) + reduced_data = os.path.join(output_dir, "REFL_%s_live_estimate.txt" % first_run_of_set) r = _data = np.loadtxt(reduced_data).T if plotting_ready: - plot_div = plot1d(run_number, [[r[0], r[1], r[2], r[3]]], instrument='REF_L', - x_title=u"Q (1/A)", x_log=True, - y_title="Reflectivity", y_log=True, show_dx=False, publish=False) + plot_div = plot1d( + run_number, + [[r[0], r[1], r[2], r[3]]], + instrument="REF_L", + x_title="Q (1/A)", + x_log=True, + y_title="Reflectivity", + y_log=True, + show_dx=False, + publish=False, + ) return plot_div else: logthis("No experiment ID\n") - return '' + return "" def time_resolved(): @@ -86,33 +93,31 @@ def time_resolved(): plot_data = [] data_names = [] ws = mtd_api.SumSpectra(LIVE_DATA_WS) - tof = mtd_api.Rebin(ws, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace='tof_') + tof = mtd_api.Rebin(ws, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace="tof_") x = tof.readX(0) - x = (x[1:]+x[:-1])/2.0 + x = (x[1:] + x[:-1]) / 2.0 y = tof.readY(0) time_data = get_live_data(run_number) # If we changed run, we should not use the previous data if len(time_data) == 0: logthis("New run: clearing previous data\n") - if 'previous_data' in mtd_api.mtd: + if "previous_data" in mtd_api.mtd: mtd_api.DeleteWorkspace("previous_data") - if "previous_data" in mtd_api.mtd and len(time_data)>0: + if "previous_data" in mtd_api.mtd and len(time_data) > 0: _previous_data = mtd_api.mtd["previous_data"] _previous_charge = _previous_data.getRun().getProtonCharge() - tof_previous_data = mtd_api.Rebin(_previous_data, - [ws.getTofMin(), 300, ws.getTofMax()], - OutputWorkspace='tof_previous_data_') + tof_previous_data = mtd_api.Rebin(_previous_data, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace="tof_previous_data_") x_prev = tof_previous_data.readX(0) - x_prev = (x_prev[1:]+x_prev[:-1])/2.0 + x_prev = (x_prev[1:] + x_prev[:-1]) / 2.0 y_prev = tof_previous_data.readY(0) - _nevts = int(np.sum(y-y_prev)) - signal = (y-y_prev) / (charge - _previous_charge) + _nevts = int(np.sum(y - y_prev)) + signal = (y - y_prev) / (charge - _previous_charge) plot_data.append([x_prev, signal]) - data_names.append('Last 30s [%s events]' % _nevts) + data_names.append("Last 30s [%s events]" % _nevts) # Append to time data time_data.append([time.time(), [list(x), list(signal)]]) @@ -121,59 +126,69 @@ def time_resolved(): # A minute ago if len(time_data) > 1: plot_data.append(time_data[-2][1]) - data_names.append('Previous 30s') + data_names.append("Previous 30s") # Five minutes if len(time_data) > 11: plot_data.append(time_data[-11][1]) - d_time = int(time.time() - time_data[-11][0]) - data_names.append('5 minutes ago') + int(time.time() - time_data[-11][0]) + data_names.append("5 minutes ago") # Very first 30s that is only for this run if len(time_data) > 1: plot_data.append(time_data[1][1]) first_time = int(time.time() - time_data[1][0]) - data_names.append('First 30s [%gs ago]' % first_time) + data_names.append("First 30s [%gs ago]" % first_time) else: time_data.append([time.time(), [list(x), list(y)]]) save_live_data(run_number, time_data) plot_data.append([x, y]) - data_names.append('Last 30s') + data_names.append("Last 30s") - previous_data = mtd_api.CloneWorkspace(ws) + mtd_api.CloneWorkspace(ws) logthis("plotting...\n") - plot_div = plot1d(run_number, plot_data, data_names=data_names, instrument='REF_L', - x_title="TOF", x_log=True, title=time.ctime(), - 
y_title="Counts / charge", y_log=True, show_dx=False, publish=False) + plot_div = plot1d( + run_number, + plot_data, + data_names=data_names, + instrument="REF_L", + x_title="TOF", + x_log=True, + title=time.ctime(), + y_title="Counts / charge", + y_log=True, + show_dx=False, + publish=False, + ) return plot_div def get_live_data(run_number): """ - Load stored live data or return an empty set + Load stored live data or return an empty set """ - live_data_path = '/SNS/REF_L/shared/livereduce/live_data.json' + live_data_path = "/SNS/REF_L/shared/livereduce/live_data.json" if os.path.isfile(live_data_path): - with open(live_data_path, 'r') as fd: + with open(live_data_path, "r") as fd: live_data = json.load(fd) - if 'run_number' in live_data and run_number == live_data['run_number']: - logthis("Found stored live data: %g\n" % len(live_data['time_data'])) - return live_data['time_data'] + if "run_number" in live_data and run_number == live_data["run_number"]: + logthis("Found stored live data: %g\n" % len(live_data["time_data"])) + return live_data["time_data"] return [] def save_live_data(run_number, time_data): """ - Save time data + Save time data """ live_data = dict(run_number=run_number, time_data=time_data) - live_data_path = '/SNS/REF_L/shared/livereduce/live_data.json' - with open(live_data_path, 'w') as fp: + live_data_path = "/SNS/REF_L/shared/livereduce/live_data.json" + with open(live_data_path, "w") as fp: json.dump(live_data, fp) -html_div = '' +html_div = "" if LIVE_DATA_WS in mtd_api.mtd: try: ws = mtd_api.mtd[LIVE_DATA_WS] @@ -188,24 +203,23 @@ def save_live_data(run_number, time_data): try: reduction_div = reduction() except: - reduction_div = '' + reduction_div = "" # Time-resolved plot plot_div = time_resolved() if plotting_ready: html_div += reduction_div - html_div += '' + html_div += "" html_div += plot_div # There's a race condition between the automated reduction and the live reduction # at the end of a run. To avoid this, we always post the live reduction to run 0. 
RUN_NUMBER = run_number - publish_plot('REF_L', RUN_NUMBER, files={'file': html_div}, - config="/SNS/REF_L/shared/.livedata.conf") + publish_plot("REF_L", RUN_NUMBER, files={"file": html_div}, config="/SNS/REF_L/shared/.livedata.conf") except: logthis("failure: %s" % sys.exc_info()[1]) # Creating a 'result' workspace is necessary for the service not to crash - mtd_api.SumSpectra(LIVE_DATA_WS, OutputWorkspace='result') + mtd_api.SumSpectra(LIVE_DATA_WS, OutputWorkspace="result") else: logthis("No live data available") diff --git a/scripts/shared/batch_reduce.py b/scripts/shared/batch_reduce.py index c224958..2225551 100644 --- a/scripts/shared/batch_reduce.py +++ b/scripts/shared/batch_reduce.py @@ -1,11 +1,10 @@ #!/usr/bin/python3 -import sys import os -import time import subprocess +import sys +import time - -PYTHON_CMD = 'nsd-conda-wrap.sh mantid' +PYTHON_CMD = "nsd-conda-wrap.sh mantid" if len(sys.argv) < 4: print("\nUsage: python3 batch_reduce.py ") @@ -24,11 +23,11 @@ # Look for additional options for the new reduction process new_version = False -if len(sys.argv)>4 and sys.argv[4] == 'new': +if len(sys.argv) > 4 and sys.argv[4] == "new": new_version = True template_file = None -if len(sys.argv)>5: +if len(sys.argv) > 5: template_file = sys.argv[5] avg_overlap = True @@ -45,42 +44,44 @@ print(" Constant-Q binning: %s" % const_q) t_0 = time.time() -for r in range(first_run, last_run+1): - _data_file_path = os.path.join('/SNS', 'REF_L', ipts, 'nexus', 'REF_L_%d.nxs.h5' % r) - _output_dir = os.path.join('/SNS', 'REF_L', ipts, 'shared', 'autoreduce') +for r in range(first_run, last_run + 1): + _data_file_path = os.path.join("/SNS", "REF_L", ipts, "nexus", "REF_L_%d.nxs.h5" % r) + _output_dir = os.path.join("/SNS", "REF_L", ipts, "shared", "autoreduce") if not os.path.isfile(_data_file_path): print("File does not exist: %s" % _data_file_path) else: print("Processing %s" % _data_file_path) if new_version: - cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s new %s %s %s" % (PYTHON_CMD, - _data_file_path, - _output_dir, - template_file, - avg_overlap, - const_q) + cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s new %s %s %s" % ( + PYTHON_CMD, + _data_file_path, + _output_dir, + template_file, + avg_overlap, + const_q, + ) else: if template_file is not None: - cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s old %s" % (PYTHON_CMD, - _data_file_path, - _output_dir, - template_file) + cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s old %s" % ( + PYTHON_CMD, + _data_file_path, + _output_dir, + template_file, + ) else: - cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s" % (PYTHON_CMD, - _data_file_path, - _output_dir) + cmd = "%s /SNS/REF_L/shared/autoreduce/reduce_REF_L.py %s %s" % (PYTHON_CMD, _data_file_path, _output_dir) - out_log = os.path.join('/SNS', 'REF_L', ipts, 'shared', 'autoreduce', 'reduction_log', 'REF_L_%d.nxs.h5.log' % r) - out_err = os.path.join('/SNS', 'REF_L', ipts, 'shared', 'autoreduce', 'reduction_log', 'REF_L_%d.nxs.h5.err' % r) - logFile=open(out_log, "w") - errFile=open(out_err, "w") - proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - stdout=logFile, stderr=errFile, universal_newlines = True, - cwd=_output_dir) + out_log = os.path.join("/SNS", "REF_L", ipts, "shared", "autoreduce", "reduction_log", "REF_L_%d.nxs.h5.log" % r) + out_err = os.path.join("/SNS", "REF_L", ipts, "shared", "autoreduce", "reduction_log", "REF_L_%d.nxs.h5.err" % r) + logFile = open(out_log, "w") + errFile = open(out_err, 
"w") + proc = subprocess.Popen( + cmd, shell=True, stdin=subprocess.PIPE, stdout=logFile, stderr=errFile, universal_newlines=True, cwd=_output_dir + ) proc.communicate() logFile.close() errFile.close() - + success = not os.path.isfile(out_err) or os.stat(out_err).st_size == 0 if success: if os.path.isfile(out_err): @@ -89,4 +90,4 @@ print(" Errors were found. Check %s" % out_log) t_1 = time.time() -print("\nElapsed time: %g s [%g s/run]" % ((t_1-t_0), ((t_1-t_0)/(last_run-first_run+1)))) +print("\nElapsed time: %g s [%g s/run]" % ((t_1 - t_0), ((t_1 - t_0) / (last_run - first_run + 1)))) diff --git a/scripts/shared/listener.py b/scripts/shared/listener.py index bb630c9..2c09c9c 100755 --- a/scripts/shared/listener.py +++ b/scripts/shared/listener.py @@ -1,34 +1,32 @@ #!/usr/bin/python3 -import sys import os -import time +import sys import threading +import time import matplotlib.pyplot as plt -if ("MANTIDPATH" in os.environ): +if "MANTIDPATH" in os.environ: del os.environ["MANTIDPATH"] -sys.path.insert(0,"/opt/mantid63/bin") -sys.path.insert(1,"/opt/mantid63/lib") +sys.path.insert(0, "/opt/mantid63/bin") +sys.path.insert(1, "/opt/mantid63/lib") import mantid + mantid.kernel.config.setLogLevel(3) -from mantid.simpleapi import * import numpy as np - - +from mantid.simpleapi import * print(mantid.__version__) -LIVE_DATA_WS = 'live_data' -UPDATE_TIME = 30 # seconds - +LIVE_DATA_WS = "live_data" +UPDATE_TIME = 30 # seconds def thread_function(): is_alive = True - #fig, ax = plt.subplots(figsize=(6,6)) - plt.subplots(2, 1, dpi=100, figsize=(6,9), sharex=True) + # fig, ax = plt.subplots(figsize=(6,6)) + plt.subplots(2, 1, dpi=100, figsize=(6, 9), sharex=True) plt.subplots_adjust(hspace=0.5) ax = plt.subplot(2, 1, 1) ax2 = plt.subplot(2, 1, 2) @@ -39,103 +37,98 @@ def thread_function(): if LIVE_DATA_WS in mtd: try: n_events = mtd[LIVE_DATA_WS].getNumberEvents() - #ipts = mtd[LIVE_DATA_WS].getRun()["experiment_identifier"].value - #run_number = mtd[LIVE_DATA_WS].getRun()["run_number"].value + # ipts = mtd[LIVE_DATA_WS].getRun()["experiment_identifier"].value + # run_number = mtd[LIVE_DATA_WS].getRun()["run_number"].value print("Events: %g" % n_events) if n_events == 0: continue ws = SumSpectra(LIVE_DATA_WS) - tof = Rebin(ws, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace='tof_') + tof = Rebin(ws, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace="tof_") x = tof.readX(0) - x = (x[1:]+x[:-1])/2.0 + x = (x[1:] + x[:-1]) / 2.0 y = tof.readY(0) - + ax.clear() total = np.sum(y) - ax.step(x, y/total, where='mid', label='Total') - + ax.step(x, y / total, where="mid", label="Total") + if previous_data: - tof_previous_data = Rebin(previous_data, [ws.getTofMin(), 300, ws.getTofMax()], - OutputWorkspace='tof_previous_data_') + tof_previous_data = Rebin(previous_data, [ws.getTofMin(), 300, ws.getTofMax()], OutputWorkspace="tof_previous_data_") x_prev = tof_previous_data.readX(0) - x_prev = (x_prev[1:]+x_prev[:-1])/2.0 + x_prev = (x_prev[1:] + x_prev[:-1]) / 2.0 y_prev = tof_previous_data.readY(0) - + total = np.sum(y_prev) - ax.step(x_prev, y_prev/total, where='mid') - - ax.set_title('%g events | %s' % (n_events, time.ctime())) - ax.set_xlabel('TOF') - ax.set_ylabel('Events') - ax.legend(['Total', 'Previous']) - #ax.set_yscale('log') - #ax.set_xscale('log') - + ax.step(x_prev, y_prev / total, where="mid") + + ax.set_title("%g events | %s" % (n_events, time.ctime())) + ax.set_xlabel("TOF") + ax.set_ylabel("Events") + ax.legend(["Total", "Previous"]) + # ax.set_yscale('log') + # ax.set_xscale('log') 
+ # Difference between this chunk and the previous - + if previous_data: n_previous_events = previous_data.getNumberEvents() - if n_events > n_previous_events: + if n_events > n_previous_events: ax2.clear() - y_delta = y-y_prev + y_delta = y - y_prev total_d = np.sum(y_delta) - #ax2.step(x_prev, y_prev/total, where='mid', label='Total') - ax2.step(x_prev, y_delta/total_d, where='mid', label='Difference') - + # ax2.step(x_prev, y_prev/total, where='mid', label='Total') + ax2.step(x_prev, y_delta / total_d, where="mid", label="Difference") + if previous_delta is not None: - ax2.step(x_prev, previous_delta, where='mid', label='Previous Diff') - previous_delta = y_delta/total_d - - ax2.set_title('Difference from previous [%g -> %g events]' % (n_previous_events, n_events)) - ax2.set_xlabel('TOF') - ax2.set_ylabel('Events') - ax2.legend(['Difference', 'Previous diff']) + ax2.step(x_prev, previous_delta, where="mid", label="Previous Diff") + previous_delta = y_delta / total_d + + ax2.set_title("Difference from previous [%g -> %g events]" % (n_previous_events, n_events)) + ax2.set_xlabel("TOF") + ax2.set_ylabel("Events") + ax2.legend(["Difference", "Previous diff"]) else: ax2.clear() - ax2.set_title('No data yet') + ax2.set_title("No data yet") plt.pause(10) - #plt.savefig('/SNS/REF_L/shared/livedata.png') - - DeleteWorkspace('tof_') + # plt.savefig('/SNS/REF_L/shared/livedata.png') + + DeleteWorkspace("tof_") previous_data = CloneWorkspace(ws) except: print(sys.exc_info()[1]) - print('Stopping') + print("Stopping") AlgorithmManager.cancelAll() is_alive = False - - - time.sleep(int(UPDATE_TIME/2.0)) + time.sleep(int(UPDATE_TIME / 2.0)) x = threading.Thread(target=thread_function) x.start() try: - StartLiveData(Instrument='REF_L', - FromNow=False, - FromStartOfRun=True, - UpdateEvery=UPDATE_TIME, - Listener='SNSLiveEventDataListener', - Address='bl4b-daq1.sns.gov:31415', - #PostProcessingScript='output=input', - AccumulationWorkspace='acc', - PreserveEvents=True, - OutputWorkspace=LIVE_DATA_WS) - - # If we were to have only the StartLiveData call, we'd have to + StartLiveData( + Instrument="REF_L", + FromNow=False, + FromStartOfRun=True, + UpdateEvery=UPDATE_TIME, + Listener="SNSLiveEventDataListener", + Address="bl4b-daq1.sns.gov:31415", + # PostProcessingScript='output=input', + AccumulationWorkspace="acc", + PreserveEvents=True, + OutputWorkspace=LIVE_DATA_WS, + ) + + # If we were to have only the StartLiveData call, we'd have to # keep this process alive: - #time.sleep(2000) + # time.sleep(2000) except: print("STOPPING") AlgorithmManager.cancelAll() time.sleep(1) - - - - diff --git a/scripts/shared/process_sf.py b/scripts/shared/process_sf.py index 35f12e6..c283a76 100755 --- a/scripts/shared/process_sf.py +++ b/scripts/shared/process_sf.py @@ -6,9 +6,11 @@ """ import sys -sys.path.append('/SNS/REF_L/shared/autoreduce') + +sys.path.append("/SNS/REF_L/shared/autoreduce") import os + if len(sys.argv) < 5: print("\nUsage: python3 process_sf.py ") print("\nExample:\n python process_sf.py Si 178195 178216 /SNS/REF_L/shared/autoreduce/sf_178195_Si2InDiam.cfg") @@ -28,7 +30,7 @@ _fpath = os.path.abspath(sys.argv[4]) _dir, _file = os.path.split(_fpath) -if len(_dir)>0 and not os.path.isdir(_dir): +if len(_dir) > 0 and not os.path.isdir(_dir): print("The file you asked for is not in an existing directory: %s" % _dir) sys.exit(0) print("Output file: %s" % _fpath) @@ -36,11 +38,7 @@ from sf_calculator import ScalingFactor -sf = ScalingFactor(run_list=range(first_run,last_run+1), - sort_by_runs=True, - 
sf_file=_fpath, - tof_step=200, - medium=sys.argv[1], - slit_tolerance=0.06) +sf = ScalingFactor( + run_list=range(first_run, last_run + 1), sort_by_runs=True, sf_file=_fpath, tof_step=200, medium=sys.argv[1], slit_tolerance=0.06 +) sf.execute() - diff --git a/scripts/shared/sample_changer.py b/scripts/shared/sample_changer.py index de62eb0..4e1b827 100644 --- a/scripts/shared/sample_changer.py +++ b/scripts/shared/sample_changer.py @@ -1,44 +1,130 @@ -import sys import time + from epics import PV MOVE_TIMEOUT = 600 ALIGN_TIMEOUT = 1200 -# Set to True only for debugging +# Set to True only for debugging RETURN_ON_FAIL = True # Data types: 0=data, 1=direct beam (full), 2=direct beam (0-att), 3=ignore -DATATYPE = PV('BL4B:CS:ExpPl:DataType') -AUTOALIGN = PV('BL4B:CS:AutoAlign:Run') -AUTOALIGN_STATUS = PV('BL4B:CS:AutoAlign:Stat') +DATATYPE = PV("BL4B:CS:ExpPl:DataType") +AUTOALIGN = PV("BL4B:CS:AutoAlign:Run") +AUTOALIGN_STATUS = PV("BL4B:CS:AutoAlign:Stat") # 1 is running, 0 is idle -RUNNING = PV('BL4B:CS:Running:Stat') -BL4B_MOT_PREFIX = 'BL4B:Mot:' - -POSITIONS = [dict(ysc=50, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=108, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=165, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=222, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=279, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=336, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=50, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=108, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=165, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=222, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=279, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - dict(ysc=336, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), - ] - -SCAN = [[6000, {'BL4B:Chop:Gbl:WavelengthReq': 15, 's1:Y:Gap': 0.39, 'si:Y:Gap': 0.25, 's3:Y:Gap': 10, 'ths': 0.6, 'tthd': 1.2, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [6000, {'BL4B:Chop:Gbl:WavelengthReq': 12.386, 's1:Y:Gap': 0.39, 'si:Y:Gap': 0.25, 's3:Y:Gap': 10, 'ths': 0.6, 'tthd': 1.2, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [6000, {'BL4B:Chop:Gbl:WavelengthReq': 9.74, 's1:Y:Gap': 0.39, 'si:Y:Gap': 0.25, 's3:Y:Gap': 10, 'ths': 0.6, 'tthd': 1.2, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [6000, {'BL4B:Chop:Gbl:WavelengthReq': 7.043, 's1:Y:Gap': 0.39, 'si:Y:Gap': 0.25, 's3:Y:Gap': 10, 'ths': 0.6, 'tthd': 1.2, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [18000, {'BL4B:Chop:Gbl:WavelengthReq': 4.25, 's1:Y:Gap': 0.39, 'si:Y:Gap': 0.25, 's3:Y:Gap': 10, 'ths': 0.6, 'tthd': 1.2, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [18000, {'BL4B:Chop:Gbl:WavelengthReq': 4.25, 's1:Y:Gap': 0.769, 'si:Y:Gap': 0.493, 's3:Y:Gap': 10, 'ths': 1.183, 'tthd': 2.366, 's1:X:Gap': 20, 'si:X:Gap': 20}], - [72000, {'BL4B:Chop:Gbl:WavelengthReq': 4.25, 's1:Y:Gap': 1.523, 'si:Y:Gap': 0.976, 's3:Y:Gap': 20, 'ths': 2.343, 'tthd': 4.686, 's1:X:Gap': 20, 'si:X:Gap': 20}], - ] +RUNNING = PV("BL4B:CS:Running:Stat") +BL4B_MOT_PREFIX = "BL4B:Mot:" + +POSITIONS = [ + dict(ysc=50, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=108, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=165, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=222, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=279, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=336, hs=9, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=50, hs=66, ths=-3.7, chis=0, 
zs=199.2, xs=0, ys=6.002), + dict(ysc=108, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=165, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=222, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=279, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), + dict(ysc=336, hs=66, ths=-3.7, chis=0, zs=199.2, xs=0, ys=6.002), +] + +SCAN = [ + [ + 6000, + { + "BL4B:Chop:Gbl:WavelengthReq": 15, + "s1:Y:Gap": 0.39, + "si:Y:Gap": 0.25, + "s3:Y:Gap": 10, + "ths": 0.6, + "tthd": 1.2, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 6000, + { + "BL4B:Chop:Gbl:WavelengthReq": 12.386, + "s1:Y:Gap": 0.39, + "si:Y:Gap": 0.25, + "s3:Y:Gap": 10, + "ths": 0.6, + "tthd": 1.2, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 6000, + { + "BL4B:Chop:Gbl:WavelengthReq": 9.74, + "s1:Y:Gap": 0.39, + "si:Y:Gap": 0.25, + "s3:Y:Gap": 10, + "ths": 0.6, + "tthd": 1.2, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 6000, + { + "BL4B:Chop:Gbl:WavelengthReq": 7.043, + "s1:Y:Gap": 0.39, + "si:Y:Gap": 0.25, + "s3:Y:Gap": 10, + "ths": 0.6, + "tthd": 1.2, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 18000, + { + "BL4B:Chop:Gbl:WavelengthReq": 4.25, + "s1:Y:Gap": 0.39, + "si:Y:Gap": 0.25, + "s3:Y:Gap": 10, + "ths": 0.6, + "tthd": 1.2, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 18000, + { + "BL4B:Chop:Gbl:WavelengthReq": 4.25, + "s1:Y:Gap": 0.769, + "si:Y:Gap": 0.493, + "s3:Y:Gap": 10, + "ths": 1.183, + "tthd": 2.366, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], + [ + 72000, + { + "BL4B:Chop:Gbl:WavelengthReq": 4.25, + "s1:Y:Gap": 1.523, + "si:Y:Gap": 0.976, + "s3:Y:Gap": 20, + "ths": 2.343, + "tthd": 4.686, + "s1:X:Gap": 20, + "si:X:Gap": 20, + }, + ], +] def move_motors(positions): @@ -51,13 +137,13 @@ def move_motors(positions): else: _motor = BL4B_MOT_PREFIX + motor _pv = PV(_motor).put(position) - check_list.append(PV(_motor + '.Status')) + check_list.append(PV(_motor + ".Status")) ready = False t0 = time.time() while not ready: time.sleep(2) - print(' ..checking') + print(" ..checking") for _pv in check_list: # Check motor status ready = ready and _pv.get() == 0 @@ -65,7 +151,7 @@ def move_motors(positions): print("Timed out....") return RETURN_ON_FAIL - print('Ready') + print("Ready") return True @@ -73,7 +159,7 @@ def align_sample(): print("Starting automated alignment") t0 = time.time() AUTOALIGN.put(1) - while AUTOALIGN.get() is not 1: + while AUTOALIGN.get() != 1: if time.time() - t0 > ALIGN_TIMEOUT: print("Timed out...") return RETURN_ON_FAIL @@ -84,24 +170,24 @@ def align_sample(): def run_scan(name, scan): - group_id = PV('BL4B:CS:RunControl:LastRunNumber').get() + 1 + group_id = PV("BL4B:CS:RunControl:LastRunNumber").get() + 1 PV("BL4B:CS:Autoreduce:Sequence:Total").put(len(scan)) PV("BL4B:CS:Autoreduce:Sequence:Id").put(group_id) sequence_num = PV("BL4B:CS:Autoreduce:Sequence:Num") - title = PV('BL4B:CS:Autoreduce:BaseTitle') + title = PV("BL4B:CS:Autoreduce:BaseTitle") - for i in range(1, len(scan)+1): + for i in range(1, len(scan) + 1): sequence_num.put(i) title.put("%s-%s-%s" % (name, group_id, i)) if move_motors(scan[i][1]): # Acquire neutrons! 
- PV('BL4B:CS:RunControl:Start').put(1) - neutrons=PV('BL4B:Det:Neutrons') + PV("BL4B:CS:RunControl:Start").put(1) + neutrons = PV("BL4B:Det:Neutrons") while neutrons.get() < scan[i][0]: time.sleep(5) - PV('BL4B:CS:RunControl:Stop').put(1) + PV("BL4B:CS:RunControl:Stop").put(1) return True @@ -111,16 +197,15 @@ def process(positions): for sample in positions: # Move the sample changer - assert(sample['pos']>0 and sample['pos']<13) - if move_motors(sample['pos']+1): + assert sample["pos"] > 0 + assert sample["pos"] < 13 + if move_motors(sample["pos"] + 1): # Align the sample if align_sample(): # Run the CSV - run_scan(sample['name'], SCAN) + run_scan(sample["name"], SCAN) if __name__ == "__main__": - positions = [dict(name='quartz', pos=1), - dict(name='SiO2', pos=2) - ] - process(positions) \ No newline at end of file + positions = [dict(name="quartz", pos=1), dict(name="SiO2", pos=2)] + process(positions) diff --git a/scripts/test/test_peaks.py b/scripts/test/test_peaks.py index e0f2f7b..8e77465 100644 --- a/scripts/test/test_peaks.py +++ b/scripts/test/test_peaks.py @@ -1,10 +1,11 @@ # import mantid algorithms, numpy and matplotlib import sys -sys.path.append('../autoreduce') -from mantid.simpleapi import * + +sys.path.append("../autoreduce") import numpy as np -from scipy import ndimage +from mantid.simpleapi import * from peak_finding import find_peaks, peak_prominences, peak_widths +from scipy import ndimage def scan_peaks(x): @@ -17,11 +18,12 @@ def scan_peaks(x): quality = -peaks_w * prom zipped = zip(peaks, peaks_w, quality, prom) - ordered = sorted(zipped, key=lambda a:a[2]) - found_peaks = [[p[0],p[1]] for p in ordered] + ordered = sorted(zipped, key=lambda a: a[2]) + found_peaks = [[p[0], p[1]] for p in ordered] return found_peaks + ws = Load("REF_L_179932") n_x = int(ws.getInstrument().getNumberParameter("number-of-x-pixels")[0]) @@ -29,9 +31,9 @@ def scan_peaks(x): _integrated = Integration(InputWorkspace=ws) signal = _integrated.extractY() -z=np.reshape(signal, (n_x, n_y)) +z = np.reshape(signal, (n_x, n_y)) x = z.sum(axis=0) peaks = scan_peaks(x) -print(peaks[0][0], peaks[0][1]) \ No newline at end of file +print(peaks[0][0], peaks[0][1]) diff --git a/scripts/test/test_sf.py b/scripts/test/test_sf.py index b9c973c..ea054df 100644 --- a/scripts/test/test_sf.py +++ b/scripts/test/test_sf.py @@ -1,11 +1,9 @@ import sys -sys.path.append('../autoreduce') + +sys.path.append("../autoreduce") from sf_calculator import ScalingFactor -sf = ScalingFactor(run_list=range(184975, 184990), - sf_file="/tmp/sf_184975_air.cfg", - medium='air') +sf = ScalingFactor(run_list=range(184975, 184990), sf_file="/tmp/sf_184975_air.cfg", medium="air") sf.execute() - diff --git a/scripts/test/thi_alignment.py b/scripts/test/thi_alignment.py index f95d163..d2a8ff9 100644 --- a/scripts/test/thi_alignment.py +++ b/scripts/test/thi_alignment.py @@ -1,127 +1,130 @@ """ Created on Mon Oct 3 09:36:53 2022 - + @author: esw """ import h5py -import numpy as np import matplotlib.pyplot as plt +import numpy as np from scipy.optimize import curve_fit - - -#------------------------------------------------- - -def get_ypix_v_i(fname,nx,ny): - f = h5py.File(fname, 'r') - - - eid=np.array(f['entry/bank1_events/event_id']) - ect=np.array(f['entry/bank1_events/total_counts']) - ect=ect[0] - - ipix=np.zeros(ny) - - #convert the event id into and x and y pixel value - #x= eid // ny - y= eid % ny - + +# ------------------------------------------------- + + +def get_ypix_v_i(fname, nx, ny): + f = h5py.File(fname, "r") + + eid 
= np.array(f["entry/bank1_events/event_id"]) + ect = np.array(f["entry/bank1_events/total_counts"]) + ect = ect[0] + + ipix = np.zeros(ny) + + # convert the event id into and x and y pixel value + # x= eid // ny + y = eid % ny + for i in range(ect): - ipix[y[i]]=ipix[y[i]]+1 - + ipix[y[i]] = ipix[y[i]] + 1 + return ipix -#----------------------------------------------- - - + + +# ----------------------------------------------- + + def get_zd(fname): - f = h5py.File(fname, 'r') - return np.array(f['entry/bank1_events/event_id']) - -#----------------------------------------------- - -#----------------------------------------------- - + f = h5py.File(fname, "r") + return np.array(f["entry/bank1_events/event_id"]) + + +# ----------------------------------------------- + +# ----------------------------------------------- + + def gauss(x, a, b, c): - return a*np.exp(-(x-b)**2/(2*c**2)) - -#----------------------------------------------- - -nx=256 -ny=304 - -path='C:/Users/esw/Documents/NR/4B/INSTRUMENT/water/' - -#zi scans (0.70) -runs=[197310,197327] -runs=[204912,204921] - -zstep=1 - -th=[-0.2,-0.4,-0.6,-0.8,-1.0] - - -runs=[] -th=[-0.25,-0.45,-0.65,-0.85,-1.05] - -#------------------------------- - -nn=runs[1]-runs[0] -ypix=np.arange(ny) - + return a * np.exp(-((x - b) ** 2) / (2 * c**2)) + + +# ----------------------------------------------- + +nx = 256 +ny = 304 + +path = "C:/Users/esw/Documents/NR/4B/INSTRUMENT/water/" + +# zi scans (0.70) +runs = [197310, 197327] +runs = [204912, 204921] + +zstep = 1 + +th = [-0.2, -0.4, -0.6, -0.8, -1.0] + + +runs = [] +th = [-0.25, -0.45, -0.65, -0.85, -1.05] + +# ------------------------------- + +nn = runs[1] - runs[0] +ypix = np.arange(ny) + plt.cla() - -mm=np.zeros(nn+1) -pixCOM=np.zeros(nn+1) -pixGauss=np.zeros(nn+1) -ypix=np.arange(ny) - -for i in range(nn+1): - name='REF_L_'+str(runs[0]+i)+'.nxs.h5' - #print(name) - - ipix=get_ypix_v_i(path+name,nx,ny) - plt.plot(ypix,ipix) - cen=0 - mm[i]=i*zstep - + +mm = np.zeros(nn + 1) +pixCOM = np.zeros(nn + 1) +pixGauss = np.zeros(nn + 1) +ypix = np.arange(ny) + +for i in range(nn + 1): + name = "REF_L_" + str(runs[0] + i) + ".nxs.h5" + # print(name) + + ipix = get_ypix_v_i(path + name, nx, ny) + plt.plot(ypix, ipix) + cen = 0 + mm[i] = i * zstep + plt.plot(ipix) - plt.xlim(0,170) - - #get the center of mass of the distribution - com=sum(ypix*ipix)/sum(ipix) - #fit a gaussian to the distribution - par,cov = curve_fit(gauss,ypix,ipix, p0=(max(ipix),com,1)) - - yy=np.arange(min(ypix),max(ypix),0.01) - fit = gauss(yy, par[0],par[1],par[2]) - - pixCOM[i]=com - pixGauss[i]=par[1] - -d1=np.zeros(int(len(pixCOM)/2)) -d2=np.zeros(int(len(pixCOM)/2)) - -for i in range(0,len(pixCOM),2): - v=int(i/2) - print(i,v) - print((pixCOM[i]-pixCOM[i+1]),(pixGauss[i]-pixGauss[i+1])) - d1[v]=(pixGauss[i]-pixGauss[i+1]) - d2[v]=(pixCOM[i]-pixCOM[i+1]) - + plt.xlim(0, 170) + + # get the center of mass of the distribution + com = sum(ypix * ipix) / sum(ipix) + # fit a gaussian to the distribution + par, cov = curve_fit(gauss, ypix, ipix, p0=(max(ipix), com, 1)) + + yy = np.arange(min(ypix), max(ypix), 0.01) + fit = gauss(yy, par[0], par[1], par[2]) + + pixCOM[i] = com + pixGauss[i] = par[1] + +d1 = np.zeros(int(len(pixCOM) / 2)) +d2 = np.zeros(int(len(pixCOM) / 2)) + +for i in range(0, len(pixCOM), 2): + v = int(i / 2) + print(i, v) + print((pixCOM[i] - pixCOM[i + 1]), (pixGauss[i] - pixGauss[i + 1])) + d1[v] = pixGauss[i] - pixGauss[i + 1] + d2[v] = pixCOM[i] - pixCOM[i + 1] + plt.cla() -plt.plot(d1,th,'o') -plt.plot(d2,th,'o') - 
-#now fit a line -a, b = np.polyfit(d1,th, 1) -print(a,b) -x=np.arange(0,100.0,0.01) -y=a*x+b -plt.plot(x,y) - -#now fit a line -a, b = np.polyfit(d2,th, 1) -print(a,b) -x=np.arange(0,100.0,0.01) -y=a*x+b -plt.plot(x,y) - +plt.plot(d1, th, "o") +plt.plot(d2, th, "o") + +# now fit a line +a, b = np.polyfit(d1, th, 1) +print(a, b) +x = np.arange(0, 100.0, 0.01) +y = a * x + b +plt.plot(x, y) + +# now fit a line +a, b = np.polyfit(d2, th, 1) +print(a, b) +x = np.arange(0, 100.0, 0.01) +y = a * x + b +plt.plot(x, y) diff --git a/xrr/xrr_processing.py b/xrr/xrr_processing.py index 875a5cd..36c84bd 100644 --- a/xrr/xrr_processing.py +++ b/xrr/xrr_processing.py @@ -1,12 +1,12 @@ -import sys import os -import numpy as np +import sys +import warnings +import numpy as np from matplotlib import pyplot as plt -import warnings -warnings.filterwarnings('ignore', module='numpy') -warnings.filterwarnings('ignore') +warnings.filterwarnings("ignore", module="numpy") +warnings.filterwarnings("ignore") WAVELENGTH_META = "HW_XG_WAVE_LENGTH_ALPHA1" @@ -14,12 +14,12 @@ def process_xrr(data_file, output_dir=None): """ - Process Rigaku .ras files to produce R(Q). + Process Rigaku .ras files to produce R(Q). - data_file: full file path of the data file to process - output_dir: optional output directory + data_file: full file path of the data file to process + output_dir: optional output directory """ - data = np.loadtxt(data_file, comments=['#','*']).T + data = np.loadtxt(data_file, comments=["#", "*"]).T # If no output directory was provided, use the location of the data file if output_dir is None: @@ -29,13 +29,13 @@ def process_xrr(data_file, output_dir=None): meta_data = dict() with open(data_file) as fd: for line in fd: - if line.startswith('*'): + if line.startswith("*"): toks = line.split() - if len(toks)<2: + if len(toks) < 2: # Single keywords are used to define meta data sections, skip them pass else: - value = toks[1].replace('"','') + value = toks[1].replace('"', "") try: value = float(value) except: @@ -57,7 +57,7 @@ def process_xrr(data_file, output_dir=None): ttheta = data[0] counts = data[1] - q = 4*np.pi/wl*np.sin(ttheta/2*np.pi/180) + q = 4 * np.pi / wl * np.sin(ttheta / 2 * np.pi / 180) # Select only points in a useful range _q_idx = (q > q_min) & (q < q_max) @@ -67,7 +67,7 @@ def process_xrr(data_file, output_dir=None): # R(q) will be normalized to the average between q_min and norm_q_max norm_q_max = 0.04 _q_idx = (q > q_min) & (q < norm_q_max) - _norm = np.sum(r[_q_idx])/len(q[_q_idx]) + _norm = np.sum(r[_q_idx]) / len(q[_q_idx]) print("Norm %g" % _norm) r /= _norm err = r * 0.1 @@ -79,14 +79,15 @@ def process_xrr(data_file, output_dir=None): np.savetxt(os.path.join(output_dir, "%s-Rq.txt" % _name), _rq_data) - plt.figure(figsize=(10,6)) + plt.figure(figsize=(10, 6)) plt.plot(q, r) - plt.xlabel('q [$1/\AA$]') - plt.ylabel('R(q)') - plt.yscale('log') - plt.xscale('linear') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + plt.yscale("log") + plt.xscale("linear") plt.savefig(os.path.join(output_dir, "%s-Rq.png" % _name)) + if len(sys.argv) < 2: print("\nUsage: python3 xrr_processing.py ") print("\nExample:\n python3 xrr_processing.py xrr_data_file.ras")
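
Note on the xrr_processing change above: the patch keeps the same 2-theta to Q conversion and low-Q normalization, only reformatting it. As a quick sanity check of that math, here is a minimal standalone sketch of the conversion; the wavelength, the synthetic scan, and the q_min cutoff below are illustrative assumptions, not values taken from the repository.

```python
import numpy as np

# Illustrative inputs (not from the repo): Cu K-alpha wavelength, a fake 2-theta scan
wl = 1.5406          # wavelength in Angstroms (assumed)
q_min = 0.008        # lower Q cutoff (assumed)
norm_q_max = 0.04    # normalization window upper bound, as in process_xrr()

ttheta = np.linspace(0.2, 6.0, 200)              # 2-theta in degrees
counts = 1.0 / (1.0 + (ttheta / 0.5) ** 4) + 1e-4  # synthetic reflectivity-like signal

# Q = 4*pi/lambda * sin(theta), with theta = 2theta/2 converted to radians
q = 4.0 * np.pi / wl * np.sin(ttheta / 2.0 * np.pi / 180.0)

# Keep only the useful Q range, then normalize R(q) to its average below norm_q_max
keep = q > q_min
q, r = q[keep], counts[keep]
norm_idx = (q > q_min) & (q < norm_q_max)
r /= np.sum(r[norm_idx]) / len(q[norm_idx])

print(q[:3], r[:3])
```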