diff --git a/reduction/lr_reduction/event_reduction.py b/reduction/lr_reduction/event_reduction.py
index ad72c1f..b8cfb65 100644
--- a/reduction/lr_reduction/event_reduction.py
+++ b/reduction/lr_reduction/event_reduction.py
@@ -404,7 +404,7 @@ def extract_meta_data(self):
         Extract meta data from the loaded data file.
         """
         # Get instrument parameters
-        if self.instrument_settings is None or self.instrument_settings.apply_instrument_settings:
+        if self.instrument_settings is None or not self.instrument_settings.apply_instrument_settings:
             settings = read_settings(self._ws_sc)
         else:
             settings = self.instrument_settings
@@ -451,13 +451,13 @@ def extract_meta_data_4A(self):
         4A-specific meta data
         """
         run_object = self._ws_sc.getRun()
-        self.det_distance = run_object["SampleDetDis"].getStatistics().mean
+        self.sample_detector_distance = run_object["SampleDetDis"].getStatistics().mean
         source_sample_distance = run_object["ModeratorSamDis"].getStatistics().mean
         if run_object["SampleDetDis"].units not in ["m", "meter"]:
-            self.det_distance /= 1000.0
+            self.sample_detector_distance /= 1000.0
         if run_object["ModeratorSamDis"].units not in ["m", "meter"]:
             source_sample_distance /= 1000.0
-        self.source_detector_distance = source_sample_distance + self.det_distance
+        self.source_detector_distance = source_sample_distance + self.sample_detector_distance
 
     def extract_meta_data_4B(self):
         """
@@ -472,9 +472,9 @@ def extract_meta_data_4B(self):
             settings = self.instrument_settings
 
         if settings.apply_instrument_settings:
-            self.det_distance = settings.sample_detector_distance
+            self.sample_detector_distance = settings.sample_detector_distance
         else:
-            self.det_distance = self.DEFAULT_4B_SAMPLE_DET_DISTANCE
+            self.sample_detector_distance = self.DEFAULT_4B_SAMPLE_DET_DISTANCE
 
         # Check that we have the needed meta data for the emission delay calculation
         if self.use_emission_time:
@@ -503,9 +503,9 @@ def __repr__(self):
         String representation of the reduction settings
        """
        output = "Reduction settings:\n"
-        output += " sample-det: %s\n" % self.det_distance
+        output += " sample-det: %s\n" % self.sample_detector_distance
         output += " source-det: %s\n" % self.source_detector_distance
-        output += " pixel: %s\n" % self.pixel_width
+        output += " pixel-width: %s\n" % self.pixel_width
         output += " WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
         output += " Q: %s %s\n" % (self.q_min_meas, self.q_max_meas)
         theta_degrees = self.theta * 180 / np.pi
@@ -887,7 +887,7 @@ def _reflectivity(
                 event_weights = evt_list.getWeights()
 
             x_distance = _pixel_width * (j - peak_position)
-            delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0
+            delta_theta_f = np.arctan(x_distance / self.sample_detector_distance) / 2.0
 
             # Sign will depend on reflect up or down
             ths_value = ws.getRun()["ths"].value[-1]
@@ -919,8 +919,8 @@ def _reflectivity(
         if q_summing:
             x0 = _pixel_width * (peak_position - peak[0])
             x1 = _pixel_width * (peak_position - peak[1])
-            delta_theta_f0 = np.arctan(x0 / self.det_distance) / 2.0
-            delta_theta_f1 = np.arctan(x1 / self.det_distance) / 2.0
+            delta_theta_f0 = np.arctan(x0 / self.sample_detector_distance) / 2.0
+            delta_theta_f1 = np.arctan(x1 / self.sample_detector_distance) / 2.0
             qz_max = 4.0 * np.pi / self.tof_range[1] * self.constant * np.fabs(np.sin(theta + delta_theta_f0))
             qz_min = 4.0 * np.pi / self.tof_range[1] * self.constant * np.fabs(np.sin(theta + delta_theta_f1))
 
@@ -1051,7 +1051,7 @@ def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, the
             wl_weights = 1.0 / np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf)
             x_distance = float(j - peak_position) * self.pixel_width
-            delta_theta_f = np.arctan(x_distance / self.det_distance)
+            delta_theta_f = np.arctan(x_distance / self.sample_detector_distance)
 
             # Sign will depend on reflect up or down
             ths_value = ws.getRun()["ths"].value[-1]
             delta_theta_f *= np.sign(ths_value)
diff --git a/reduction/lr_reduction/template.py b/reduction/lr_reduction/template.py
index 55147a3..8410c78 100644
--- a/reduction/lr_reduction/template.py
+++ b/reduction/lr_reduction/template.py
@@ -291,7 +291,7 @@ def process_from_template_ws(
         instrument_settings=instrument_settings,
         use_emission_time=template_data.use_emission_time,
     )
-    print(event_refl)
+    print(f"{'*'*88}\nevent_refl:\n{event_refl}\n{'*'*88}")
 
     # R(Q)
     qz, refl, d_refl = event_refl.specular(
diff --git a/reduction/notebooks/workflow-fixed-tthd.ipynb b/reduction/notebooks/workflow-fixed-tthd.ipynb
index 7a5c76a..da22d41 100644
--- a/reduction/notebooks/workflow-fixed-tthd.ipynb
+++ b/reduction/notebooks/workflow-fixed-tthd.ipynb
@@ -540,7 +540,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 143,
+   "execution_count": null,
    "metadata": {
     "execution": {
      "iopub.execute_input": "2024-09-01T22:56:07.736455Z",
@@ -561,7 +561,7 @@
    ],
    "source": [
     "_pixel_width = 0.0007\n",
-    "det_distance = 1.355\n",
+    "sample_detector_distance = 1.355\n",
     "dirpix = 261\n",
     "peak_position = 85.5\n",
     "#peak_position = 210\n",
@@ -571,21 +571,21 @@
     "\n",
     "\n",
     "x0 = _pixel_width * (peak_position - dirpix)\n",
-    "theta = np.arctan(x0 / det_distance) / 2.0 * 180 / np.pi\n",
+    "theta = np.arctan(x0 / sample_detector_distance) / 2.0 * 180 / np.pi\n",
     "print(theta)\n",
     "\n",
     "\n",
-    " \n",
+    "\n",
     "if False:\n",
     "    for i in range(304):\n",
     "        x0 = _pixel_width * (i - dirpix)\n",
-    "        theta = np.arctan(x0 / det_distance) / 2.0 * 180 / np.pi\n",
+    "        theta = np.arctan(x0 / sample_detector_distance) / 2.0 * 180 / np.pi\n",
     "        print(\"Pixel:%g Theta = %g\" % (i, theta))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 74,
+   "execution_count": null,
    "metadata": {
     "execution": {
      "iopub.execute_input": "2024-08-30T01:39:22.045519Z",
@@ -628,10 +628,10 @@
     "all_pix = np.arange(300)\n",
     "\n",
     "x0 = _pixel_width * (pix - dirpix)\n",
-    "calc = np.arctan(x0 / det_distance) / 2.0 * 180 / np.pi\n",
+    "calc = np.arctan(x0 / sample_detector_distance) / 2.0 * 180 / np.pi\n",
     "\n",
     "x0 = _pixel_width * (all_pix - dirpix)\n",
-    "all_calc = np.arctan(x0 / det_distance) / 2.0 * 180 / np.pi\n",
+    "all_calc = np.arctan(x0 / sample_detector_distance) / 2.0 * 180 / np.pi\n",
     "\n",
     "fig, ax = plt.subplots(figsize=(10,5))\n",
     "plt.plot(pix, ths, 'o', label='ths')\n",
@@ -656,9 +656,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "sans",
-   "language": "",
-   "name": "sans"
+   "display_name": "lr_reduction",
+   "language": "python",
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -670,7 +670,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.14"
+   "version": "3.10.16"
   }
  },
 "nbformat": 4,
diff --git a/reduction/tests/test_reduction.py b/reduction/tests/test_reduction.py
index 2096c96..54c3b13 100644
--- a/reduction/tests/test_reduction.py
+++ b/reduction/tests/test_reduction.py
@@ -18,28 +18,28 @@
 def cleanup_partial_files(output_dir, runs):
     """
-    Clean up reduced files left behind after reduction
+    Clean up reduced files left behind after reduction
     """
     for i, r in enumerate(runs):
-        reduced_path = os.path.join(output_dir, 'REFL_%s_%d_%s_partial.txt' % (runs[0], i+1, r))
+        reduced_path = os.path.join(output_dir, "REFL_%s_%d_%s_partial.txt" % (runs[0], i + 1, r))
         if os.path.isfile(reduced_path):
             os.remove(reduced_path)
 
 
 def test_info(nexus_dir):
     """
-    Test utility functions to get basic info
+    Test utility functions to get basic info
     """
     with amend_config(data_dir=nexus_dir):
         ws_sc = mtd_api.Load("REF_L_198409")
     wl_min, wl_max = event_reduction.get_wl_range(ws_sc)
-    assert(wl_min == 13.7)
-    assert(wl_max == 16.3)
+    assert wl_min == 13.7
+    assert wl_max == 16.3
 
 
 def test_attenuation(nexus_dir):
     """
-    Test attenuation calculation can complete
+    Test attenuation calculation can complete
     """
     with amend_config(data_dir=nexus_dir):
         ws_sc = mtd_api.Load("REF_L_198409")
@@ -48,38 +48,36 @@ def test_attenuation(nexus_dir):
 
 def test_q_summing(nexus_dir):
     """
-    Test Q summing process
+    Test Q summing process
     """
-    template_path = 'data/template.xml'
+    template_path = "data/template.xml"
     template.read_template(template_path, 7)
     with amend_config(data_dir=nexus_dir):
-        ws_sc = mtd_api.Load("REF_L_%s" % 198415)
+        ws_sc = mtd_api.Load("REF_L_%s" % 198415)
     qz_mid0, refl0, _, meta_data = template.process_from_template_ws(ws_sc, template_path, info=True)
-    assert(np.fabs(meta_data['dq_over_q'] - 0.02759) < 1e-3)
+    assert np.fabs(meta_data["dq_over_q"] - 0.02759) < 1e-3
 
     # Now try with Q summing, which should have similar results
-    qz_mid, refl, _, meta_data = template.process_from_template_ws(ws_sc, template_path,
-                                                                   tof_weighted=True,
-                                                                   info=True, q_summing=True)
+    qz_mid, refl, _, meta_data = template.process_from_template_ws(ws_sc, template_path, tof_weighted=True, info=True, q_summing=True)
 
-    assert(np.fabs(meta_data['dq_over_q'] - 0.009354) < 1e-5)
+    assert np.fabs(meta_data["dq_over_q"] - 0.009354) < 1e-5
 
     # Note that TOF weighted may have a slightly different range, so here we skip
     # the extra point.
-    assert(len(qz_mid0) == len(qz_mid[1:]))
-    assert(np.fabs(np.mean(refl[1:]-refl0)) < 1e-6)
+    assert len(qz_mid0) == len(qz_mid[1:])
+    assert np.fabs(np.mean(refl[1:] - refl0)) < 1e-6
 
     # Cleanup
-    output_dir = 'data/'
+    output_dir = "data/"
     cleanup_partial_files(output_dir, range(198409, 198417))
 
 
 def test_full_reduction(nexus_dir):
     """
-    Test the full reduction chain
+    Test the full reduction chain
     """
-    template_path = 'data/template.xml'
+    template_path = "data/template.xml"
     qz_all = []
     refl_all = []
     d_refl_all = []
@@ -107,30 +105,29 @@ def test_full_reduction(nexus_dir):
     refl_all = np.take_along_axis(refl_all, idx, axis=None)
     d_refl_all = np.take_along_axis(d_refl_all, idx, axis=None)
 
-    assert(np.fabs(resolution - 0.02785205863936946) < 1e-5)
-    ref_data = np.loadtxt('data/reference_rq.txt').T
-    assert(len(ref_data[1]) == len(refl_all))
-    assert(np.fabs(np.sum(ref_data[1]-refl_all)) < 1e-10)
+    assert np.fabs(resolution - 0.02785205863936946) < 1e-5
+    ref_data = np.loadtxt("data/reference_rq.txt").T
+    assert len(ref_data[1]) == len(refl_all)
+    assert np.fabs(np.sum(ref_data[1] - refl_all)) < 1e-10
 
     # Cleanup
-    output_dir = 'data/'
+    output_dir = "data/"
     cleanup_partial_files(output_dir, range(198409, 198417))
 
 
 def test_reduce_workflow(nexus_dir):
-    template_path = 'data/template.xml'
-    output_dir = 'data/'
-    reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt')
+    template_path = "data/template.xml"
+    output_dir = "data/"
+    reduced_path = os.path.join(output_dir, "REFL_198409_combined_data_auto.txt")
     if os.path.isfile(reduced_path):
         os.remove(reduced_path)
 
     for i in range(198409, 198417):
         with amend_config(data_dir=nexus_dir):
             ws = mtd_api.Load("REF_L_%s" % i)
-        workflow.reduce(ws, template_path, output_dir=output_dir,
-                        average_overlap=False)
+        workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False)
 
-    reference_path = 'data/reference_rq.txt'
+    reference_path = "data/reference_rq.txt"
     if os.path.isfile(reference_path):
         _data = np.loadtxt(reference_path).T
@@ -138,11 +135,11 @@ def test_reduce_workflow(nexus_dir):
         _refl = np.loadtxt(reduced_path).T
 
     for i in range(3):
-        assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10)
+        assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10
 
     # The reference was computed with a constant dq/q but our approach recalculates
     # it for each run, so we expect a small discrepancy within 1%.
-    assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
+    assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01
 
     # Cleanup
     cleanup_partial_files(output_dir, range(198409, 198417))
 
@@ -150,9 +147,9 @@ def test_reduce_workflow(nexus_dir):
 
 def test_reduce_functional_bck(nexus_dir, template_dir):
     os.chdir(Path(template_dir).parent)
-    template_path = 'data/template_fbck.xml'
-    output_dir = 'data/'
-    reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt')
+    template_path = "data/template_fbck.xml"
+    output_dir = "data/"
+    reduced_path = os.path.join(output_dir, "REFL_198409_combined_data_auto.txt")
     if os.path.isfile(reduced_path):
         os.remove(reduced_path)
 
@@ -164,10 +161,9 @@ def test_reduce_functional_bck(nexus_dir, template_dir):
         template_data = template.read_template(template_path, sequence_number)
         template_data.two_backgrounds = True
 
-        workflow.reduce(ws, template_data, output_dir=output_dir,
-                        average_overlap=False)
+        workflow.reduce(ws, template_data, output_dir=output_dir, average_overlap=False)
 
-    reference_path = 'data/reference_fbck.txt'
+    reference_path = "data/reference_fbck.txt"
     if os.path.isfile(reference_path):
         _data = np.loadtxt(reference_path).T
@@ -175,14 +171,14 @@ def test_reduce_functional_bck(nexus_dir, template_dir):
         _refl = np.loadtxt(reduced_path).T
 
     for i in range(2):
-        assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-9)
+        assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-9
 
     # Error bars from fit might be different
-    assert(np.fabs(np.sum(_data[2]-_refl[2])) < 1e-8)
+    assert np.fabs(np.sum(_data[2] - _refl[2])) < 1e-8
 
     # The reference was computed with a constant dq/q but our approach recalculates
     # it for each run, so we expect a small discrepancy within 1%.
-    assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
+    assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01
 
     # Cleanup
     cleanup_partial_files(output_dir, range(198409, 198417))
 
@@ -190,12 +186,12 @@ def test_reduce_bck_option_mismatch(nexus_dir):
     """
-    Ask for functional background but pass by a background range with
-    only a single region. This will revert to simple averaging over the range.
+    Ask for functional background but pass in a background range with
+    only a single region. This will revert to simple averaging over the range.
""" - template_path = 'data/template.xml' - output_dir = 'data/' - reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt') + template_path = "data/template.xml" + output_dir = "data/" + reduced_path = os.path.join(output_dir, "REFL_198409_combined_data_auto.txt") if os.path.isfile(reduced_path): os.remove(reduced_path) @@ -206,10 +202,9 @@ def test_reduce_bck_option_mismatch(nexus_dir): template_data = template.read_template(template_path, sequence_number) template_data.background_roi = template_data.background_roi[:2] template_data.two_backgrounds = True - workflow.reduce(ws, template_data, output_dir=output_dir, - average_overlap=False) + workflow.reduce(ws, template_data, output_dir=output_dir, average_overlap=False) - reference_path = 'data/reference_rq.txt' + reference_path = "data/reference_rq.txt" if os.path.isfile(reference_path): _data = np.loadtxt(reference_path).T @@ -217,11 +212,11 @@ def test_reduce_bck_option_mismatch(nexus_dir): _refl = np.loadtxt(reduced_path).T for i in range(3): - assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10) + assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10 # The reference was computed with a constant dq/q but our approach recalculates # it for each run, so we expect a small discrepancy within 1%. - assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01) + assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01 # Cleanup cleanup_partial_files(output_dir, range(198409, 198417)) @@ -229,22 +224,21 @@ def test_reduce_bck_option_mismatch(nexus_dir): def test_reduce_workflow_with_overlap_avg(nexus_dir): """ - Test the complete working, but this time we average the point in the - overlap regions. + Test the complete working, but this time we average the point in the + overlap regions. """ - template_path = 'data/template.xml' - output_dir = 'data/' - reduced_path = os.path.join(output_dir, 'REFL_198409_combined_data_auto.txt') + template_path = "data/template.xml" + output_dir = "data/" + reduced_path = os.path.join(output_dir, "REFL_198409_combined_data_auto.txt") if os.path.isfile(reduced_path): os.remove(reduced_path) for i in range(198409, 198417): with amend_config(data_dir=nexus_dir): ws = mtd_api.Load("REF_L_%s" % i) - workflow.reduce(ws, template_path, output_dir=output_dir, - average_overlap=True) + workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=True) - reference_path = 'data/reference_rq_avg.txt' + reference_path = "data/reference_rq_avg.txt" if os.path.isfile(reference_path): _data = np.loadtxt(reference_path).T @@ -252,11 +246,11 @@ def test_reduce_workflow_with_overlap_avg(nexus_dir): _refl = np.loadtxt(reduced_path).T for i in range(3): - assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10) + assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10 # The reference was computed with a constant dq/q but our approach recalculates # it for each run, so we expect a small discrepancy within 1%. 
-    assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
+    assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01
 
     # Cleanup
     cleanup_partial_files(output_dir, range(198409, 198417))
 
@@ -264,38 +258,37 @@ def test_reduce_workflow_with_overlap_avg(nexus_dir):
 
 def test_quick_reduce(nexus_dir):
     """
-    Test the quick reduction workflow
+    Test the quick reduction workflow
     """
     with amend_config(data_dir=nexus_dir):
         ws = mtd_api.Load("REF_L_201284")
         ws_db = mtd_api.Load("REF_L_201045")
 
     _refl = workflow.reduce_explorer(ws, ws_db, center_pixel=145, db_center_pixel=145)
-    reference_path = 'data/reference_r201284_quick.txt'
+    reference_path = "data/reference_r201284_quick.txt"
     if os.path.isfile(reference_path):
         _data = np.loadtxt(reference_path).T
 
     for i in range(3):
-        assert(np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10)
+        assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10
 
 
 def test_reduce_workflow_201282(nexus_dir):
     """
-    Test to reproduce autoreduction output
+    Test to reproduce autoreduction output
     """
-    template_path = 'data/template_201282.xml'
-    output_dir = 'data/'
-    reduced_path = os.path.join(output_dir, 'REFL_201282_combined_data_auto.txt')
+    template_path = "data/template_201282.xml"
+    output_dir = "data/"
+    reduced_path = os.path.join(output_dir, "REFL_201282_combined_data_auto.txt")
     if os.path.isfile(reduced_path):
         os.remove(reduced_path)
 
     for i in range(201282, 201289):
         with amend_config(data_dir=nexus_dir):
             ws = mtd_api.Load("REF_L_%s" % i)
-        workflow.reduce(ws, template_path, output_dir=output_dir,
-                        average_overlap=False)
+        workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False)
 
-    reference_path = 'data/reference_rq_201282.txt'
+    reference_path = "data/reference_rq_201282.txt"
     if os.path.isfile(reference_path):
         _data = np.loadtxt(reference_path).T
@@ -303,30 +296,29 @@ def test_reduce_workflow_201282(nexus_dir):
         _refl = np.loadtxt(reduced_path).T
 
     for i in range(3):
-        assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10)
+        assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10
 
     # The reference was computed with a constant dq/q but our approach recalculates
     # it for each run, so we expect a small discrepancy within 1%.
-    assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
+    assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01
 
 
 def test_background_subtraction(nexus_dir):
     """
-    Test with background subtraction off for the data and on for the normalization
+    Test with background subtraction off for the data and on for the normalization
     """
-    template_path = 'data/template_short_nobck.xml'
-    output_dir = 'data/'
-    reduced_path = os.path.join(output_dir, 'REFL_198382_combined_data_auto.txt')
+    template_path = "data/template_short_nobck.xml"
+    output_dir = "data/"
+    reduced_path = os.path.join(output_dir, "REFL_198382_combined_data_auto.txt")
     if os.path.isfile(reduced_path):
         os.remove(reduced_path)
 
     for i in range(198388, 198390):
         with amend_config(data_dir=nexus_dir):
             ws = mtd_api.Load("REF_L_%s" % i)
-        workflow.reduce(ws, template_path, output_dir=output_dir,
-                        average_overlap=False)
+        workflow.reduce(ws, template_path, output_dir=output_dir, average_overlap=False)
 
-    reference_path = 'data/reference_short_nobck.txt'
+    reference_path = "data/reference_short_nobck.txt"
     if os.path.isfile(reference_path):
         _data = np.loadtxt(reference_path).T
@@ -334,10 +326,10 @@ def test_background_subtraction(nexus_dir):
         _refl = np.loadtxt(reduced_path).T
 
     for i in range(3):
-        assert(np.fabs(np.sum(_data[i]-_refl[i])) < 1e-10)
+        assert np.fabs(np.sum(_data[i] - _refl[i])) < 1e-10
 
     # The reference was computed with a constant dq/q but our approach recalculates
     # it for each run, so we expect a small discrepancy within 1%.
-    assert(np.sum((_data[3]-_refl[3])/_refl[3])/len(_refl[3]) < 0.01)
+    assert np.sum((_data[3] - _refl[3]) / _refl[3]) / len(_refl[3]) < 0.01
 
     cleanup_partial_files(output_dir, range(198382, 198390))
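
For readers following the det_distance -> sample_detector_distance rename, a minimal standalone sketch (not part of the patch) of the pixel-to-angle conversion the renamed attribute feeds. The values come from the notebook cell above; the wavelength is a hypothetical stand-in for illustration, since event_reduction.py derives Q from TOF via self.constant rather than a fixed wavelength.

import numpy as np

# Example values from the workflow-fixed-tthd notebook cell above
_pixel_width = 0.0007             # detector pixel pitch (m)
sample_detector_distance = 1.355  # renamed from det_distance in this PR (m)
dirpix = 261                      # direct-beam pixel
peak_position = 85.5              # reflected-peak pixel

# Offset of the reflected peak from the direct beam on the detector (m)
x0 = _pixel_width * (peak_position - dirpix)

# The detector sees the reflected beam at 2*theta, hence the division by 2;
# the sign encodes whether the sample reflects up or down
theta = np.arctan(x0 / sample_detector_distance) / 2.0
print("theta = %g deg" % (theta * 180 / np.pi))  # ~ -2.6 deg for these values

# Momentum transfer via the standard relation qz = 4*pi*sin(theta)/lambda;
# the wavelength below is a hypothetical value, for illustration only
wavelength = 5.0  # Angstrom
qz = 4.0 * np.pi / wavelength * np.fabs(np.sin(theta))
print("qz = %g 1/Angstrom" % qz)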