diff --git a/tests/unit_tests/steps/__init__.py b/tests/unit_tests/steps/__init__.py index 83eff9b6..409faf66 100644 --- a/tests/unit_tests/steps/__init__.py +++ b/tests/unit_tests/steps/__init__.py @@ -1,5 +1,5 @@ -# ############################################################################## +# ############################################################################ # (c) Crown copyright Met Office. All rights reserved. # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution -# ############################################################################## +# ############################################################################ diff --git a/tests/unit_tests/steps/test_analyse.py b/tests/unit_tests/steps/test_analyse.py index 399bedb1..dcc81359 100644 --- a/tests/unit_tests/steps/test_analyse.py +++ b/tests/unit_tests/steps/test_analyse.py @@ -97,8 +97,12 @@ class Test_gen_symbol_table(object): @pytest.fixture def analysed_files(self): - return [AnalysedDependent(fpath=Path('foo.c'), symbol_defs=['foo_1', 'foo_2'], file_hash=0), - AnalysedDependent(fpath=Path('bar.c'), symbol_defs=['bar_1', 'bar_2'], file_hash=0)] + return [AnalysedDependent(fpath=Path('foo.c'), + symbol_defs=['foo_1', 'foo_2'], + file_hash=0), + AnalysedDependent(fpath=Path('bar.c'), + symbol_defs=['bar_1', 'bar_2'], + file_hash=0)] def test_vanilla(self, analysed_files): result = _gen_symbol_table(analysed_files=analysed_files) @@ -139,12 +143,17 @@ def test_vanilla(self): analysed_files = [ mock.Mock( - spec=AnalysedDependent, fpath=my_file, symbol_deps={'my_func', 'dep1_mod', 'dep2'}, file_deps=set()), + spec=AnalysedDependent, + fpath=my_file, + symbol_deps={'my_func', 'dep1_mod', 'dep2'}, + file_deps=set()), ] _gen_file_deps(analysed_files=analysed_files, symbols=symbols) - assert analysed_files[0].file_deps == {symbols['dep1_mod'], symbols['dep2']} + assert analysed_files[0].file_deps == { + symbols['dep1_mod'], symbols['dep2'] + } # todo: this is fortran-ey, move it? 
@@ -167,19 +176,27 @@ def test_vanilla(self): Path('root_dep.f90'): AnalysedFortran(fpath=Path(), file_hash=0), } - # we want to force this symbol into the build (because it's not used via modules) + # we want to force this symbol into the build (because it's not used + # via modules) unreferenced_deps = ['util'] # the stuff to add to the build tree will be found in here all_analysed_files = { - # root.f90 and root_util.f90 would also be in here but the test doesn't need them - Path('util.f90'): AnalysedFortran(fpath=Path('util.f90'), file_deps={Path('util_dep.f90')}, file_hash=0), - Path('util_dep.f90'): AnalysedFortran(fpath=Path('util_dep.f90'), file_hash=0), + # root.f90 and root_util.f90 would also be in here but the test + # doesn't need them + Path('util.f90'): AnalysedFortran(fpath=Path('util.f90'), + file_deps={Path('util_dep.f90')}, + file_hash=0), + Path('util_dep.f90'): AnalysedFortran(fpath=Path('util_dep.f90'), + file_hash=0), } _add_unreferenced_deps( unreferenced_deps=unreferenced_deps, - symbol_table=symbol_table, all_analysed_files=all_analysed_files, build_tree=build_tree) + symbol_table=symbol_table, + all_analysed_files=all_analysed_files, + build_tree=build_tree + ) assert Path('util.f90') in build_tree assert Path('util_dep.f90') in build_tree @@ -192,33 +209,58 @@ def test_vanilla(self): class Test_parse_files(object): - # todo: test the correct artefacts are marked as current for the cleanup step + # todo: test the correct artefacts are marked as current for the cleanup + # step. # todo: this method should be tested a bit more thoroughly def test_exceptions(self, tmp_path): # make sure parse exceptions do not stop the build - with mock.patch('fab.steps.run_mp', return_value=[(Exception('foo'), None)]), \ - pytest.warns(UserWarning, match="deprecated 'DEPENDS ON:'"): - # The warning "deprecated 'DEPENDS ON:' comment found in fortran code" - # is in "def _parse_files" in "source/steps/analyse.py" - config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) - - # the exception should be suppressed (and logged) and this step should run to completion - _parse_files(config, files=[], fortran_analyser=mock.Mock(), c_analyser=mock.Mock()) + with mock.patch( + 'fab.steps.run_mp', + return_value=[(Exception('foo'), None)] + ), pytest.warns( + UserWarning, + match="deprecated 'DEPENDS ON:'" + ): + # The warning "deprecated 'DEPENDS ON:' comment found in fortran + # code" is in "def _parse_files" in "source/steps/analyse.py" + config = BuildConfig('proj', + ToolBox(), + fab_workspace=tmp_path) + + # the exception should be suppressed (and logged) and this step + # should run to completion + _parse_files(config, + files=[], + fortran_analyser=mock.Mock(), + c_analyser=mock.Mock()) class Test_add_manual_results(object): - # test user-specified analysis results, for when fparser fails to parse a valid file. + # test user-specified analysis results, for when fparser fails to parse a + # valid file. 
def test_vanilla(self): # test normal usage of manual analysis results - workaround = FortranParserWorkaround(fpath=Path('foo.f'), symbol_defs={'foo', }) + workaround = FortranParserWorkaround(fpath=Path('foo.f'), + symbol_defs={'foo', }) analysed_files = set() - with mock.patch('fab.parse.fortran.file_checksum', return_value=HashedFile(None, 123)), \ - pytest.warns(UserWarning, match="SPECIAL MEASURE: injecting user-defined analysis results"): - # This warning "UserWarning: SPECIAL MEASURE: injecting user-defined analysis results" - # is in "def _add_manual_results" in "source/steps/analyse.py" - _add_manual_results(special_measure_analysis_results=[workaround], analysed_files=analysed_files) - - assert analysed_files == {AnalysedFortran(fpath=Path('foo.f'), file_hash=123, symbol_defs={'foo', })} + with mock.patch( + 'fab.parse.fortran.file_checksum', + return_value=HashedFile(None, 123) + ), pytest.warns( + UserWarning, + match="SPECIAL MEASURE: injecting user-defined analysis results" + ): + # This warning "UserWarning: SPECIAL MEASURE: injecting + # user-defined analysis results" is in "def _add_manual_results" + # in "source/steps/analyse.py" + _add_manual_results(special_measure_analysis_results=[workaround], + analysed_files=analysed_files) + + assert analysed_files == { + AnalysedFortran(fpath=Path('foo.f'), + file_hash=123, + symbol_defs={'foo', }) + } diff --git a/tests/unit_tests/steps/test_archive_objects.py b/tests/unit_tests/steps/test_archive_objects.py index 805459e3..06d39369 100644 --- a/tests/unit_tests/steps/test_archive_objects.py +++ b/tests/unit_tests/steps/test_archive_objects.py @@ -28,8 +28,10 @@ def test_for_exes(self): targets = ['prog1', 'prog2'] config = BuildConfig('proj', ToolBox()) - config._artefact_store = {OBJECT_FILES: {target: [f'{target}.o', 'util.o'] - for target in targets}} + config._artefact_store = { + OBJECT_FILES: {target: [f'{target}.o', 'util.o'] + for target in targets} + } mock_result = mock.Mock(returncode=0, return_value=123) with mock.patch('fab.tools.tool.subprocess.run', @@ -49,7 +51,10 @@ def test_for_exes(self): # ensure the correct artefacts were created assert config.artefact_store[OBJECT_ARCHIVES] == { - target: [str(config.build_output / f'{target}.a')] for target in targets} + target: [ + str(config.build_output / f'{target}.a') + ] for target in targets + } def test_for_library(self): '''As used when building an object archive or archiving before linking @@ -57,18 +62,32 @@ def test_for_library(self): ''' config = BuildConfig('proj', ToolBox()) - config._artefact_store = {OBJECT_FILES: {None: ['util1.o', 'util2.o']}} + config._artefact_store = { + OBJECT_FILES: {None: ['util1.o', 'util2.o']} + } mock_result = mock.Mock(returncode=0, return_value=123) - with mock.patch('fab.tools.tool.subprocess.run', - return_value=mock_result) as mock_run_command, \ - pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): - archive_objects(config=config, output_fpath=config.build_output / 'mylib.a') + with mock.patch( + 'fab.tools.tool.subprocess.run', + return_value=mock_result + ) as mock_run_command, pytest.warns( + UserWarning, + match="_metric_send_conn not set, cannot send metrics" + ): + archive_objects(config=config, + output_fpath=config.build_output / 'mylib.a') # ensure the correct command line calls were made - mock_run_command.assert_called_once_with([ - 'ar', 'cr', str(config.build_output / 'mylib.a'), 'util1.o', 'util2.o'], - capture_output=True, env=None, cwd=None, check=False) + 
mock_run_command.assert_called_once_with(
+            [
+                'ar',
+                'cr',
+                str(config.build_output / 'mylib.a'),
+                'util1.o',
+                'util2.o'
+            ],
+            capture_output=True, env=None, cwd=None, check=False
+        )
 
         # ensure the correct artefacts were created
         assert config.artefact_store[OBJECT_ARCHIVES] == {
diff --git a/tests/unit_tests/steps/test_cleanup_prebuilds.py b/tests/unit_tests/steps/test_cleanup_prebuilds.py
index 99a26952..1d4f5720 100644
--- a/tests/unit_tests/steps/test_cleanup_prebuilds.py
+++ b/tests/unit_tests/steps/test_cleanup_prebuilds.py
@@ -11,17 +11,31 @@
 import pytest
 
 from fab.constants import CURRENT_PREBUILDS
-from fab.steps.cleanup_prebuilds import by_age, by_version_age, cleanup_prebuilds, remove_all_unused
+from fab.steps.cleanup_prebuilds import (by_age,
+                                         by_version_age,
+                                         cleanup_prebuilds,
+                                         remove_all_unused)
 from fab.util import get_prebuild_file_groups
 
 
 class TestCleanupPrebuilds(object):
 
     def test_init_no_args(self):
-        with mock.patch('fab.steps.cleanup_prebuilds.file_walk', return_value=[Path('foo.o')]), \
-                pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"):
-            with mock.patch('fab.steps.cleanup_prebuilds.remove_all_unused') as mock_remove_all_unused:
-                cleanup_prebuilds(config=mock.Mock(artefact_store={CURRENT_PREBUILDS: [Path('bar.o')]}))
+        with mock.patch(
+            'fab.steps.cleanup_prebuilds.file_walk',
+            return_value=[Path('foo.o')]
+        ), pytest.warns(
+            UserWarning,
+            match="_metric_send_conn not set, cannot send metrics"
+        ):
+            with mock.patch(
+                'fab.steps.cleanup_prebuilds.remove_all_unused'
+            ) as mock_remove_all_unused:
+                cleanup_prebuilds(
+                    config=mock.Mock(
+                        artefact_store={CURRENT_PREBUILDS: [Path('bar.o')]}
+                    )
+                )
         mock_remove_all_unused.assert_called_once_with(found_files=[Path('foo.o')], current_files=[Path('bar.o')])
 
     def test_init_bad_args(self):
@@ -34,7 +48,9 @@ def test_by_age(self):
             Path('foo.234.o'): datetime(2022, 10, 1),
         }
 
-        result = by_age(older_than=timedelta(days=15), prebuilds_ts=prebuilds_ts, current_files=[])
+        result = by_age(older_than=timedelta(days=15),
+                        prebuilds_ts=prebuilds_ts,
+                        current_files=[])
         assert result == {Path('foo.234.o'), }
 
     def test_by_age_current(self):
@@ -44,7 +60,9 @@
             Path('foo.234.o'): datetime(2022, 10, 1),
         }
 
-        result = by_age(older_than=timedelta(days=15), prebuilds_ts=prebuilds_ts, current_files=prebuilds_ts.keys())
+        result = by_age(older_than=timedelta(days=15),
+                        prebuilds_ts=prebuilds_ts,
+                        current_files=prebuilds_ts.keys())
         assert result == set()
 
     def test_by_version_age(self):
@@ -53,7 +71,9 @@
             Path('foo.234.o'): datetime(2022, 10, 1),
         }
 
-        result = by_version_age(n_versions=1, prebuilds_ts=prebuilds_ts, current_files=[])
+        result = by_version_age(n_versions=1,
+                                prebuilds_ts=prebuilds_ts,
+                                current_files=[])
         assert result == {Path('foo.234.o'), }
 
     def test_by_version_age_current(self):
@@ -63,7 +83,9 @@
             Path('foo.234.o'): datetime(2022, 10, 1),
         }
 
-        result = by_version_age(n_versions=1, prebuilds_ts=prebuilds_ts, current_files=prebuilds_ts.keys())
+        result = by_version_age(n_versions=1,
+                                prebuilds_ts=prebuilds_ts,
+                                current_files=prebuilds_ts.keys())
         assert result == set()
 
 
@@ -81,7 +103,8 @@ def test_remove_all_unused():
         Path('eric.1943.o'),
     ]
 
-    # using autospec makes our mock recieve the self param, which we want to check
+    # using autospec makes our mock receive the self param, which we want to
+    # check
     with mock.patch('os.remove', autospec=True) as mock_remove:
         num_removed = 
remove_all_unused(found_files, current_files) diff --git a/tests/unit_tests/steps/test_compile_c.py b/tests/unit_tests/steps/test_compile_c.py index 93419b41..ff3b909d 100644 --- a/tests/unit_tests/steps/test_compile_c.py +++ b/tests/unit_tests/steps/test_compile_c.py @@ -29,8 +29,13 @@ def fixture_content(tmp_path, tool_box): config = BuildConfig('proj', tool_box, multiprocessing=False, fab_workspace=tmp_path) - analysed_file = AnalysedC(fpath=Path(f'{config.source_root}/foo.c'), file_hash=0) - config._artefact_store[BUILD_TREES] = {None: {analysed_file.fpath: analysed_file}} + analysed_file = AnalysedC(fpath=Path(f'{config.source_root}/foo.c'), + file_hash=0) + config._artefact_store[BUILD_TREES] = { + None: { + analysed_file.fpath: analysed_file + } + } expect_hash = 7435424994 return config, analysed_file, expect_hash @@ -69,12 +74,20 @@ def test_vanilla(self, content): # run the step with mock.patch("fab.steps.compile_c.send_metric") as send_metric: with mock.patch('pathlib.Path.mkdir'): - with mock.patch.dict(os.environ, {'CFLAGS': '-Denv_flag'}), \ - pytest.warns(UserWarning, match="_metric_send_conn not set, " - "cannot send metrics"): + with mock.patch.dict( + os.environ, + {'CFLAGS': '-Denv_flag'} + ), pytest.warns( + UserWarning, + match="_metric_send_conn not set, cannot send metrics" + ): compile_c(config=config, - path_flags=[AddFlags(match='$source/*', - flags=['-I', 'foo/include', '-Dhello'])]) + path_flags=[ + AddFlags( + match='$source/*', + flags=['-I', 'foo/include', '-Dhello'] + ) + ]) # ensure it made the correct command-line call from the child process compiler.run.assert_called_with( @@ -100,7 +113,9 @@ def test_exception_handling(self, content): # mock the run command to raise an exception with pytest.raises(RuntimeError): with mock.patch.object(compiler, "run", side_effect=Exception): - with mock.patch('fab.steps.compile_c.send_metric') as mock_send_metric: + with mock.patch( + 'fab.steps.compile_c.send_metric' + ) as mock_send_metric: with mock.patch('pathlib.Path.mkdir'): compile_c(config=config) diff --git a/tests/unit_tests/steps/test_compile_fortran.py b/tests/unit_tests/steps/test_compile_fortran.py index 64b3cd13..251c41a4 100644 --- a/tests/unit_tests/steps/test_compile_fortran.py +++ b/tests/unit_tests/steps/test_compile_fortran.py @@ -194,11 +194,14 @@ def fixture_content(tool_box): class TestProcessFile: - # Developer's note: If the "mods combo hash" changes you'll get an unhelpful message from pytest. + # Developer's note: If the "mods combo hash" changes you'll get an + # unhelpful message from pytest. # It'll come from this function but pytest won't tell you that. - # You'll have to set a breakpoint here to see the changed hash in calls to mock_copy. + # You'll have to set a breakpoint here to see the changed hash in calls to + # mock_copy. def ensure_mods_stored(self, mock_copy, mods_combo_hash): - # Make sure the newly created mod files were copied TO the prebuilds folder. + # Make sure the newly created mod files were copied TO the prebuilds + # folder. 
output_path = Path('/fab/proj/build_output') prebuild_path = output_path / '_prebuild' mock_copy.assert_has_calls( @@ -212,14 +215,15 @@ def ensure_mods_stored(self, mock_copy, mods_combo_hash): ) def ensure_mods_restored(self, mock_copy, mods_combo_hash): - # make sure previously built mod files were copied FROM the prebuilds folder + # make sure previously built mod files were copied FROM the prebuilds + # folder output_path = Path('/fab/proj/build_output') prebuild_path = output_path / '_prebuild' mock_copy.assert_has_calls( calls=[ - call(prebuild_path/ f'mod_def_1.{mods_combo_hash}.mod', + call(prebuild_path / f'mod_def_1.{mods_combo_hash}.mod', output_path / 'mod_def_1.mod'), - call(prebuild_path/ f'mod_def_2.{mods_combo_hash}.mod', + call(prebuild_path / f'mod_def_2.{mods_combo_hash}.mod', output_path / 'mod_def_2.mod'), ], any_order=True, @@ -283,8 +287,10 @@ def test_with_prebuild(self, content): obj_combo_hash, mods_combo_hash) = content - with mock.patch('pathlib.Path.exists', - return_value=True): # mod def files and obj file all exist + with mock.patch( + 'pathlib.Path.exists', + return_value=True + ): # mod def files and obj file all exist with mock.patch( 'fab.steps.compile_fortran.compile_file' ) as mock_compile_file: @@ -313,9 +319,11 @@ def test_with_prebuild(self, content): } def test_file_hash(self, content): - # Changing the source hash must change the combo hash for the mods and obj. - # Note: This test adds 1 to the analysed files hash. We're using checksums so - # the resulting object file and mod file combo hashes can be expected to increase by 1 too. + # Changing the source hash must change the combo hash for the mods and + # obj. + # Note: This test adds 1 to the analysed files hash. We're using + # checksums so the resulting object file and mod file combo hashes can + # be expected to increase by 1 too. (mp_common_args, flags, analysed_file, @@ -363,7 +371,8 @@ def test_file_hash(self, content): } def test_flags_hash(self, content): - # changing the flags must change the object combo hash, but not the mods combo hash + # changing the flags must change the object combo hash, but not the + # mods combo hash (mp_common_args, flags, analysed_file, @@ -410,7 +419,8 @@ def test_flags_hash(self, content): } def test_deps_hash(self, content): - # Changing the checksums of any mod dependency must change the object combo hash but not the mods combo hash. + # Changing the checksums of any mod dependency must change the object + # combo hash but not the mods combo hash. # Note the difference between mods we depend on and mods we define. # The mods we define are not affected by the mods we depend on. 
(mp_common_args, @@ -570,8 +580,10 @@ def test_mod_missing(self, content): obj_combo_hash, mods_combo_hash) = content - with mock.patch('pathlib.Path.exists', - side_effect=[False, True, True]): # one mod file missing + with mock.patch( + 'pathlib.Path.exists', + side_effect=[False, True, True] + ): # one mod file missing with mock.patch( 'fab.steps.compile_fortran.compile_file' ) as mock_compile_file: @@ -607,10 +619,16 @@ def test_mod_missing(self, content): def test_obj_missing(self, content): # the object file we define is not present, so we must recompile - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content + (mp_common_args, + flags, + analysed_file, + obj_combo_hash, + mods_combo_hash) = content - with mock.patch('pathlib.Path.exists', - side_effect=[True, True, False]): # object file missing + with mock.patch( + 'pathlib.Path.exists', + side_effect=[True, True, False] + ): # object file missing with mock.patch( 'fab.steps.compile_fortran.compile_file' ) as mock_compile_file: diff --git a/tests/unit_tests/steps/test_grab.py b/tests/unit_tests/steps/test_grab.py index 5cdee6de..febb65cf 100644 --- a/tests/unit_tests/steps/test_grab.py +++ b/tests/unit_tests/steps/test_grab.py @@ -64,7 +64,9 @@ def test_no_revision(self): UserWarning, match="_metric_send_conn not set, cannot send metrics" ): - fcm_export(config=mock_config, src=source_url, dst_label=dst_label) + fcm_export(config=mock_config, + src=source_url, + dst_label=dst_label) mock_run.assert_called_once_with( [