From 07ec96a02596e171d64ec22187cbf751d97f57a7 Mon Sep 17 00:00:00 2001 From: Joerg Henrichs Date: Thu, 4 Apr 2024 23:36:02 +1100 Subject: [PATCH 1/9] RSync to respect symlinks (#298) --- source/fab/steps/grab/__init__.py | 2 +- tests/unit_tests/steps/test_grab.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/source/fab/steps/grab/__init__.py b/source/fab/steps/grab/__init__.py index eed72413..ed4bcf91 100644 --- a/source/fab/steps/grab/__init__.py +++ b/source/fab/steps/grab/__init__.py @@ -24,5 +24,5 @@ def call_rsync(src: Union[str, Path], dst: Union[str, Path]): if not src.endswith('/'): src += '/' - command = ['rsync', '--times', '--stats', '-ru', src, str(dst)] + command = ['rsync', '--times', '--links', '--stats', '-ru', src, str(dst)] return run_command(command) diff --git a/tests/unit_tests/steps/test_grab.py b/tests/unit_tests/steps/test_grab.py index 57878b22..cb4292db 100644 --- a/tests/unit_tests/steps/test_grab.py +++ b/tests/unit_tests/steps/test_grab.py @@ -33,7 +33,8 @@ def _common(self, grab_src, expect_grab_src): grab_folder(mock_config, src=grab_src, dst_label=dst) expect_dst = mock_config.source_root / dst - mock_run.assert_called_once_with(['rsync', '--times', '--stats', '-ru', expect_grab_src, str(expect_dst)]) + mock_run.assert_called_once_with(['rsync', '--times', '--links', '--stats', + '-ru', expect_grab_src, str(expect_dst)]) class TestGrabFcm(object): From 4649173903997440b7546eb495732602d5e0a492 Mon Sep 17 00:00:00 2001 From: Matthew Hambley Date: Wed, 17 Apr 2024 13:45:21 +0100 Subject: [PATCH 2/9] Migrated to PyData Sphinx theme (#299) * Migrated to PyData Sphinx theme and fixed API generation. * Downgrade theme version to one which is available. * Bump depricated actions in workflow. * Prefer a more modern flake8. * Downgrade flake8 to pre v7.0.0. * Another go at guessing the correct flake8 version. --- .github/workflows/build.yml | 4 +- docs/source/_templates/crown-copyright.html | 11 +++++ docs/source/api.rst | 10 +++++ docs/source/conf.py | 50 +++++++++++---------- docs/source/index.rst | 2 +- pyproject.toml | 7 ++- 6 files changed, 55 insertions(+), 29 deletions(-) create mode 100644 docs/source/_templates/crown-copyright.html create mode 100644 docs/source/api.rst diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ded9d856..936c7bd9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,9 +12,9 @@ jobs: python-version: ['3.7', '3.8', '3.9', '3.10'] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Setup Compiler diff --git a/docs/source/_templates/crown-copyright.html b/docs/source/_templates/crown-copyright.html new file mode 100644 index 00000000..44657219 --- /dev/null +++ b/docs/source/_templates/crown-copyright.html @@ -0,0 +1,11 @@ +{# Crown copyright is displayed differently to normal. #} +{# Configured from conf.py as per usual. #} +{% if show_copyright and copyright %} + +{% endif %} diff --git a/docs/source/api.rst b/docs/source/api.rst new file mode 100644 index 00000000..6a5a1bbd --- /dev/null +++ b/docs/source/api.rst @@ -0,0 +1,10 @@ +API Documentation +================= + +This API documentation is generated from comments within the source code. + +.. 
autosummary:: + :toctree: api + :recursive: + + fab diff --git a/docs/source/conf.py b/docs/source/conf.py index 68762f24..d036262f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,8 +17,8 @@ # -- Project information ----------------------------------------------------- -project = 'fab' -copyright = '2023' +project = 'Fab' +copyright = '2024 Met Office. All rights reserved.' author = 'Fab Team' # The full version, including alpha/beta/rc tags @@ -52,24 +52,35 @@ exclude_patterns = [] -# -- Options for HTML output ------------------------------------------------- +# -- Autodoc ----------------------------------------------------------------- -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# +autodoc_default_options = { + 'members': True, + 'show-inheritane': True +} -# ugly -# html_theme = "classic" +autoclass_content = 'both' -# poor contrast between title, h1 & h2 -# html_theme = "sphinxdoc" -# html_theme = "sphinx_rtd_theme" -# html_theme = 'python_docs_theme' -# good contrast between title, h1 & h2 -# html_theme = 'alabaster' -html_theme = 'sphinx_material' +# -- Options for HTML output ------------------------------------------------- +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pydata_sphinx_theme' + +html_theme_options = { + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/metomi/fab", + "icon": "fa-brands fa-github" + } + ], + "footer_start": ["crown-copyright"], + "footer_center": ["sphinx-version"], + "footer_end": ["theme-version"], +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -93,12 +104,3 @@ # include default values in argument descriptions typehints_defaults = 'braces-after' - -# needed when using material theme -html_sidebars = { - "**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"] -} -# needed when not using material theme -# html_sidebars = { -# "**": ["globaltoc.html", "searchbox.html"] -# } diff --git a/docs/source/index.rst b/docs/source/index.rst index 0b5458c7..fc03936e 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -81,7 +81,7 @@ See also writing_config advanced_config features - Api Reference + Api Reference development glossary genindex diff --git a/pyproject.toml b/pyproject.toml index 9e242d43..76424ead 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,8 +22,11 @@ classifiers = [ c-language = ['python-clang'] plots = ['matplotlib'] tests = ['pytest', 'pytest-cov', 'pytest-mock'] -checks = ['flake8', 'mypy'] -docs = ['sphinx', 'sphinx-material', 'sphinx-autodoc-typehints', 'sphinx-copybutton'] +checks = ['flake8>=5.0.4', 'mypy'] +docs = ['sphinx', + 'pydata-sphinx-theme>=0.13.3', + 'sphinx-autodoc-typehints', + 'sphinx-copybutton'] dev = ['sci-fab[plots, tests, checks, docs]'] [project.scripts] From 4eb9a4d3380013ab31163a704bfceb6ec9f7c281 Mon Sep 17 00:00:00 2001 From: Joerg Henrichs Date: Thu, 25 Apr 2024 17:30:33 +1000 Subject: [PATCH 3/9] Create artefact store object (#301) --- docs/source/advanced_config.rst | 4 +- source/fab/artefacts.py | 24 ++++-- source/fab/build_config.py | 39 +++++----- source/fab/parse/fortran_common.py | 2 - source/fab/steps/analyse.py | 4 +- source/fab/steps/archive_objects.py | 4 +- source/fab/steps/c_pragma_injector.py | 4 +- source/fab/steps/cleanup_prebuilds.py | 8 +- 
source/fab/steps/compile_c.py | 4 +- source/fab/steps/compile_fortran.py | 4 +- source/fab/steps/find_source_files.py | 2 +- source/fab/steps/link.py | 6 +- source/fab/steps/preprocess.py | 6 +- source/fab/steps/psyclone.py | 6 +- source/fab/steps/root_inc_files.py | 2 +- .../CFortranInterop/test_CFortranInterop.py | 4 +- .../CUserHeader/test_CUserHeader.py | 4 +- .../test_FortranDependencies.py | 4 +- .../test_FortranPreProcess.py | 4 +- tests/system_tests/MinimalC/test_MinimalC.py | 4 +- .../MinimalFortran/test_MinimalFortran.py | 4 +- .../unit_tests/steps/test_archive_objects.py | 4 +- .../steps/test_cleanup_prebuilds.py | 2 +- tests/unit_tests/steps/test_compile_c.py | 5 +- tests/unit_tests/steps/test_link.py | 2 +- tests/unit_tests/steps/test_root_inc_files.py | 6 +- tests/unit_tests/test_artefacts.py | 74 ++++++++++++++----- tests/unit_tests/test_build_config.py | 4 +- 28 files changed, 147 insertions(+), 93 deletions(-) diff --git a/docs/source/advanced_config.rst b/docs/source/advanced_config.rst index 0e8a04d1..0babf822 100644 --- a/docs/source/advanced_config.rst +++ b/docs/source/advanced_config.rst @@ -315,7 +315,7 @@ which most Fab steps accept. (See :ref:`Overriding default collections`) @step def custom_step(state): - state._artefact_store['custom_artefacts'] = do_something(state._artefact_store['step 1 artefacts']) + state.artefact_store['custom_artefacts'] = do_something(state.artefact_store['step 1 artefacts']) with BuildConfig(project_label='') as state: @@ -332,7 +332,7 @@ Steps have access to multiprocessing methods through the @step def custom_step(state): - input_files = artefact_store['custom_artefacts'] + input_files = state.artefact_store['custom_artefacts'] results = run_mp(state, items=input_files, func=do_something) diff --git a/source/fab/artefacts.py b/source/fab/artefacts.py index 7953f947..0749e781 100644 --- a/source/fab/artefacts.py +++ b/source/fab/artefacts.py @@ -15,7 +15,7 @@ from pathlib import Path from typing import Iterable, Union, Dict, List -from fab.constants import BUILD_TREES +from fab.constants import BUILD_TREES, CURRENT_PREBUILDS from fab.dep_tree import filter_source_tree, AnalysedDependent from fab.util import suffix_filter @@ -32,7 +32,8 @@ def __call__(self, artefact_store): The artefact store from which to retrieve. """ - pass + raise NotImplementedError(f"__call__ must be implemented for " + f"'{type(self).__name__}'.") class CollectionGetter(ArtefactsGetter): @@ -53,7 +54,6 @@ def __init__(self, collection_name): self.collection_name = collection_name def __call__(self, artefact_store): - super().__call__(artefact_store) return artefact_store.get(self.collection_name, []) @@ -84,7 +84,6 @@ def __init__(self, collections: Iterable[Union[str, ArtefactsGetter]]): # todo: ensure the labelled values are iterables def __call__(self, artefact_store: Dict): - super().__call__(artefact_store) # todo: this should be a set, in case a file appears in multiple collections result = [] for collection in self.collections: @@ -118,7 +117,6 @@ def __init__(self, collection_name: str, suffix: Union[str, List[str]]): self.suffixes = [suffix] if isinstance(suffix, str) else suffix def __call__(self, artefact_store): - super().__call__(artefact_store) # todo: returning an empty list is probably "dishonest" if the collection doesn't exist - return None instead? 
fpaths: Iterable[Path] = artefact_store.get(self.collection_name, []) return suffix_filter(fpaths, self.suffixes) @@ -149,7 +147,6 @@ def __init__(self, suffix: Union[str, List[str]], collection_name: str = BUILD_T self.suffixes = [suffix] if isinstance(suffix, str) else suffix def __call__(self, artefact_store): - super().__call__(artefact_store) build_trees = artefact_store[self.collection_name] @@ -158,3 +155,18 @@ def __call__(self, artefact_store): build_lists[root] = filter_source_tree(source_tree=tree, suffixes=self.suffixes) return build_lists + + +class ArtefactStore(dict): + '''This object stores artefacts (which can be of any type). Each artefact + is indexed by a string. + ''' + def __init__(self): + super().__init__() + self.reset() + + def reset(self): + '''Clears the artefact store (but does not delete any files). + ''' + self.clear() + self[CURRENT_PREBUILDS] = set() diff --git a/source/fab/build_config.py b/source/fab/build_config.py index ecaa660a..bddb6708 100644 --- a/source/fab/build_config.py +++ b/source/fab/build_config.py @@ -18,16 +18,18 @@ from multiprocessing import cpu_count from pathlib import Path from string import Template -from typing import List, Optional, Dict, Any, Iterable +from typing import List, Optional, Iterable +from fab.artefacts import ArtefactStore from fab.constants import BUILD_OUTPUT, SOURCE_ROOT, PREBUILD, CURRENT_PREBUILDS from fab.metrics import send_metric, init_metrics, stop_metrics, metrics_summary +from fab.steps.cleanup_prebuilds import CLEANUP_COUNT, cleanup_prebuilds from fab.util import TimerLogger, by_type, get_fab_workspace logger = logging.getLogger(__name__) -class BuildConfig(object): +class BuildConfig(): """ Contains and runs a list of build steps. @@ -105,9 +107,10 @@ def __init__(self, project_label: str, multiprocessing: bool = True, n_procs: Op # todo: should probably pull the artefact store out of the config # runtime - # todo: either make this public, add get/setters, or extract into a class. - self._artefact_store: Dict[str, Any] = {} - self.init_artefact_store() # note: the artefact store is reset with every call to run() + self._artefact_store = ArtefactStore() + + self._build_timer = None + self._start_time = None def __enter__(self): @@ -130,8 +133,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): if not exc_type: # None if there's no error. - from fab.steps.cleanup_prebuilds import CLEANUP_COUNT, cleanup_prebuilds - if CLEANUP_COUNT not in self._artefact_store: + if CLEANUP_COUNT not in self.artefact_store: logger.info("no housekeeping step was run, using a default hard cleanup") cleanup_prebuilds(config=self, all_unused=True) @@ -142,19 +144,23 @@ def __exit__(self, exc_type, exc_val, exc_tb): self._finalise_logging() @property - def build_output(self): - return self.project_workspace / BUILD_OUTPUT + def artefact_store(self) -> ArtefactStore: + ''':returns: the Artefact instance for this configuration. + ''' + return self._artefact_store - def init_artefact_store(self): - # there's no point writing to this from a child process of Step.run_mp() because you'll be modifying a copy. - self._artefact_store = {CURRENT_PREBUILDS: set()} + @property + def build_output(self) -> Path: + ''':returns: the build output path. + ''' + return self.project_workspace / BUILD_OUTPUT def add_current_prebuilds(self, artefacts: Iterable[Path]): """ Mark the given file paths as being current prebuilds, not to be cleaned during housekeeping. 
""" - self._artefact_store[CURRENT_PREBUILDS].update(artefacts) + self.artefact_store[CURRENT_PREBUILDS].update(artefacts) def _run_prep(self): self._init_logging() @@ -168,7 +174,7 @@ def _run_prep(self): init_metrics(metrics_folder=self.metrics_folder) # note: initialising here gives a new set of artefacts each run - self.init_artefact_store() + self.artefact_store.reset() def _prep_folders(self): self.source_root.mkdir(parents=True, exist_ok=True) @@ -210,7 +216,7 @@ def _finalise_metrics(self, start_time, steps_timer): # todo: better name? perhaps PathFlags? -class AddFlags(object): +class AddFlags(): """ Add command-line flags when our path filter matches. Generally used inside a :class:`~fab.build_config.FlagsConfig`. @@ -265,14 +271,13 @@ def run(self, fpath: Path, input_flags: List[str], config): input_flags += add_flags -class FlagsConfig(object): +class FlagsConfig(): """ Return command-line flags for a given path. Simply allows appending flags but may evolve to also replace and remove flags. """ - def __init__(self, common_flags: Optional[List[str]] = None, path_flags: Optional[List[AddFlags]] = None): """ :param common_flags: diff --git a/source/fab/parse/fortran_common.py b/source/fab/parse/fortran_common.py index bc6aa972..f35c243b 100644 --- a/source/fab/parse/fortran_common.py +++ b/source/fab/parse/fortran_common.py @@ -134,8 +134,6 @@ def run(self, fpath: Path) \ # find things in the node tree analysed_file = self.walk_nodes(fpath=fpath, file_hash=file_hash, node_tree=node_tree) - - analysis_fpath = self._get_analysis_fpath(fpath, file_hash) analysed_file.save(analysis_fpath) return analysed_file, analysis_fpath diff --git a/source/fab/steps/analyse.py b/source/fab/steps/analyse.py index 84dc39b0..26c6cdc8 100644 --- a/source/fab/steps/analyse.py +++ b/source/fab/steps/analyse.py @@ -167,7 +167,7 @@ def analyse( c_analyser._config = config # parse - files: List[Path] = source_getter(config._artefact_store) + files: List[Path] = source_getter(config.artefact_store) analysed_files = _parse_files(config, files=files, fortran_analyser=fortran_analyser, c_analyser=c_analyser) _add_manual_results(special_measure_analysis_results, analysed_files) @@ -206,7 +206,7 @@ def analyse( _add_unreferenced_deps(unreferenced_deps, symbol_table, project_source_tree, build_tree) validate_dependencies(build_tree) - config._artefact_store[BUILD_TREES] = build_trees + config.artefact_store[BUILD_TREES] = build_trees def _analyse_dependencies(analysed_files: Iterable[AnalysedDependent]): diff --git a/source/fab/steps/archive_objects.py b/source/fab/steps/archive_objects.py index d308ee2d..c450af4b 100644 --- a/source/fab/steps/archive_objects.py +++ b/source/fab/steps/archive_objects.py @@ -95,14 +95,14 @@ def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = Non output_fpath = str(output_fpath) if output_fpath else None output_collection = output_collection - target_objects = source_getter(config._artefact_store) + target_objects = source_getter(config.artefact_store) assert target_objects.keys() if output_fpath and list(target_objects.keys()) != [None]: raise ValueError("You must not specify an output path (library) when there are root symbols (exes)") if not output_fpath and list(target_objects.keys()) == [None]: raise ValueError("You must specify an output path when building a library.") - output_archives = config._artefact_store.setdefault(output_collection, {}) + output_archives = config.artefact_store.setdefault(output_collection, {}) for root, objects in 
target_objects.items(): if root: diff --git a/source/fab/steps/c_pragma_injector.py b/source/fab/steps/c_pragma_injector.py index a79431c3..d30321d2 100644 --- a/source/fab/steps/c_pragma_injector.py +++ b/source/fab/steps/c_pragma_injector.py @@ -43,9 +43,9 @@ def c_pragma_injector(config, source: Optional[ArtefactsGetter] = None, output_n source_getter = source or DEFAULT_SOURCE_GETTER output_name = output_name or PRAGMAD_C - files = source_getter(config._artefact_store) + files = source_getter(config.artefact_store) results = run_mp(config, items=files, func=_process_artefact) - config._artefact_store[output_name] = list(results) + config.artefact_store[output_name] = list(results) def _process_artefact(fpath: Path): diff --git a/source/fab/steps/cleanup_prebuilds.py b/source/fab/steps/cleanup_prebuilds.py index e62120d9..8d1548b2 100644 --- a/source/fab/steps/cleanup_prebuilds.py +++ b/source/fab/steps/cleanup_prebuilds.py @@ -63,7 +63,7 @@ def cleanup_prebuilds( elif all_unused: num_removed = remove_all_unused( - found_files=prebuild_files, current_files=config._artefact_store[CURRENT_PREBUILDS]) + found_files=prebuild_files, current_files=config.artefact_store[CURRENT_PREBUILDS]) else: # get the file access time for every artefact @@ -71,15 +71,15 @@ def cleanup_prebuilds( dict(zip(prebuild_files, run_mp(config, prebuild_files, get_access_time))) # type: ignore # work out what to delete - to_delete = by_age(older_than, prebuilds_ts, current_files=config._artefact_store[CURRENT_PREBUILDS]) - to_delete |= by_version_age(n_versions, prebuilds_ts, current_files=config._artefact_store[CURRENT_PREBUILDS]) + to_delete = by_age(older_than, prebuilds_ts, current_files=config.artefact_store[CURRENT_PREBUILDS]) + to_delete |= by_version_age(n_versions, prebuilds_ts, current_files=config.artefact_store[CURRENT_PREBUILDS]) # delete them all run_mp(config, to_delete, os.remove) num_removed = len(to_delete) logger.info(f'removed {num_removed} prebuild files') - config._artefact_store[CLEANUP_COUNT] = num_removed + config.artefact_store[CLEANUP_COUNT] = num_removed def by_age(older_than: Optional[timedelta], diff --git a/source/fab/steps/compile_c.py b/source/fab/steps/compile_c.py index 09f1eee1..ba2be4dd 100644 --- a/source/fab/steps/compile_c.py +++ b/source/fab/steps/compile_c.py @@ -83,7 +83,7 @@ def compile_c(config, common_flags: Optional[List[str]] = None, source_getter = source or DEFAULT_SOURCE_GETTER # gather all the source to compile, for all build trees, into one big lump - build_lists: Dict = source_getter(config._artefact_store) + build_lists: Dict = source_getter(config.artefact_store) to_compile: list = sum(build_lists.values(), []) logger.info(f"compiling {len(to_compile)} c files") @@ -101,7 +101,7 @@ def compile_c(config, common_flags: Optional[List[str]] = None, config.add_current_prebuilds(prebuild_files) # record the compilation results for the next step - store_artefacts(compiled_c, build_lists, config._artefact_store) + store_artefacts(compiled_c, build_lists, config.artefact_store) # todo: very similar code in fortran compiler diff --git a/source/fab/steps/compile_fortran.py b/source/fab/steps/compile_fortran.py index f84e71fa..706e598e 100644 --- a/source/fab/steps/compile_fortran.py +++ b/source/fab/steps/compile_fortran.py @@ -84,7 +84,7 @@ def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = Non mod_hashes: Dict[str, int] = {} # get all the source to compile, for all build trees, into one big lump - build_lists: Dict[str, List] = 
source_getter(config._artefact_store) + build_lists: Dict[str, List] = source_getter(config.artefact_store) # build the arguments passed to the multiprocessing function mp_common_args = MpCommonArgs( @@ -119,7 +119,7 @@ def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = Non logger.info(f"stage 2 compiled {len(compiled_this_pass)} files") # record the compilation results for the next step - store_artefacts(compiled, build_lists, config._artefact_store) + store_artefacts(compiled, build_lists, config.artefact_store) def handle_compiler_args(common_flags=None, path_flags=None): diff --git a/source/fab/steps/find_source_files.py b/source/fab/steps/find_source_files.py index 0e417ccb..25191d5f 100644 --- a/source/fab/steps/find_source_files.py +++ b/source/fab/steps/find_source_files.py @@ -145,4 +145,4 @@ def find_source_files(config, source_root=None, output_collection="all_source", if not filtered_fpaths: raise RuntimeError("no source files found after filtering") - config._artefact_store[output_collection] = filtered_fpaths + config.artefact_store[output_collection] = filtered_fpaths diff --git a/source/fab/steps/link.py b/source/fab/steps/link.py index f44275ac..571ff6be 100644 --- a/source/fab/steps/link.py +++ b/source/fab/steps/link.py @@ -78,11 +78,11 @@ def link_exe(config, linker: Optional[str] = None, flags=None, source: Optional[ flags = flags or [] source_getter = source or DefaultLinkerSource() - target_objects = source_getter(config._artefact_store) + target_objects = source_getter(config.artefact_store) for root, objects in target_objects.items(): exe_path = config.project_workspace / f'{root}' call_linker(linker=linker, flags=flags, filename=str(exe_path), objects=objects) - config._artefact_store.setdefault(EXECUTABLES, []).append(exe_path) + config.artefact_store.setdefault(EXECUTABLES, []).append(exe_path) # todo: the bit about Dict[None, object_files] seems too obscure - try to rethink this. @@ -123,7 +123,7 @@ def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, flags.append(f) # We expect a single build target containing the whole codebase, with no name (as it's not a root symbol). 
- target_objects = source_getter(config._artefact_store) + target_objects = source_getter(config.artefact_store) assert list(target_objects.keys()) == [None] objects = target_objects[None] diff --git a/source/fab/steps/preprocess.py b/source/fab/steps/preprocess.py index 08d65949..ffc3d406 100644 --- a/source/fab/steps/preprocess.py +++ b/source/fab/steps/preprocess.py @@ -88,7 +88,7 @@ def pre_processor(config: BuildConfig, preprocessor: str, check_for_errors(results, caller_label=name) log_or_dot_finish(logger) - config._artefact_store[output_collection] = list(by_type(results, Path)) + config.artefact_store[output_collection] = list(by_type(results, Path)) def process_artefact(arg: Tuple[Path, MpCommonArgs]): @@ -192,7 +192,7 @@ def preprocess_fortran(config: BuildConfig, source: Optional[ArtefactsGetter] = """ source_getter = source or SuffixFilter('all_source', ['.F90', '.f90']) - source_files = source_getter(config._artefact_store) + source_files = source_getter(config.artefact_store) F90s = suffix_filter(source_files, '.F90') f90s = suffix_filter(source_files, '.f90') @@ -257,7 +257,7 @@ def preprocess_c(config: BuildConfig, source=None, **kwargs): """ source_getter = source or DefaultCPreprocessorSource() - source_files = source_getter(config._artefact_store) + source_files = source_getter(config.artefact_store) pre_processor( config, diff --git a/source/fab/steps/psyclone.py b/source/fab/steps/psyclone.py index eec88a04..d7b1cdba 100644 --- a/source/fab/steps/psyclone.py +++ b/source/fab/steps/psyclone.py @@ -50,7 +50,7 @@ def preprocess_x90(config, common_flags: Optional[List[str]] = None): if fpp_flag not in common_flags: common_flags.append(fpp_flag) - source_files = SuffixFilter('all_source', '.X90')(config._artefact_store) + source_files = SuffixFilter('all_source', '.X90')(config.artefact_store) pre_processor( config, @@ -132,7 +132,7 @@ def psyclone(config, kernel_roots: Optional[List[Path]] = None, source_getter = source_getter or DEFAULT_SOURCE_GETTER overrides_folder = overrides_folder - x90s = source_getter(config._artefact_store) + x90s = source_getter(config.artefact_store) # get the data for child processes to calculate prebuild hashes prebuild_analyses = _analysis_for_prebuilds(config, x90s, transformation_script, kernel_roots) @@ -153,7 +153,7 @@ def psyclone(config, kernel_roots: Optional[List[Path]] = None, prebuild_files: List[Path] = list(chain(*by_type(prebuilds, List))) # record the output files in the artefact store for further processing - config._artefact_store['psyclone_output'] = output_files + config.artefact_store['psyclone_output'] = output_files outputs_str = "\n".join(map(str, output_files)) logger.debug(f'psyclone outputs:\n{outputs_str}\n') diff --git a/source/fab/steps/root_inc_files.py b/source/fab/steps/root_inc_files.py index 6dbbc648..2bc9999a 100644 --- a/source/fab/steps/root_inc_files.py +++ b/source/fab/steps/root_inc_files.py @@ -47,7 +47,7 @@ def root_inc_files(config): # inc files all go in the root - they're going to be removed altogether, soon inc_copied = set() - for fpath in suffix_filter(config._artefact_store["all_source"], [".inc"]): + for fpath in suffix_filter(config.artefact_store["all_source"], [".inc"]): # don't copy from the output root to the output root! # this is currently unlikely to happen but did in the past, and caused problems. 
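The user-facing effect of this patch is that the artefact store moves behind a public
artefact_store property on BuildConfig, backed by the new ArtefactStore class (a dict
pre-seeded with the CURRENT_PREBUILDS set and offering reset()), so custom steps no
longer reach into the private _artefact_store attribute. A minimal sketch of a custom
step under the new accessor; the collection names and the do_something() helper are
illustrative, taken from the advanced_config.rst hunk earlier in this patch:

    from fab.steps import step

    @step
    def custom_step(state):
        # state.artefact_store is the public ArtefactStore instance;
        # read one collection and derive another from it.
        source = state.artefact_store['step 1 artefacts']
        state.artefact_store['custom_artefacts'] = do_something(source)
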
diff --git a/tests/system_tests/CFortranInterop/test_CFortranInterop.py b/tests/system_tests/CFortranInterop/test_CFortranInterop.py index 483b6968..ec708e68 100644 --- a/tests/system_tests/CFortranInterop/test_CFortranInterop.py +++ b/tests/system_tests/CFortranInterop/test_CFortranInterop.py @@ -46,10 +46,10 @@ def test_CFortranInterop(tmp_path): # '/lib/x86_64-linux-gnu/libgfortran.so.5', # ] - assert len(config._artefact_store[EXECUTABLES]) == 1 + assert len(config.artefact_store[EXECUTABLES]) == 1 # run - command = [str(config._artefact_store[EXECUTABLES][0])] + command = [str(config.artefact_store[EXECUTABLES][0])] res = subprocess.run(command, capture_output=True) output = res.stdout.decode() assert output == ''.join(open(PROJECT_SOURCE / 'expected.exec.txt').readlines()) diff --git a/tests/system_tests/CUserHeader/test_CUserHeader.py b/tests/system_tests/CUserHeader/test_CUserHeader.py index d8ae6772..04dac386 100644 --- a/tests/system_tests/CUserHeader/test_CUserHeader.py +++ b/tests/system_tests/CUserHeader/test_CUserHeader.py @@ -35,10 +35,10 @@ def test_CUseHeader(tmp_path): link_exe(config, linker='gcc', flags=['-lgfortran']), - assert len(config._artefact_store[EXECUTABLES]) == 1 + assert len(config.artefact_store[EXECUTABLES]) == 1 # run - command = [str(config._artefact_store[EXECUTABLES][0])] + command = [str(config.artefact_store[EXECUTABLES][0])] res = subprocess.run(command, capture_output=True) output = res.stdout.decode() assert output == ''.join(open(PROJECT_SOURCE / 'expected.exec.txt').readlines()) diff --git a/tests/system_tests/FortranDependencies/test_FortranDependencies.py b/tests/system_tests/FortranDependencies/test_FortranDependencies.py index 6971bf83..e5d22f2b 100644 --- a/tests/system_tests/FortranDependencies/test_FortranDependencies.py +++ b/tests/system_tests/FortranDependencies/test_FortranDependencies.py @@ -33,11 +33,11 @@ def test_FortranDependencies(tmp_path): compile_fortran(config, common_flags=['-c']), link_exe(config, linker='gcc', flags=['-lgfortran']), - assert len(config._artefact_store[EXECUTABLES]) == 2 + assert len(config.artefact_store[EXECUTABLES]) == 2 # run both exes output = set() - for exe in config._artefact_store[EXECUTABLES]: + for exe in config.artefact_store[EXECUTABLES]: res = subprocess.run(str(exe), capture_output=True) output.add(res.stdout.decode()) diff --git a/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py b/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py index f45ea74c..0888f536 100644 --- a/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py +++ b/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py @@ -36,13 +36,13 @@ def test_FortranPreProcess(tmp_path): # stay stay_config = build(fab_workspace=tmp_path, fpp_flags=['-P', '-DSHOULD_I_STAY=yes']) - stay_exe = stay_config._artefact_store[EXECUTABLES][0] + stay_exe = stay_config.artefact_store[EXECUTABLES][0] stay_res = subprocess.run(str(stay_exe), capture_output=True) assert stay_res.stdout.decode().strip() == 'I should stay' # go go_config = build(fab_workspace=tmp_path, fpp_flags=['-P']) - go_exe = go_config._artefact_store[EXECUTABLES][0] + go_exe = go_config.artefact_store[EXECUTABLES][0] go_res = subprocess.run(str(go_exe), capture_output=True) assert go_res.stdout.decode().strip() == 'I should go now' diff --git a/tests/system_tests/MinimalC/test_MinimalC.py b/tests/system_tests/MinimalC/test_MinimalC.py index aa99bb1b..36a32b0b 100644 --- a/tests/system_tests/MinimalC/test_MinimalC.py +++ 
b/tests/system_tests/MinimalC/test_MinimalC.py @@ -34,10 +34,10 @@ def test_MinimalC(tmp_path): link_exe(config, linker='gcc'), - assert len(config._artefact_store[EXECUTABLES]) == 1 + assert len(config.artefact_store[EXECUTABLES]) == 1 # run - command = [str(config._artefact_store[EXECUTABLES][0])] + command = [str(config.artefact_store[EXECUTABLES][0])] res = subprocess.run(command, capture_output=True) output = res.stdout.decode() assert output == 'Hello world!' diff --git a/tests/system_tests/MinimalFortran/test_MinimalFortran.py b/tests/system_tests/MinimalFortran/test_MinimalFortran.py index 6dd7615f..455755cd 100644 --- a/tests/system_tests/MinimalFortran/test_MinimalFortran.py +++ b/tests/system_tests/MinimalFortran/test_MinimalFortran.py @@ -32,10 +32,10 @@ def test_MinimalFortran(tmp_path): compile_fortran(config, common_flags=['-c']), link_exe(config, linker='gcc', flags=['-lgfortran']), - assert len(config._artefact_store[EXECUTABLES]) == 1 + assert len(config.artefact_store[EXECUTABLES]) == 1 # run - command = [str(config._artefact_store[EXECUTABLES][0])] + command = [str(config.artefact_store[EXECUTABLES][0])] res = subprocess.run(command, capture_output=True) output = res.stdout.decode() assert output.strip() == 'Hello world!' diff --git a/tests/unit_tests/steps/test_archive_objects.py b/tests/unit_tests/steps/test_archive_objects.py index 0600d85c..583e4975 100644 --- a/tests/unit_tests/steps/test_archive_objects.py +++ b/tests/unit_tests/steps/test_archive_objects.py @@ -29,7 +29,7 @@ def test_for_exes(self): mock_run_command.assert_has_calls(expected_calls) # ensure the correct artefacts were created - assert config._artefact_store[OBJECT_ARCHIVES] == { + assert config.artefact_store[OBJECT_ARCHIVES] == { target: [str(config.build_output / f'{target}.a')] for target in targets} def test_for_library(self): @@ -48,5 +48,5 @@ def test_for_library(self): 'ar', 'cr', str(config.build_output / 'mylib.a'), 'util1.o', 'util2.o']) # ensure the correct artefacts were created - assert config._artefact_store[OBJECT_ARCHIVES] == { + assert config.artefact_store[OBJECT_ARCHIVES] == { None: [str(config.build_output / 'mylib.a')]} diff --git a/tests/unit_tests/steps/test_cleanup_prebuilds.py b/tests/unit_tests/steps/test_cleanup_prebuilds.py index ec15acc7..99a26952 100644 --- a/tests/unit_tests/steps/test_cleanup_prebuilds.py +++ b/tests/unit_tests/steps/test_cleanup_prebuilds.py @@ -21,7 +21,7 @@ def test_init_no_args(self): with mock.patch('fab.steps.cleanup_prebuilds.file_walk', return_value=[Path('foo.o')]), \ pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): with mock.patch('fab.steps.cleanup_prebuilds.remove_all_unused') as mock_remove_all_unused: - cleanup_prebuilds(config=mock.Mock(_artefact_store={CURRENT_PREBUILDS: [Path('bar.o')]})) + cleanup_prebuilds(config=mock.Mock(artefact_store={CURRENT_PREBUILDS: [Path('bar.o')]})) mock_remove_all_unused.assert_called_once_with(found_files=[Path('foo.o')], current_files=[Path('bar.o')]) def test_init_bad_args(self): diff --git a/tests/unit_tests/steps/test_compile_c.py b/tests/unit_tests/steps/test_compile_c.py index 13f20223..9a58d990 100644 --- a/tests/unit_tests/steps/test_compile_c.py +++ b/tests/unit_tests/steps/test_compile_c.py @@ -14,10 +14,9 @@ @pytest.fixture def content(tmp_path): config = BuildConfig('proj', multiprocessing=False, fab_workspace=tmp_path) - config.init_artefact_store() analysed_file = AnalysedC(fpath=Path(f'{config.source_root}/foo.c'), file_hash=0) - 
config._artefact_store[BUILD_TREES] = {None: {analysed_file.fpath: analysed_file}} + config.artefact_store[BUILD_TREES] = {None: {analysed_file.fpath: analysed_file}} expect_hash = 9120682468 return config, analysed_file, expect_hash @@ -51,7 +50,7 @@ def test_vanilla(self, content): values['send_metric'].assert_called_once() # ensure it created the correct artefact collection - assert config._artefact_store[OBJECT_FILES] == { + assert config.artefact_store[OBJECT_FILES] == { None: {config.prebuild_folder / f'foo.{expect_hash:x}.o', } } diff --git a/tests/unit_tests/steps/test_link.py b/tests/unit_tests/steps/test_link.py index cfee8f9a..57af5dfd 100644 --- a/tests/unit_tests/steps/test_link.py +++ b/tests/unit_tests/steps/test_link.py @@ -19,7 +19,7 @@ def test_run(self): config = SimpleNamespace( project_workspace=Path('workspace'), - _artefact_store={OBJECT_FILES: {'foo': {'foo.o', 'bar.o'}}}, + artefact_store={OBJECT_FILES: {'foo': {'foo.o', 'bar.o'}}}, ) with mock.patch('os.getenv', return_value='-L/foo1/lib -L/foo2/lib'): diff --git a/tests/unit_tests/steps/test_root_inc_files.py b/tests/unit_tests/steps/test_root_inc_files.py index 3bb55cee..9c61cb92 100644 --- a/tests/unit_tests/steps/test_root_inc_files.py +++ b/tests/unit_tests/steps/test_root_inc_files.py @@ -14,7 +14,7 @@ def test_vanilla(self): inc_files = [Path('/foo/source/bar.inc')] config = BuildConfig('proj') - config._artefact_store['all_source'] = inc_files + config.artefact_store['all_source'] = inc_files with mock.patch('fab.steps.root_inc_files.shutil') as mock_shutil: with mock.patch('fab.steps.root_inc_files.Path.mkdir'), \ @@ -27,7 +27,7 @@ def test_skip_output_folder(self): # ensure it doesn't try to copy a file in the build output config = BuildConfig('proj') inc_files = [Path('/foo/source/bar.inc'), config.build_output / 'fab.inc'] - config._artefact_store['all_source'] = inc_files + config.artefact_store['all_source'] = inc_files with mock.patch('fab.steps.root_inc_files.shutil') as mock_shutil: with mock.patch('fab.steps.root_inc_files.Path.mkdir'), \ @@ -41,7 +41,7 @@ def test_name_clash(self): inc_files = [Path('/foo/source/bar.inc'), Path('/foo/sauce/bar.inc')] config = BuildConfig('proj') - config._artefact_store['all_source'] = inc_files + config.artefact_store['all_source'] = inc_files with pytest.raises(FileExistsError): with mock.patch('fab.steps.root_inc_files.shutil'): diff --git a/tests/unit_tests/test_artefacts.py b/tests/unit_tests/test_artefacts.py index bdd0dce2..cd011143 100644 --- a/tests/unit_tests/test_artefacts.py +++ b/tests/unit_tests/test_artefacts.py @@ -3,28 +3,60 @@ import pytest -from fab.artefacts import FilterBuildTrees -from fab.constants import BUILD_TREES +from fab.artefacts import ArtefactStore, ArtefactsGetter, FilterBuildTrees +from fab.constants import BUILD_TREES, CURRENT_PREBUILDS -class TestFilterBuildTrees(object): +def test_artefacts_getter(): + '''Test that ArtefactsGetter is a proper AbstractClass + and that a NotImplemented error is raised if a derived + class is trying to call the base class. + ''' + + # First check that we can't instantiate + # a class that doesn't implement __call__: + # ---------------------------------------- + class MyClass(ArtefactsGetter): + pass + + with pytest.raises(TypeError) as err: + _ = MyClass() + # The actual error messages changes slightly from python + # version to version: + # 3.7: ... with abstract methods + # 3.8: ... with abstract method + # 3.12: ... 
without an implementation for abstract + # so we only test for the begin which is identical: + assert "Can't instantiate abstract class MyClass with" in str(err.value) + + # Now test that we can raise the NotImplementedError + # -------------------------------------------------- + class MyClassWithCall(ArtefactsGetter): + def __call__(self, artefact_store): + super().__call__(artefact_store) + + my_class_with_call = MyClassWithCall() + with pytest.raises(NotImplementedError) as err: + my_class_with_call("not-used") + assert ("__call__ must be implemented for 'MyClassWithCall'" + in str(err.value)) + + +class TestFilterBuildTrees(): @pytest.fixture def artefact_store(self): - return { - BUILD_TREES: { - 'tree1': { - 'a.foo': None, - 'b.foo': None, - 'c.bar': None, - }, - 'tree2': { - 'd.foo': None, - 'e.foo': None, - 'f.bar': None, - }, - } - } + '''A fixture that returns an ArtefactStore with + some elements.''' + artefact_store = ArtefactStore() + artefact_store[BUILD_TREES] = {'tree1': {'a.foo': None, + 'b.foo': None, + 'c.bar': None, }, + 'tree2': {'d.foo': None, + 'e.foo': None, + 'f.bar': None, }, + } + return artefact_store def test_single_suffix(self, artefact_store): # ensure the artefact getter passes through the trees properly to the filter func @@ -49,3 +81,11 @@ def test_multiple_suffixes(self, artefact_store): call(source_tree=artefact_store[BUILD_TREES]['tree1'], suffixes=['.foo', '.bar']), call(source_tree=artefact_store[BUILD_TREES]['tree2'], suffixes=['.foo', '.bar']), ]) + + +def test_artefact_store(): + '''Tests the ArtefactStore class.''' + artefact_store = ArtefactStore() + assert len(artefact_store) == 1 + assert isinstance(artefact_store, dict) + assert CURRENT_PREBUILDS in artefact_store diff --git a/tests/unit_tests/test_build_config.py b/tests/unit_tests/test_build_config.py index cb4f3e1a..54a49ab8 100644 --- a/tests/unit_tests/test_build_config.py +++ b/tests/unit_tests/test_build_config.py @@ -25,6 +25,6 @@ def simple_step(config): def test_add_cleanup(self): # ensure the cleanup step is added with BuildConfig('proj') as config: - assert CLEANUP_COUNT not in config._artefact_store + assert CLEANUP_COUNT not in config.artefact_store pass - assert CLEANUP_COUNT in config._artefact_store + assert CLEANUP_COUNT in config.artefact_store From 83f8726e54a9dcf49a00e11f3bf30c238cc91576 Mon Sep 17 00:00:00 2001 From: jasonjunweilyu <161689601+jasonjunweilyu@users.noreply.github.com> Date: Wed, 5 Jun 2024 18:09:07 +1000 Subject: [PATCH 4/9] Allowing file-specific transformation scripts for PSyclone (#308) Changed the transformation_script parameter of function `psyclone` to accept a call-back function that can return file-specific transformation scripts. --- docs/source/writing_config.rst | 68 +++++++- run_configs/lfric/atm.py | 19 ++- run_configs/lfric/gungho.py | 16 +- source/fab/steps/psyclone.py | 145 ++++++++---------- .../psyclone/test_psyclone_system_test.py | 48 ++++-- .../steps/test_psyclone_unit_test.py | 22 ++- 6 files changed, 209 insertions(+), 109 deletions(-) diff --git a/docs/source/writing_config.rst b/docs/source/writing_config.rst index aaab8471..54a742aa 100644 --- a/docs/source/writing_config.rst +++ b/docs/source/writing_config.rst @@ -64,7 +64,7 @@ A grab step will copy files from a folder or remote repo into a folder called if __name__ == '__main__': - with BuildConfig(project_label='` environment variable if __name__ == '__main__': - with BuildConfig(project_label=' Tuple: - """ - Analysis for PSyclone prebuilds. 
- - In order to build reusable psyclone results, we need to know everything that goes into making one. - Then we can hash it all, and check for changes in subsequent builds. - We'll build up this data in a payload object, to be passed to the child processes. - - Changes which must trigger reprocessing of an x90 file: - - x90 source: - - kernel metadata used by the x90 - - transformation script - - cli args - - Later: - - the psyclone version, to cover changes to built-in kernels - - Kernels: - - Kernel metadata are type definitions passed to invoke(). - For example, this x90 code depends on the kernel `compute_total_mass_kernel_type`. - .. code-block:: fortran - - call invoke( name = "compute_dry_mass", & - compute_total_mass_kernel_type(dry_mass, rho, chi, panel_id, qr), & - sum_X(total_dry, dry_mass)) - - We can see this kernel in a use statement at the top of the x90. - .. code-block:: fortran - - use compute_total_mass_kernel_mod, only: compute_total_mass_kernel_type - - Some kernels, such as `setval_c`, are - `PSyclone built-ins `_. - They will not appear in use statements and can be ignored. - - The Psyclone and Analyse steps both use the generic Fortran analyser, which recognises Psyclone kernel metadata. - The Analysis step must come after this step because it needs to analyse the fortran we create. - - """ - # hash the transformation script - if transformation_script: - transformation_script_hash = file_checksum(transformation_script).file_hash - else: - warnings.warn('no transformation script specified') - transformation_script_hash = 0 - - # analyse the x90s - analysed_x90 = _analyse_x90s(config, x90s) - - # Analyse the kernel files, hashing the psyclone kernel metadata. - # We only need the hashes right now but they all need analysing anyway, and we don't want to parse twice. - # We pass them through the general fortran analyser, which currently recognises kernel metadata. - # todo: We'd like to separate that from the general fortran analyser at some point, to reduce coupling. - all_kernel_hashes = _analyse_kernels(config, kernel_roots) - - return transformation_script_hash, analysed_x90, all_kernel_hashes - - def _analyse_x90s(config, x90s: Set[Path]) -> Dict[Path, AnalysedX90]: # Analyse parsable versions of the x90s, finding kernel dependencies. @@ -280,7 +223,31 @@ def _analyse_x90s(config, x90s: Set[Path]) -> Dict[Path, AnalysedX90]: def _analyse_kernels(config, kernel_roots) -> Dict[str, int]: - # We want to hash the kernel metadata (type defs). + """ + We want to hash the kernel metadata (type defs). + + Kernel metadata are type definitions passed to invoke(). + For example, this x90 code depends on the kernel `compute_total_mass_kernel_type`. + .. code-block:: fortran + + call invoke( name = "compute_dry_mass", & + compute_total_mass_kernel_type(dry_mass, rho, chi, panel_id, qr), & + sum_X(total_dry, dry_mass)) + + We can see this kernel in a use statement at the top of the x90. + .. code-block:: fortran + + use compute_total_mass_kernel_mod, only: compute_total_mass_kernel_type + + Some kernels, such as `setval_c`, are + `PSyclone built-ins `_. + They will not appear in use statements and can be ignored. + + The Psyclone and Analyse steps both use the generic Fortran analyser, which recognises Psyclone kernel metadata. + The Analysis step must come after this step because it needs to analyse the fortran we create. + + """ # Ignore the prebuild folder. Todo: test the prebuild folder is ignored, in case someone breaks this. 
file_lists = [list(file_walk(root, ignore_folders=[config.prebuild_folder])) for root in kernel_roots] all_kernel_files: Set[Path] = set(sum(file_lists, [])) @@ -346,7 +313,8 @@ def do_one_file(arg: Tuple[Path, MpCommonArgs]): try: # logger.info(f'running psyclone on {x90_file}') run_psyclone(generated, modified_alg, x90_file, - mp_payload.kernel_roots, mp_payload.transformation_script, mp_payload.cli_args) + mp_payload.kernel_roots, mp_payload.transformation_script, + mp_payload.cli_args, mp_payload.config) shutil.copy2(modified_alg, prebuilt_alg) msg = f'created prebuilds for {x90_file}:\n {prebuilt_alg}' @@ -379,6 +347,12 @@ def _gen_prebuild_hash(x90_file: Path, mp_payload: MpCommonArgs): """ Calculate the prebuild hash for this x90 file, based on all the things which should trigger reprocessing. + Changes which must trigger reprocessing of an x90 file: + - x90 source: + - kernel metadata used by the x90 + - transformation script + - cli args + """ # We've analysed (a parsable version of) this x90. analysis_result = mp_payload.analysed_x90[x90_file] # type: ignore @@ -387,6 +361,15 @@ def _gen_prebuild_hash(x90_file: Path, mp_payload: MpCommonArgs): kernel_deps_hashes = { mp_payload.all_kernel_hashes[kernel_name] for kernel_name in analysis_result.kernel_deps} # type: ignore + # calculate the transformation script hash for this file + transformation_script_hash = 0 + if mp_payload.transformation_script: + transformation_script_return_path = mp_payload.transformation_script(x90_file, mp_payload.config) + if transformation_script_return_path: + transformation_script_hash = file_checksum(transformation_script_return_path).file_hash + if transformation_script_hash == 0: + warnings.warn('no transformation script specified') + # hash everything which should trigger re-processing # todo: hash the psyclone version in case the built-in kernels change? 
prebuild_hash = sum([ @@ -397,8 +380,8 @@ def _gen_prebuild_hash(x90_file: Path, mp_payload: MpCommonArgs): # the hashes of the kernels used by this x90 sum(kernel_deps_hashes), - # - mp_payload.transformation_script_hash, + # the hash of the transformation script for this x90 + transformation_script_hash, # command-line arguments string_checksum(str(mp_payload.cli_args)), @@ -413,13 +396,17 @@ def _get_prebuild_paths(prebuild_folder, modified_alg, generated, prebuild_hash) return prebuilt_alg, prebuilt_gen -def run_psyclone(generated, modified_alg, x90_file, kernel_roots, transformation_script, cli_args): +def run_psyclone(generated, modified_alg, x90_file, kernel_roots, transformation_script, cli_args, config): # -d specifies "a root directory structure containing kernel source" kernel_args: Union[List[str], list] = sum([['-d', k] for k in kernel_roots], []) # transformation python script - transform_options = ['-s', transformation_script] if transformation_script else [] + transform_options = [] + if transformation_script: + transformation_script_return_path = transformation_script(x90_file, config) + if transformation_script_return_path: + transform_options = ['-s', transformation_script_return_path] command = [ 'psyclone', '-api', 'dynamo0.3', diff --git a/tests/system_tests/psyclone/test_psyclone_system_test.py b/tests/system_tests/psyclone/test_psyclone_system_test.py index 20cd7761..3d1f6e21 100644 --- a/tests/system_tests/psyclone/test_psyclone_system_test.py +++ b/tests/system_tests/psyclone/test_psyclone_system_test.py @@ -17,7 +17,8 @@ from fab.steps.find_source_files import find_source_files from fab.steps.grab.folder import grab_folder from fab.steps.preprocess import preprocess_fortran -from fab.steps.psyclone import _analysis_for_prebuilds, make_parsable_x90, preprocess_x90, psyclone, tool_available +from fab.steps.psyclone import _analyse_x90s, _analyse_kernels, make_parsable_x90, preprocess_x90, \ + psyclone, tool_available, run_psyclone from fab.util import file_checksum SAMPLE_KERNEL = Path(__file__).parent / 'kernel.f90' @@ -92,20 +93,12 @@ def test_prebuild(self, tmp_path): assert analysed_x90 == self.expected_analysis_result -class Test_analysis_for_prebuilds(object): +class Test_analysis_for_x90s_and_kernels(object): def test_analyse(self, tmp_path): - with BuildConfig('proj', fab_workspace=tmp_path) as config: - transformation_script_hash, analysed_x90, all_kernel_hashes = \ - _analysis_for_prebuilds(config, - x90s=[SAMPLE_X90], - kernel_roots=[Path(__file__).parent], - # the script is just hashed, so any one will do - use this file! - transformation_script=Path(__file__)) - - # transformation_script_hash - assert transformation_script_hash == file_checksum(__file__).file_hash + analysed_x90 = _analyse_x90s(config, x90s=[SAMPLE_X90]) + all_kernel_hashes = _analyse_kernels(config, kernel_roots=[Path(__file__).parent]) # analysed_x90 assert analysed_x90 == { @@ -192,3 +185,34 @@ def test_prebuild(self, tmp_path, config): mock_x90_walk.assert_not_called() mock_fortran_walk.assert_not_called() mock_run.assert_not_called() + + +class TestTransformationScript(object): + """ + Check whether transformation script is called with x90 file once + and whether transformation script is passed to psyclone after '-s'. 
+ + """ + def test_transformation_script(self): + mock_transformation_script = mock.Mock(return_value=__file__) + with mock.patch('fab.steps.psyclone.run_command') as mock_run_command: + mock_transformation_script.return_value = Path(__file__) + run_psyclone(generated=Path(__file__), + modified_alg=Path(__file__), + x90_file=Path(__file__), + kernel_roots=[], + transformation_script=mock_transformation_script, + cli_args=[], + config=None, # type: ignore[arg-type] + ) + + # check whether x90 is passed to transformation_script + mock_transformation_script.assert_called_once_with(Path(__file__), None) + # check transformation_script is passed to psyclone command with '-s' + mock_run_command.assert_called_with(['psyclone', '-api', 'dynamo0.3', + '-l', 'all', + '-opsy', Path(__file__), + '-oalg', Path(__file__), + '-s', Path(__file__), + Path(__file__), + ]) diff --git a/tests/unit_tests/steps/test_psyclone_unit_test.py b/tests/unit_tests/steps/test_psyclone_unit_test.py index d2c3da8e..13980c0d 100644 --- a/tests/unit_tests/steps/test_psyclone_unit_test.py +++ b/tests/unit_tests/steps/test_psyclone_unit_test.py @@ -11,6 +11,7 @@ from fab.parse.x90 import AnalysedX90 from fab.steps.psyclone import _check_override, _gen_prebuild_hash, MpCommonArgs +from fab.util import file_checksum class Test_gen_prebuild_hash(object): @@ -34,15 +35,20 @@ def data(self, tmp_path) -> Tuple[MpCommonArgs, Path, int]: 'kernel2': 456, } - expect_hash = 223133615 + # the script is just hashed later, so any one will do - use this file! + mock_transformation_script = mock.Mock(return_value=__file__) + + expect_hash = 223133492 + file_checksum(__file__).file_hash # add the transformation_script_hash mp_payload = MpCommonArgs( analysed_x90=analysed_x90, all_kernel_hashes=all_kernel_hashes, - transformation_script_hash=123, cli_args=[], - config=None, kernel_roots=None, transformation_script=None, # type: ignore[arg-type] - overrides_folder=None, override_files=None, # type: ignore[arg-type] + config=None, # type: ignore[arg-type] + kernel_roots=[], + transformation_script=mock_transformation_script, + overrides_folder=None, + override_files=None, # type: ignore[arg-type] ) return mp_payload, x90_file, expect_hash @@ -68,9 +74,11 @@ def test_kernal_deps(self, data): def test_trans_script(self, data): # changing the transformation script should change the hash mp_payload, x90_file, expect_hash = data - mp_payload.transformation_script_hash += 1 - result = _gen_prebuild_hash(x90_file=x90_file, mp_payload=mp_payload) - assert result == expect_hash + 1 + mp_payload.transformation_script = None + with pytest.warns(UserWarning, match="no transformation script specified"): + result = _gen_prebuild_hash(x90_file=x90_file, mp_payload=mp_payload) + # transformation_script_hash = 0 + assert result == expect_hash - file_checksum(__file__).file_hash def test_cli_args(self, data): # changing the cli args should change the hash From 04a65a78570a1249fb562ca04f549652799aaf59 Mon Sep 17 00:00:00 2001 From: Matthew Hambley Date: Fri, 14 Jun 2024 12:10:18 +0100 Subject: [PATCH 5/9] Update workflows with most recent Python setup action. 
(#303) Co-authored-by: Joerg Henrichs --- .github/workflows/build.yml | 2 +- .github/workflows/build_docs.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 936c7bd9..4ede95de 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Setup Compiler diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index ed9b8836..cc2564ac 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -13,7 +13,7 @@ jobs: git config --global user.email "metomi@metoffice.gov.uk" git config --global user.name "SciFab Developers" - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.x' - name: install fab From 647e7daae8465aefc5c54242418208b41f5c5302 Mon Sep 17 00:00:00 2001 From: Matthew Hambley Date: Thu, 20 Jun 2024 10:57:15 +0100 Subject: [PATCH 6/9] Add caching to workflow. (#306) --- .github/workflows/build.yml | 81 +++++++++++++++++--------------- .github/workflows/build_docs.yml | 73 ++++++++++++++++------------ pyproject.toml | 2 +- 3 files changed, 88 insertions(+), 68 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4ede95de..3579682c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,46 +5,53 @@ on: [push, pull_request] jobs: build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: max-parallel: 4 matrix: python-version: ['3.7', '3.8', '3.9', '3.10'] steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Setup Compiler - run: | - sudo apt-get -y install gcc gfortran - - name: Setup LibClang - run: | - sudo apt-get -y install libclang-dev python-clang - SP=~/.local/lib/python${{ matrix.python-version }}/site-packages - mkdir -p $SP - cp -vr /usr/lib/python3/dist-packages/clang $SP/ - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e . - - name: Type check with mypy - run: | - pip install -e .[dev] - pip install pytest - python -m mypy source tests - - name: Code style check with flake8 - run: | - pip install -e .[dev] - flake8 . --count --show-source --statistics - - name: Unit tests with Pytest - run: | - pip install .[tests] - python -m pytest --cov=fab tests/unit_tests - - - name: System tests with Pytest - run: | - pip install .[tests] - python -m pytest --cov=fab tests/system_tests + # Should this step use a cache? + # + - name: Setup Compiler + run: | + sudo apt-get -y install llvm clang libclang-dev gcc gfortran + + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + + # The clang binding version has to match the version in the Ubuntu being used. + - name: Install Python libraries + run: | + python -m pip install --upgrade pip + pip install -e . 
+ pip install clang==14.0 + + - name: Type check with mypy + run: | + pip install -e .[dev] + pip install pytest + python -m mypy source tests + + - name: Code style check with flake8 + run: | + pip install -e .[dev] + flake8 . --count --show-source --statistics + + - name: Unit tests with Pytest + run: | + pip install .[tests] + python -m pytest --cov=fab tests/unit_tests + + - name: System tests with Pytest + run: | + pip install .[tests] + python -m pytest --cov=fab tests/system_tests diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index cc2564ac..df04c7c0 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,35 +5,48 @@ on: workflow_dispatch jobs: build-docs: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - - name: set git user - run: | - git config --global user.email "metomi@metoffice.gov.uk" - git config --global user.name "SciFab Developers" - - uses: actions/checkout@v3 - - uses: actions/setup-python@v5 - with: - python-version: '3.x' - - name: install fab - run: pip install .[docs] - - name: build docs - run: | - cd docs - rm -rf build - sphinx-apidoc --separate --module-first -d 5 -f -o source/apidoc ../source/fab - make html - - name: move built docs to docs root - run: | - mv docs/build/html/* docs/ - - name: git add built docs - run: | - git add docs/* - - name: commit - run: | - git commit -m "docs build" - - name: push to gh_pages branch - run: | - echo "pushing from $GITHUB_REF_NAME to gh_pages" - git push --force origin $GITHUB_REF_NAME:gh_pages + - name: set git user + run: | + git config --global user.email "metomi@metoffice.gov.uk" + git config --global user.name "SciFab Developers" + + - name: Checkout Fab project files + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.x' + cache: pip + + - name: Install Python libraries + run: | + python -m pip install --upgrade pip + pip install -e .[docs] + + - name: build docs + run: | + cd docs + rm -rf build + sphinx-apidoc --separate --module-first -d 5 -f -o source/apidoc ../source/fab + make html + + - name: move built docs to docs root + run: | + mv docs/build/html/* docs/ + + - name: git add built docs + run: | + git add docs/* + + - name: commit + run: | + git commit -m "docs build" + + - name: push to gh_pages branch + run: | + echo "pushing from $GITHUB_REF_NAME to gh_pages" + git push --force origin $GITHUB_REF_NAME:gh_pages diff --git a/pyproject.toml b/pyproject.toml index 76424ead..8823f899 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ ] [project.optional-dependencies] -c-language = ['python-clang'] +c-language = ['clang'] plots = ['matplotlib'] tests = ['pytest', 'pytest-cov', 'pytest-mock'] checks = ['flake8>=5.0.4', 'mypy'] From 3ad91901f73f0bca7ec87beb5b474f9f8d9e7b4e Mon Sep 17 00:00:00 2001 From: Matthew Hambley Date: Fri, 21 Jun 2024 08:08:07 +0100 Subject: [PATCH 7/9] Change contacts detail to responsible group away from repository owner. 
(#320) --- .github/workflows/build_docs.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index df04c7c0..8716de74 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -10,7 +10,7 @@ jobs: steps: - name: set git user run: | - git config --global user.email "metomi@metoffice.gov.uk" + git config --global user.email "CoreCapabilityDevelopmentTeam@metoffice.gov.uk" git config --global user.name "SciFab Developers" - name: Checkout Fab project files diff --git a/pyproject.toml b/pyproject.toml index 8823f899..dfcec53a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "sci-fab" description = "Build system for scientific software" authors = [ - {name = "SciFab Developers", email = 'metomi@metoffice.gov.uk'} + {name = "Core Capabilities Development Team", email = 'CoreCapabilityDevelopmentTeam@metoffice.gov.uk'} ] license = {file = 'LICENSE.txt'} dynamic = ['version', 'readme'] From c02a5a7fdff87ea36290f0e164f0d15dca1cbdc6 Mon Sep 17 00:00:00 2001 From: Joerg Henrichs Date: Fri, 21 Jun 2024 19:53:45 +1000 Subject: [PATCH 8/9] Better tool support (#309) Tools are now represented by classes which are instantiated and placed in a "tool box." This improves the organisation and management of tools. --- .gitignore | 5 + docs/source/index.rst | 1 + docs/source/site-specific-config.rst | 159 +++++++++ pyproject.toml | 6 + run_configs/build_all.py | 48 ++- run_configs/gcom/build_gcom_ar.py | 5 +- run_configs/gcom/build_gcom_so.py | 5 +- run_configs/gcom/grab_gcom.py | 5 +- run_configs/jules/build_jules.py | 26 +- run_configs/lfric/atm.py | 19 +- run_configs/lfric/grab_lfric.py | 9 +- run_configs/lfric/gungho.py | 17 +- run_configs/lfric/lfric_common.py | 59 ++- run_configs/lfric/mesh_tools.py | 5 +- .../tiny_fortran/build_tiny_fortran.py | 23 +- run_configs/um/build_um.py | 19 +- source/fab/artefacts.py | 36 +- source/fab/build_config.py | 27 +- source/fab/cli.py | 85 ++--- source/fab/parse/__init__.py | 4 +- source/fab/parse/fortran_common.py | 1 - source/fab/steps/archive_objects.py | 29 +- source/fab/steps/c_pragma_injector.py | 4 +- source/fab/steps/compile_c.py | 54 ++- source/fab/steps/compile_fortran.py | 200 ++++------- source/fab/steps/grab/__init__.py | 16 - source/fab/steps/grab/fcm.py | 30 +- source/fab/steps/grab/folder.py | 5 +- source/fab/steps/grab/git.py | 82 +---- source/fab/steps/grab/prebuild.py | 13 +- source/fab/steps/grab/svn.py | 112 +++--- source/fab/steps/link.py | 46 +-- source/fab/steps/preprocess.py | 101 ++---- source/fab/steps/psyclone.py | 98 ++--- source/fab/steps/root_inc_files.py | 3 +- source/fab/tools.py | 173 --------- source/fab/tools/__init__.py | 50 +++ source/fab/tools/ar.py | 35 ++ source/fab/tools/category.py | 38 ++ source/fab/tools/compiler.py | 309 ++++++++++++++++ source/fab/tools/flags.py | 81 +++++ source/fab/tools/linker.py | 85 +++++ source/fab/tools/preprocessor.py | 77 ++++ source/fab/tools/psyclone.py | 68 ++++ source/fab/tools/rsync.py | 40 +++ source/fab/tools/tool.py | 192 ++++++++++ source/fab/tools/tool_box.py | 64 ++++ source/fab/tools/tool_repository.py | 142 ++++++++ source/fab/tools/versioning.py | 217 +++++++++++ source/fab/util.py | 6 +- tests/conftest.py | 60 ++++ .../CFortranInterop/test_CFortranInterop.py | 28 +- .../CUserHeader/test_CUserHeader.py | 23 +- .../test_FortranDependencies.py | 22 +- .../test_FortranPreProcess.py | 19 +- 
tests/system_tests/MinimalC/test_MinimalC.py | 24 +- .../MinimalFortran/test_MinimalFortran.py | 20 +- tests/system_tests/git/test_git.py | 21 +- .../test_incremental_fortran.py | 27 +- tests/system_tests/prebuild/test_prebuild.py | 17 +- .../psyclone/test_psyclone_system_test.py | 69 ++-- .../svn_fcm/test_svn_fcm_system_test.py | 130 ++++--- .../zero_config/test_zero_config.py | 24 +- tests/unit_tests/parse/c/test_c_analyser.py | 15 +- .../parse/fortran/test_fortran_analyser.py | 4 +- tests/unit_tests/steps/test_analyse.py | 3 +- .../unit_tests/steps/test_archive_objects.py | 67 +++- tests/unit_tests/steps/test_compile_c.py | 140 +++++--- .../unit_tests/steps/test_compile_fortran.py | 315 ++++++---------- tests/unit_tests/steps/test_grab.py | 32 +- tests/unit_tests/steps/test_link.py | 33 +- .../steps/test_link_shared_object.py | 49 +++ tests/unit_tests/steps/test_preprocess.py | 23 +- tests/unit_tests/steps/test_root_inc_files.py | 9 +- tests/unit_tests/test_build_config.py | 8 +- tests/unit_tests/test_config.py | 7 +- tests/unit_tests/test_tools.py | 195 ---------- tests/unit_tests/tools/test_ar.py | 51 +++ tests/unit_tests/tools/test_categories.py | 27 ++ tests/unit_tests/tools/test_compiler.py | 336 ++++++++++++++++++ tests/unit_tests/tools/test_flags.py | 59 +++ tests/unit_tests/tools/test_linker.py | 127 +++++++ tests/unit_tests/tools/test_preprocessor.py | 82 +++++ tests/unit_tests/tools/test_psyclone.py | 63 ++++ tests/unit_tests/tools/test_rsync.py | 59 +++ tests/unit_tests/tools/test_tool.py | 143 ++++++++ tests/unit_tests/tools/test_tool_box.py | 72 ++++ .../unit_tests/tools/test_tool_repository.py | 96 +++++ tests/unit_tests/tools/test_versioning.py | 295 +++++++++++++++ 89 files changed, 4204 insertions(+), 1524 deletions(-) create mode 100644 docs/source/site-specific-config.rst delete mode 100644 source/fab/tools.py create mode 100644 source/fab/tools/__init__.py create mode 100644 source/fab/tools/ar.py create mode 100644 source/fab/tools/category.py create mode 100644 source/fab/tools/compiler.py create mode 100644 source/fab/tools/flags.py create mode 100644 source/fab/tools/linker.py create mode 100644 source/fab/tools/preprocessor.py create mode 100644 source/fab/tools/psyclone.py create mode 100644 source/fab/tools/rsync.py create mode 100644 source/fab/tools/tool.py create mode 100644 source/fab/tools/tool_box.py create mode 100644 source/fab/tools/tool_repository.py create mode 100644 source/fab/tools/versioning.py create mode 100644 tests/conftest.py create mode 100644 tests/unit_tests/steps/test_link_shared_object.py delete mode 100644 tests/unit_tests/test_tools.py create mode 100644 tests/unit_tests/tools/test_ar.py create mode 100644 tests/unit_tests/tools/test_categories.py create mode 100644 tests/unit_tests/tools/test_compiler.py create mode 100644 tests/unit_tests/tools/test_flags.py create mode 100644 tests/unit_tests/tools/test_linker.py create mode 100644 tests/unit_tests/tools/test_preprocessor.py create mode 100644 tests/unit_tests/tools/test_psyclone.py create mode 100644 tests/unit_tests/tools/test_rsync.py create mode 100644 tests/unit_tests/tools/test_tool.py create mode 100644 tests/unit_tests/tools/test_tool_box.py create mode 100644 tests/unit_tests/tools/test_tool_repository.py create mode 100644 tests/unit_tests/tools/test_versioning.py diff --git a/.gitignore b/.gitignore index 0e8d3a74..9fd85da1 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,11 @@ __pycache__/ *.py[cod] *$py.class +# Build directory for documentation +docs/build 
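+# Generated API pages (autosummary and sphinx-apidoc output)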
+docs/source/api
+docs/source/apidoc
+
 # C extensions
 *.so
diff --git a/docs/source/index.rst b/docs/source/index.rst
index fc03936e..553ff943 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -80,6 +80,7 @@ See also
    config_intro
    writing_config
    advanced_config
+   site-specific-config
    features
    Api Reference
    development
diff --git a/docs/source/site-specific-config.rst b/docs/source/site-specific-config.rst
new file mode 100644
index 00000000..c6363f21
--- /dev/null
+++ b/docs/source/site-specific-config.rst
@@ -0,0 +1,159 @@
+.. _site_specific_config:
+
+Site-Specific Configuration
+***************************
+A site might have compilers that Fab doesn't know about, or might
+prefer a different compiler to the Fab default. Fab abstracts the
+compilers and other programs required during building as instances of
+the :class:`~fab.tools.Tool` class. All tools that Fab knows about are
+available in a :class:`~fab.tools.tool_repository.ToolRepository`,
+which can include tools that are not available on the current system.
+
+Each tool belongs to a category, defined in
+:class:`~fab.tools.category.Category`. A `ToolRepository` can store
+several tools of the same category.
+
+At build time, the user has to create an instance of
+:class:`~fab.tools.tool_box.ToolBox` and pass
+it to the :class:`~fab.build_config.BuildConfig` object. This toolbox
+contains all the tools that will be used during the build process, but
+it can only store one tool per category. If a tool is not defined in
+the toolbox, the default from the `ToolRepository` will be used. This
+is useful for standard tools like `git`, `rsync` etc. that in practice
+are rarely changed.
+
+.. note:: If you need to use, for example, different compilers for
+    different files, you would implement this as a `meta-compiler`:
+    implement a new class based on the existing
+    :class:`~fab.tools.compiler.Compiler` class,
+    which takes two (or more) compiler instances. Its
+    :func:`~fab.tools.compiler.Compiler.compile_file`
+    method can then decide (e.g. based on the path of the file to
+    compile, or a hard-coded set of criteria) which compiler to use.
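+
+A minimal sketch of such a meta-compiler (a hypothetical example: it
+assumes `Gfortran` and `Ifort` can be instantiated with their default
+arguments, and it uses the same `compile_file` signature that the Fab
+compile steps call):
+
+.. code-block::
+    :linenos:
+    :caption: Meta-compiler sketch
+
+    from fab.tools import Gfortran, Ifort
+
+    class MetaCompiler(Gfortran):
+        '''Hypothetical meta-compiler: picks a compiler per file.'''
+        def __init__(self):
+            super().__init__()
+            self._alternative = Ifort()
+
+        def compile_file(self, input_file, output_file, add_flags=None):
+            # Hard-coded criterion, for illustration only:
+            if 'fast' in str(input_file):
+                return self._alternative.compile_file(
+                    input_file, output_file, add_flags=add_flags)
+            return super().compile_file(input_file, output_file,
+                                        add_flags=add_flags)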
+
+Category
+========
+All possible categories are defined in
+:class:`~fab.tools.category.Category`. If additional categories
+are required, they can be added.
+
+Tool
+====
+Each tool must be derived from :class:`~fab.tools.Tool`.
+The base class provides a `run` method, which any tool can
+use to execute a command in a shell. Typically, a tool will
+provide one (or several) custom commands to be used by the steps.
+For example, a compiler instance provides a
+:func:`~fab.tools.compiler.Compiler.compile_file` method.
+This makes sure that no tool-specific command line options need
+to be used in any Fab step, which allows the user to replace any tool
+with a different one.
+
+New tools can easily be created; see
+:class:`~fab.tools.compiler.Gcc` or
+:class:`~fab.tools.compiler.Icc` for examples. Typically, they can
+just be created by providing a different set of parameters in the
+constructor.
+
+This also allows compiler wrappers to be easily defined. For example,
+if you want to use `mpif90` as the compiler, which is an MPI-specific
+wrapper for `ifort`, you can create this class as follows:
+
+.. code-block::
+    :linenos:
+    :caption: Compiler wrapper
+
+    from fab.tools import Ifort
+
+    class MpiF90(Ifort):
+        '''A simple compiler wrapper'''
+        def __init__(self):
+            super().__init__(name="mpif90-intel",
+                             exec_name="mpif90")
+
+.. note:: In `ticket 312 <https://github.com/metomi/fab/issues/312>`_ a
+    better implementation of compiler wrappers will be provided.
+
+Tool Repository
+===============
+The :class:`~fab.tools.tool_repository.ToolRepository` implements
+a singleton to access any tool that Fab knows about. A site-specific
+startup section can add more tools to the repository:
+
+.. code-block::
+    :linenos:
+    :caption: ToolRepository
+
+    from fab.tools import ToolRepository
+
+    # Assume the MpiF90 class as shown in the previous example
+
+    tr = ToolRepository()
+    tr.add_tool(MpiF90)   # the tool repository will create the instance
+
+Compiler and linker objects define a compiler suite, and the `ToolRepository`
+provides
+:func:`~fab.tools.tool_repository.ToolRepository.set_default_compiler_suite`,
+which allows you to change the defaults for compiler and linker with a
+single call, making it easy to switch from one compiler suite to
+another. If required, you can still change any individual compiler
+after setting a default compiler suite, e.g. you can select
+`intel-classic` as the default suite, but set the C compiler to `gcc`.
+
+
+Tool Box
+========
+The class :class:`~fab.tools.tool_box.ToolBox` provides
+the tools to be used by the build environment, i.e. the
+`BuildConfig` object:
+
+.. code-block::
+    :linenos:
+    :caption: ToolBox
+
+    from fab.tools import Category, ToolBox, ToolRepository
+
+    tr = ToolRepository()
+    tr.set_default_compiler_suite("intel-classic")
+    tool_box = ToolBox()
+    ifort = tr.get_tool(Category.FORTRAN_COMPILER, "ifort")
+    tool_box.add_tool(ifort)
+    c_compiler = tr.get_default(Category.C_COMPILER)
+    tool_box.add_tool(c_compiler)
+
+    config = BuildConfig(tool_box=tool_box,
+                         project_label=f'lfric_atm-{ifort.name}', ...)
+
+The advantage of looking the compilers up in the tool box is that it
+allows a site to replace a compiler in the tool repository. For
+example, if a site wants to use an older gfortran version, say one
+which is called `gfortran-11`, it can remove the standard gfortran from
+the tool repository and replace it with a new gfortran compiler that
+calls `gfortran-11` instead of `gfortran`. A site can also decide not
+to support a generic `gfortran` call at all, and instead add several
+gfortran compilers with a version number in their names, as sketched
+below.
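+
+A sketch of that idea (hypothetical class name; this assumes `Gfortran`
+accepts the same `name`/`exec_name` constructor arguments as `Ifort`
+does in the wrapper example above):
+
+.. code-block::
+    :linenos:
+    :caption: Site-specific gfortran
+
+    from fab.tools import Gfortran, ToolRepository
+
+    class Gfortran11(Gfortran):
+        '''Calls `gfortran-11` instead of the generic `gfortran`.'''
+        def __init__(self):
+            super().__init__(name="gfortran-11",
+                             exec_name="gfortran-11")
+
+    tr = ToolRepository()
+    tr.add_tool(Gfortran11)   # the repository will create the instance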
+
+If a tool category is not defined in the `ToolBox`, then
+the default tool from the `ToolRepository` will be used. Therefore,
+in the example above, adding `ifort` is not strictly necessary (since
+it will be the default after setting the default compiler suite to
+`intel-classic`), and `c_compiler` is the default as well. This feature
+is especially useful for the many default tools that Fab requires (git,
+rsync, ar, ...).
+
+.. code-block::
+    :linenos:
+    :caption: ToolBox
+
+    tool_box = ToolBox()
+    default_c_compiler = tool_box.get_tool(Category.C_COMPILER)
+
+
+TODO
+====
+At this stage compiler flags are still set in the corresponding Fab
+steps, and it might make more sense to allow their modification and
+definition in the compiler objects.
+This would allow a site to define its own set of default flags to
+be used with a certain compiler, by replacing or updating a compiler
+instance in the Tool Repository.
diff --git a/pyproject.toml b/pyproject.toml
index dfcec53a..bd6bdb74 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -48,3 +48,9 @@ version = {attr = 'fab.__version__'}
 [build-system]
 requires = ['setuptools']
 build-backend = 'setuptools.build_meta'
+
+# This is required so that pytest finds conftest.py files.
+[tool.pytest.ini_options]
+testpaths = [
+    "tests",
+]
diff --git a/run_configs/build_all.py b/run_configs/build_all.py
index 102818bf..b3b5f1d8 100755
--- a/run_configs/build_all.py
+++ b/run_configs/build_all.py
@@ -4,20 +4,48 @@
 # For further details please refer to the file COPYRIGHT
 # which you should have received as part of this distribution
 # ##############################################################################
+
+'''A top-level build script that executes all the scripts in the various
+subdirectories.
+'''
+
 import os
 from pathlib import Path
+import shutil
+
+from fab.tools import Category, Tool, ToolBox
 
-from fab.steps.compile_fortran import get_fortran_compiler
-from fab.tools import run_command
 
+class Script(Tool):
+    '''A simple wrapper that runs a shell script.
+
+    :param name: the path to the script to run.
+    '''
+    def __init__(self, name: Path):
+        super().__init__(name=name.name, exec_name=name,
+                         category=Category.MISC)
 
-# todo: run the exes, check the output
+    def check_available(self):
+        '''Since there is typically no command line option we could test
+        for the tools here, we use `which` to determine whether a tool
+        is available.
+        '''
+        out = shutil.which(self.exec_name)
+        if out:
+            return True
+        print(f"Tool '{self.name}' ({self.exec_name}) cannot be executed.")
+        return False
+
+
+# todo: after running the execs, check the output
 def build_all():
+    '''Build all example codes here.
+    '''
+    tool_box = ToolBox()
+    compiler = tool_box[Category.FORTRAN_COMPILER]
     configs_folder = Path(__file__).parent
-    compiler, _ = get_fortran_compiler()
-    os.environ['FAB_WORKSPACE'] = os.path.join(os.getcwd(), f'fab_build_all_{compiler}')
+    os.environ['FAB_WORKSPACE'] = \
+        os.path.join(os.getcwd(), f'fab_build_all_{compiler.name}')
 
     scripts = [
         configs_folder / 'tiny_fortran/build_tiny_fortran.py',
@@ -38,20 +66,22 @@ def build_all():
 
     # skip these for now, until we configure them to build again
     compiler_skip = {'gfortran': [], 'ifort': ['atm.py']}
-    skip = compiler_skip[compiler]
+    skip = compiler_skip[compiler.name]
 
     for script in scripts:
-
+        script_tool = Script(script)
         # skip this build script for the current compiler?
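+        # (compiler_skip above maps a compiler name to the example scripts
+        #  that are currently known not to build with that compiler.)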
if script.name in skip: print(f'' f'-----' - f'SKIPPING {script.name} FOR COMPILER {compiler} - GET THIS COMPILING AGAIN' + f'SKIPPING {script.name} FOR COMPILER {compiler.name} - ' + f'GET THIS COMPILING AGAIN' f'-----') continue - run_command([script], capture_output=False) + script_tool.run(capture_output=False) +# ============================================================================= if __name__ == '__main__': build_all() diff --git a/run_configs/gcom/build_gcom_ar.py b/run_configs/gcom/build_gcom_ar.py index 3585ce01..f89b4380 100755 --- a/run_configs/gcom/build_gcom_ar.py +++ b/run_configs/gcom/build_gcom_ar.py @@ -4,15 +4,18 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## + from fab.build_config import BuildConfig from fab.steps.archive_objects import archive_objects from fab.steps.cleanup_prebuilds import cleanup_prebuilds +from fab.tools import ToolBox from gcom_build_steps import common_build_steps if __name__ == '__main__': - with BuildConfig(project_label='gcom object archive $compiler') as state: + with BuildConfig(project_label='gcom object archive $compiler', + tool_box=ToolBox()) as state: common_build_steps(state) archive_objects(state, output_fpath='$output/libgcom.a') cleanup_prebuilds(state, all_unused=True) diff --git a/run_configs/gcom/build_gcom_so.py b/run_configs/gcom/build_gcom_so.py index bb9020fe..09a97af1 100755 --- a/run_configs/gcom/build_gcom_so.py +++ b/run_configs/gcom/build_gcom_so.py @@ -4,6 +4,8 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## + +from fab.tools import ToolBox from fab.build_config import BuildConfig from fab.steps.cleanup_prebuilds import cleanup_prebuilds from fab.steps.link import link_shared_object @@ -17,7 +19,8 @@ # we can add our own arguments here parsed_args = arg_parser.parse_args() - with BuildConfig(project_label='gcom shared library $compiler') as state: + with BuildConfig(project_label='gcom shared library $compiler', + tool_box=ToolBox()) as state: common_build_steps(state, fpic=True) link_shared_object(state, output_fpath='$output/libgcom.so'), cleanup_prebuilds(state, all_unused=True) diff --git a/run_configs/gcom/grab_gcom.py b/run_configs/gcom/grab_gcom.py index 039b0c83..0b53b9d3 100755 --- a/run_configs/gcom/grab_gcom.py +++ b/run_configs/gcom/grab_gcom.py @@ -4,14 +4,17 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## + from fab.build_config import BuildConfig from fab.steps.grab.fcm import fcm_export +from fab.tools import ToolBox revision = 'vn7.6' # we put this here so the two build configs can read its source_root -grab_config = BuildConfig(project_label=f'gcom_source {revision}') +grab_config = BuildConfig(project_label=f'gcom_source {revision}', + tool_box=ToolBox()) if __name__ == '__main__': diff --git a/run_configs/jules/build_jules.py b/run_configs/jules/build_jules.py index bfeb2024..f3fc983c 100755 --- a/run_configs/jules/build_jules.py +++ b/run_configs/jules/build_jules.py @@ -17,20 +17,36 @@ from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran from fab.steps.root_inc_files import root_inc_files +from fab.tools 
import Ifort, Linker, ToolBox logger = logging.getLogger('fab') +# TODO 312: we need to support non-intel compiler here. + +class MpiIfort(Ifort): + '''A small wrapper to make mpif90 available.''' + def __init__(self): + super().__init__(name="mpif90", exec_name="mpif90") + + if __name__ == '__main__': revision = 'vn6.3' - with BuildConfig(project_label=f'jules {revision} $compiler') as state: + tool_box = ToolBox() + # Create a new Fortran compiler MpiIfort + fc = MpiIfort() + tool_box.add_tool(fc) + # Use the compiler as linker: + tool_box.add_tool(Linker(compiler=fc)) + + with BuildConfig(project_label=f'jules {revision} $compiler', + tool_box=tool_box) as state: # grab the source. todo: use some checkouts instead of exports in these configs. fcm_export(state, src='fcm:jules.xm_tr/src', revision=revision, dst_label='src') fcm_export(state, src='fcm:jules.xm_tr/utils', revision=revision, dst_label='utils') - # grab_pre_build(state, path='/not/a/real/folder', allow_fail=True), # find the source files @@ -47,12 +63,12 @@ preprocess_fortran(state, common_flags=['-P', '-DMPI_DUMMY', '-DNCDF_DUMMY', '-I$output']) - analyse(state, root_symbol='jules', unreferenced_deps=['imogen_update_carb']), + analyse(state, root_symbol='jules', unreferenced_deps=['imogen_update_carb']) compile_fortran(state) - archive_objects(state), + archive_objects(state) - link_exe(state, linker='mpifort', flags=['-lm', '-lnetcdff', '-lnetcdf']), + link_exe(state, flags=['-lm', '-lnetcdff', '-lnetcdf']) cleanup_prebuilds(state, n_versions=1) diff --git a/run_configs/lfric/atm.py b/run_configs/lfric/atm.py index 1d3dac66..c297499c 100755 --- a/run_configs/lfric/atm.py +++ b/run_configs/lfric/atm.py @@ -13,6 +13,7 @@ from fab.steps.preprocess import preprocess_fortran, preprocess_c from fab.steps.psyclone import psyclone, preprocess_x90 from fab.steps.find_source_files import find_source_files, Exclude, Include +from fab.tools import ToolBox from grab_lfric import lfric_source_config, gpl_utils_source_config from lfric_common import configurator, fparser_workaround_stop_concatenation @@ -166,8 +167,13 @@ def get_transformation_script(fpath, config): :rtype: Path ''' - optimisation_path = config.source_root / 'lfric' / 'lfric_atm' / 'optimisation' / 'meto-spice' - local_transformation_script = optimisation_path / (fpath.relative_to(config.source_root).with_suffix('.py')) + optimisation_path = config.source_root / 'optimisation' / 'meto-spice' + for base_path in [config.source_root, config.build_output]: + try: + relative_path = fpath.relative_to(base_path) + except ValueError: + pass + local_transformation_script = optimisation_path / (relative_path.with_suffix('.py')) if local_transformation_script.exists(): return local_transformation_script global_transformation_script = optimisation_path / 'global.py' @@ -180,7 +186,8 @@ def get_transformation_script(fpath, config): lfric_source = lfric_source_config.source_root / 'lfric' gpl_utils_source = gpl_utils_source_config.source_root / 'gpl_utils' - with BuildConfig(project_label='atm $compiler $two_stage') as state: + with BuildConfig(project_label='atm $compiler $two_stage', + tool_box=ToolBox()) as state: # todo: use different dst_labels because they all go into the same folder, # making it hard to see what came from where? 
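A usage sketch for the function above (assuming the `transformation_script`
parameter of the `psyclone` step, as used by these run configs):

.. code-block::

    psyclone(state, kernel_roots=[state.build_output],
             transformation_script=get_transformation_script,
             cli_args=[])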
@@ -212,13 +219,14 @@ def get_transformation_script(fpath, config): # lfric_atm grab_folder(state, src=lfric_source / 'lfric_atm/source/', dst_label='lfric') - + grab_folder(state, src=lfric_source / 'lfric_atm' / 'optimisation', + dst_label='optimisation') # generate more source files in source and source/configuration configurator(state, lfric_source=lfric_source, gpl_utils_source=gpl_utils_source, rose_meta_conf=lfric_source / 'lfric_atm/rose-meta/lfric-lfric_atm/HEAD/rose-meta.conf', - config_dir=state.source_root / 'lfric/configuration'), + config_dir=state.source_root / 'lfric/configuration') find_source_files(state, path_filters=file_filtering(state)) @@ -288,7 +296,6 @@ def get_transformation_script(fpath, config): link_exe( state, - linker='mpifort', flags=[ '-lyaxt', '-lyaxt_c', '-lnetcdff', '-lnetcdf', '-lhdf5', # EXTERNAL_DYNAMIC_LIBRARIES '-lxios', # EXTERNAL_STATIC_LIBRARIES diff --git a/run_configs/lfric/grab_lfric.py b/run_configs/lfric/grab_lfric.py index 7acf1418..c649ada2 100755 --- a/run_configs/lfric/grab_lfric.py +++ b/run_configs/lfric/grab_lfric.py @@ -4,8 +4,10 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## + from fab.build_config import BuildConfig from fab.steps.grab.fcm import fcm_export +from fab.tools import ToolBox LFRIC_REVISION = 41709 @@ -13,8 +15,11 @@ # these configs are interrogated by the build scripts # todo: doesn't need two separate configs, they use the same project workspace -lfric_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}') -gpl_utils_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}') +tool_box = ToolBox() +lfric_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}', + tool_box=tool_box) +gpl_utils_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}', + tool_box=tool_box) if __name__ == '__main__': diff --git a/run_configs/lfric/gungho.py b/run_configs/lfric/gungho.py index e8789af6..5454d8ca 100755 --- a/run_configs/lfric/gungho.py +++ b/run_configs/lfric/gungho.py @@ -15,6 +15,7 @@ from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran from fab.steps.psyclone import psyclone, preprocess_x90 +from fab.tools import ToolBox from grab_lfric import lfric_source_config, gpl_utils_source_config from lfric_common import configurator, fparser_workaround_stop_concatenation @@ -27,8 +28,13 @@ def get_transformation_script(fpath, config): :rtype: Path ''' - optimisation_path = config.source_root / 'lfric' / 'miniapps' / 'gungho_model' / 'optimisation' / 'meto-spice' - local_transformation_script = optimisation_path / (fpath.relative_to(config.source_root).with_suffix('.py')) + optimisation_path = config.source_root / 'optimisation' / 'meto-spice' + for base_path in [config.source_root, config.build_output]: + try: + relative_path = fpath.relative_to(base_path) + except ValueError: + pass + local_transformation_script = optimisation_path / (relative_path.with_suffix('.py')) if local_transformation_script.exists(): return local_transformation_script global_transformation_script = optimisation_path / 'global.py' @@ -41,7 +47,8 @@ def get_transformation_script(fpath, config): lfric_source = lfric_source_config.source_root / 'lfric' gpl_utils_source = gpl_utils_source_config.source_root / 'gpl_utils' - with BuildConfig(project_label='gungho $compiler $two_stage') as state: + 
with BuildConfig(project_label='gungho $compiler $two_stage', + tool_box=ToolBox()) as state: grab_folder(state, src=lfric_source / 'infrastructure/source/', dst_label='') grab_folder(state, src=lfric_source / 'components/driver/source/', dst_label='') grab_folder(state, src=lfric_source / 'components' / 'inventory' / 'source', dst_label='') @@ -50,7 +57,8 @@ def get_transformation_script(fpath, config): grab_folder(state, src=lfric_source / 'gungho/source/', dst_label='') grab_folder(state, src=lfric_source / 'um_physics/source/', dst_label='') grab_folder(state, src=lfric_source / 'miniapps' / 'gungho_model' / 'source', dst_label='') - + grab_folder(state, src=lfric_source / 'miniapps' / 'gungho_model' / 'optimisation', + dst_label='optimisation') grab_folder(state, src=lfric_source / 'jules/source/', dst_label='') grab_folder(state, src=lfric_source / 'socrates/source/', dst_label='') @@ -109,7 +117,6 @@ def get_transformation_script(fpath, config): link_exe( state, - linker='mpifort', flags=[ '-fopenmp', diff --git a/run_configs/lfric/lfric_common.py b/run_configs/lfric/lfric_common.py index 4310097f..fd4488c6 100644 --- a/run_configs/lfric/lfric_common.py +++ b/run_configs/lfric/lfric_common.py @@ -4,11 +4,23 @@ from pathlib import Path from fab.steps import step -from fab.tools import run_command +from fab.tools import Category, Tool logger = logging.getLogger('fab') +class Script(Tool): + '''A simple wrapper that runs a shell script. + :name: the path to the script to run. + ''' + def __init__(self, name: Path): + super().__init__(name=name.name, exec_name=str(name), + category=Category.MISC) + + def check_available(self): + return True + + # todo: is this part of psyclone? if so, put it in the psyclone step module? @step def configurator(config, lfric_source: Path, gpl_utils_source: Path, rose_meta_conf: Path, config_dir=None): @@ -27,49 +39,34 @@ def configurator(config, lfric_source: Path, gpl_utils_source: Path, rose_meta_c # "rose picker" # creates rose-meta.json and config_namelists.txt in gungho/source/configuration logger.info('rose_picker') - run_command( - command=[ - str(rose_picker_tool), str(rose_meta_conf), - '-directory', str(config_dir), - '-include_dirs', lfric_source], - env=env, - ) + rose_picker = Script(rose_picker_tool) + rose_picker.run(additional_parameters=[str(rose_meta_conf), + '-directory', str(config_dir), + '-include_dirs', lfric_source], + env=env) # "build_config_loaders" # builds a bunch of f90s from the json logger.info('GenerateNamelist') - run_command( - command=[ - str(gen_namelist_tool), - '-verbose', - str(config_dir / 'rose-meta.json'), - '-directory', str(config_dir), - # '--norandom_enums' - ] - ) + gen_namelist = Script(gen_namelist_tool) + gen_namelist.run(additional_parameters=['-verbose', + str(config_dir / 'rose-meta.json'), + '-directory', str(config_dir)]) # create configuration_mod.f90 in source root logger.info('GenerateLoader') + gen_loader = Script(gen_loader_tool) names = [name.strip() for name in open(config_dir / 'config_namelists.txt').readlines()] configuration_mod_fpath = config.source_root / 'configuration_mod.f90' - run_command( - command=[ - str(gen_loader_tool), - configuration_mod_fpath, - *names, - ] - ) + gen_loader.run(additional_parameters=[configuration_mod_fpath, + *names]) # create feign_config_mod.f90 in source root logger.info('GenerateFeigns') + feign_config = Script(gen_feigns_tool) feign_config_mod_fpath = config.source_root / 'feign_config_mod.f90' - run_command( - command=[ - str(gen_feigns_tool), - 
str(config_dir / 'rose-meta.json'), - '-output', feign_config_mod_fpath, - ] - ) + feign_config.run(additional_parameters=[str(config_dir / 'rose-meta.json'), + '-output', feign_config_mod_fpath]) # put the generated source into an artefact # todo: we shouldn't need to do this, should we? diff --git a/run_configs/lfric/mesh_tools.py b/run_configs/lfric/mesh_tools.py index ea1f6f97..634b7834 100755 --- a/run_configs/lfric/mesh_tools.py +++ b/run_configs/lfric/mesh_tools.py @@ -11,6 +11,7 @@ from fab.steps.preprocess import preprocess_fortran from fab.steps.find_source_files import find_source_files, Exclude from fab.steps.psyclone import psyclone, preprocess_x90 +from fab.tools import ToolBox from lfric_common import configurator, fparser_workaround_stop_concatenation from grab_lfric import lfric_source_config, gpl_utils_source_config @@ -23,7 +24,8 @@ # this folder just contains previous output, for testing the overrides mechanism. psyclone_overrides = Path(__file__).parent / 'mesh_tools_overrides' - with BuildConfig(project_label='mesh tools $compiler $two_stage') as state: + with BuildConfig(project_label='mesh tools $compiler $two_stage', + tool_box=ToolBox()) as state: grab_folder(state, src=lfric_source / 'infrastructure/source/', dst_label='') grab_folder(state, src=lfric_source / 'mesh_tools/source/', dst_label='') grab_folder(state, src=lfric_source / 'components/science/source/', dst_label='') @@ -72,7 +74,6 @@ # link the 3 trees' objects link_exe( state, - linker='mpifort', flags=[ '-lyaxt', '-lyaxt_c', '-lnetcdff', '-lnetcdf', '-lhdf5', # EXTERNAL_DYNAMIC_LIBRARIES '-lxios', # EXTERNAL_STATIC_LIBRARIES diff --git a/run_configs/tiny_fortran/build_tiny_fortran.py b/run_configs/tiny_fortran/build_tiny_fortran.py index 1c1f0c21..17907cdd 100755 --- a/run_configs/tiny_fortran/build_tiny_fortran.py +++ b/run_configs/tiny_fortran/build_tiny_fortran.py @@ -4,6 +4,7 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## + from fab.build_config import BuildConfig from fab.steps.analyse import analyse from fab.steps.compile_fortran import compile_fortran @@ -11,12 +12,28 @@ from fab.steps.grab.git import git_checkout from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran +from fab.tools import Ifort, Linker, ToolBox + + +class MpiIfort(Ifort): + '''A small wrapper to make mpiifort available.''' + def __init__(self): + super().__init__(name="mpifort", exec_name="mpifort") if __name__ == '__main__': - with BuildConfig(project_label='tiny_fortran $compiler') as state: - git_checkout(state, src='https://github.com/metomi/fab-test-data.git', revision='main', dst_label='src'), + tool_box = ToolBox() + # Create a new Fortran compiler MpiIfort + fc = MpiIfort() + tool_box.add_tool(fc) + # Use the compiler as linker: + tool_box.add_tool(Linker(compiler=fc)) + + with BuildConfig(project_label='tiny_fortran $compiler', + tool_box=tool_box) as state: + git_checkout(state, src='https://github.com/metomi/fab-test-data.git', + revision='main', dst_label='src'), find_source_files(state), @@ -25,4 +42,4 @@ analyse(state, root_symbol='my_prog'), compile_fortran(state), - link_exe(state, linker='mpifort'), + link_exe(state), diff --git a/run_configs/um/build_um.py b/run_configs/um/build_um.py index 9231a680..ce769865 100755 --- a/run_configs/um/build_um.py +++ b/run_configs/um/build_um.py @@ -21,12 +21,13 @@ from 
fab.steps.archive_objects import archive_objects from fab.steps.c_pragma_injector import c_pragma_injector from fab.steps.compile_c import compile_c -from fab.steps.compile_fortran import compile_fortran, get_fortran_compiler +from fab.steps.compile_fortran import compile_fortran from fab.steps.grab.fcm import fcm_export from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_c, preprocess_fortran from fab.steps.find_source_files import find_source_files, Exclude, Include from fab.steps.root_inc_files import root_inc_files +from fab.tools import Category, ToolBox logger = logging.getLogger('fab') @@ -124,11 +125,14 @@ def replace_in_file(inpath, outpath, find, replace): revision = 'vn12.1' um_revision = revision.replace('vn', 'um') + state = BuildConfig(project_label=f'um atmos safe {revision} $compiler $two_stage', + tool_box=ToolBox()) + # compiler-specific flags - compiler, _ = get_fortran_compiler() - if compiler == 'gfortran': + compiler = state.tool_box[Category.FORTRAN_COMPILER] + if compiler.name == 'gfortran': compiler_specific_flags = ['-fdefault-integer-8', '-fdefault-real-8', '-fdefault-double-8'] - elif compiler == 'ifort': + elif compiler.name == 'ifort': # compiler_specific_flags = ['-r8'] compiler_specific_flags = [ '-i8', '-r8', '-mcmodel=medium', @@ -144,7 +148,7 @@ def replace_in_file(inpath, outpath, find, replace): compiler_specific_flags = [] # todo: document: if you're changing compilers, put $compiler in your label - with BuildConfig(project_label=f'um atmos safe {revision} $compiler $two_stage') as state: + with state: # todo: these repo defs could make a good set of reusable variables @@ -222,7 +226,7 @@ def replace_in_file(inpath, outpath, find, replace): # Locate the gcom library. UM 12.1 intended to be used with gcom 7.6 gcom_build = os.getenv('GCOM_BUILD') or os.path.normpath(os.path.expanduser( - state.project_workspace / f"../gcom_object_archive_{compiler}/build_output")) + state.project_workspace / f"../gcom_object_archive_{compiler.name}/build_output")) if not os.path.exists(gcom_build): raise RuntimeError(f'gcom not found at {gcom_build}') @@ -243,11 +247,10 @@ def replace_in_file(inpath, outpath, find, replace): ) # this step just makes linker error messages more manageable - archive_objects(state), + archive_objects(state) link_exe( state, - linker='mpifort', flags=[ '-lc', '-lgfortran', '-L', '~/.conda/envs/sci-fab/lib', '-L', gcom_build, '-l', 'gcom' diff --git a/source/fab/artefacts.py b/source/fab/artefacts.py index 0749e781..235a91d1 100644 --- a/source/fab/artefacts.py +++ b/source/fab/artefacts.py @@ -20,6 +20,21 @@ from fab.util import suffix_filter +class ArtefactStore(dict): + '''This object stores artefacts (which can be of any type). Each artefact + is indexed by a string. + ''' + def __init__(self): + super().__init__() + self.reset() + + def reset(self): + '''Clears the artefact store (but does not delete any files). + ''' + self.clear() + self[CURRENT_PREBUILDS] = set() + + class ArtefactsGetter(ABC): """ Abstract base class for artefact getters. 
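A minimal behavioural sketch of the relocated `ArtefactStore` class
(assuming `CURRENT_PREBUILDS` from `fab.constants`, which this module
uses):

.. code-block::

    from fab.artefacts import ArtefactStore
    from fab.constants import CURRENT_PREBUILDS

    store = ArtefactStore()            # a dict with a managed prebuilds set
    store['all_source'] = ['x.f90']    # artefacts are indexed by string
    store.reset()                      # clears entries, re-creates the set
    assert store[CURRENT_PREBUILDS] == set()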
@@ -83,7 +98,7 @@ def __init__(self, collections: Iterable[Union[str, ArtefactsGetter]]): self.collections = collections # todo: ensure the labelled values are iterables - def __call__(self, artefact_store: Dict): + def __call__(self, artefact_store: ArtefactStore): # todo: this should be a set, in case a file appears in multiple collections result = [] for collection in self.collections: @@ -116,7 +131,7 @@ def __init__(self, collection_name: str, suffix: Union[str, List[str]]): self.collection_name = collection_name self.suffixes = [suffix] if isinstance(suffix, str) else suffix - def __call__(self, artefact_store): + def __call__(self, artefact_store: ArtefactStore): # todo: returning an empty list is probably "dishonest" if the collection doesn't exist - return None instead? fpaths: Iterable[Path] = artefact_store.get(self.collection_name, []) return suffix_filter(fpaths, self.suffixes) @@ -146,7 +161,7 @@ def __init__(self, suffix: Union[str, List[str]], collection_name: str = BUILD_T self.collection_name = collection_name self.suffixes = [suffix] if isinstance(suffix, str) else suffix - def __call__(self, artefact_store): + def __call__(self, artefact_store: ArtefactStore): build_trees = artefact_store[self.collection_name] @@ -155,18 +170,3 @@ def __call__(self, artefact_store): build_lists[root] = filter_source_tree(source_tree=tree, suffixes=self.suffixes) return build_lists - - -class ArtefactStore(dict): - '''This object stores artefacts (which can be of any type). Each artefact - is indexed by a string. - ''' - def __init__(self): - super().__init__() - self.reset() - - def reset(self): - '''Clears the artefact store (but does not delete any files). - ''' - self.clear() - self[CURRENT_PREBUILDS] = set() diff --git a/source/fab/build_config.py b/source/fab/build_config.py index bddb6708..f55ef185 100644 --- a/source/fab/build_config.py +++ b/source/fab/build_config.py @@ -23,6 +23,8 @@ from fab.artefacts import ArtefactStore from fab.constants import BUILD_OUTPUT, SOURCE_ROOT, PREBUILD, CURRENT_PREBUILDS from fab.metrics import send_metric, init_metrics, stop_metrics, metrics_summary +from fab.tools.category import Category +from fab.tools.tool_box import ToolBox from fab.steps.cleanup_prebuilds import CLEANUP_COUNT, cleanup_prebuilds from fab.util import TimerLogger, by_type, get_fab_workspace @@ -37,15 +39,17 @@ class BuildConfig(): but rather through the build_config() context manager. """ - def __init__(self, project_label: str, multiprocessing: bool = True, n_procs: Optional[int] = None, - reuse_artefacts: bool = False, fab_workspace: Optional[Path] = None, two_stage=False, verbose=False): + def __init__(self, project_label: str, + tool_box: ToolBox, + multiprocessing: bool = True, n_procs: Optional[int] = None, + reuse_artefacts: bool = False, + fab_workspace: Optional[Path] = None, two_stage=False, + verbose=False): """ :param project_label: Name of the build project. The project workspace folder is created from this name, with spaces replaced by underscores. - :param parsed_args: - If you want to add arguments to your script, please use common_arg_parser() and add arguments. - This pararmeter is the result of running :func:`ArgumentParser.parse_args`. + :param tool_box: The ToolBox with all tools to use in the build. :param multiprocessing: An option to disable multiprocessing to aid debugging. :param n_procs: @@ -63,12 +67,12 @@ def __init__(self, project_label: str, multiprocessing: bool = True, n_procs: Op DEBUG level logging. 
""" + self._tool_box = tool_box self.two_stage = two_stage self.verbose = verbose - from fab.steps.compile_fortran import get_fortran_compiler - compiler, _ = get_fortran_compiler() + compiler = tool_box[Category.FORTRAN_COMPILER] project_label = Template(project_label).safe_substitute( - compiler=compiler, + compiler=compiler.name, two_stage=f'{int(two_stage)+1}stage') self.project_label: str = project_label.replace(' ', '_') @@ -143,6 +147,11 @@ def __exit__(self, exc_type, exc_val, exc_tb): self._finalise_metrics(self._start_time, self._build_timer) self._finalise_logging() + @property + def tool_box(self) -> ToolBox: + ''':returns: the tool box to use.''' + return self._tool_box + @property def artefact_store(self) -> ArtefactStore: ''':returns: the Artefact instance for this configuration. @@ -298,7 +307,7 @@ def flags_for_path(self, path: Path, config): :param path: The file path for which we want command-line flags. :param config: - THe config contains the source root and project workspace. + The config contains the source root and project workspace. """ # We COULD make the user pass these template params to the constructor diff --git a/source/fab/cli.py b/source/fab/cli.py index 7080b542..5cc40315 100644 --- a/source/fab/cli.py +++ b/source/fab/cli.py @@ -3,10 +3,13 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## + +'''Functions to run Fab from the command line. +''' + import sys from pathlib import Path from typing import Dict, Optional -import os from fab.steps.analyse import analyse from fab.steps.c_pragma_injector import c_pragma_injector @@ -16,12 +19,12 @@ from fab.artefacts import CollectionGetter from fab.build_config import BuildConfig from fab.constants import PRAGMAD_C -from fab.steps.compile_fortran import compile_fortran, get_fortran_compiler +from fab.steps.compile_fortran import compile_fortran from fab.steps.find_source_files import find_source_files from fab.steps.grab.folder import grab_folder from fab.steps.preprocess import preprocess_c, preprocess_fortran +from fab.tools import Category, ToolBox, ToolRepository from fab.util import common_arg_parser -from fab.tools import get_tool def _generic_build_config(folder: Path, kwargs=None) -> BuildConfig: @@ -29,56 +32,43 @@ def _generic_build_config(folder: Path, kwargs=None) -> BuildConfig: if kwargs: project_label = kwargs.pop('project_label', 'zero_config_build') or project_label + # Set the default Fortran compiler as linker (otherwise e.g. the + # C compiler might be used in linking, requiring additional flags) + tr = ToolRepository() + fc = tr.get_default(Category.FORTRAN_COMPILER) + # TODO: This assumes a mapping of compiler name to the corresponding + # linker name (i.e. `linker-gfortran` or `linker-ifort`). Still, that's + # better than hard-coding gnu here. + linker = tr.get_tool(Category.LINKER, f"linker-{fc.name}") + tool_box = ToolBox() + tool_box.add_tool(fc) + tool_box.add_tool(linker) # Within the fab workspace, we'll create a project workspace. # Ideally we'd just use folder.name, but to avoid clashes, we'll use the full absolute path. 
- linker, linker_flags = calc_linker_flags() - with BuildConfig(project_label=project_label, **kwargs) as config: - grab_folder(config, folder), - find_source_files(config), - - root_inc_files(config), # JULES helper, get rid of this eventually - - preprocess_fortran(config), - - c_pragma_injector(config), - preprocess_c(config, source=CollectionGetter(PRAGMAD_C)), - - analyse(config, find_programs=True), - - compile_fortran(config), - compile_c(config), - - link_exe(config, linker=linker, flags=linker_flags), + with BuildConfig(project_label=project_label, + tool_box=tool_box, **kwargs) as config: + grab_folder(config, folder) + find_source_files(config) + root_inc_files(config) # JULES helper, get rid of this eventually + preprocess_fortran(config) + c_pragma_injector(config) + preprocess_c(config, source=CollectionGetter(PRAGMAD_C)) + analyse(config, find_programs=True) + compile_fortran(config) + compile_c(config) + # If ifort should be used, it might need the flag `-nofor-main` in + # case of a mixed language compilation (main program in C, linking + # with ifort). + link_exe(config, flags=[]) return config -def calc_linker_flags(): - - fc, _ = get_fortran_compiler() - - # linker and flags depend on compiler - linkers = { - 'gfortran': ('gcc', ['-lgfortran']), - # todo: test this and get it running - # 'ifort': (..., [...]) - } - - try: - # Get linker from $LD - linker, linker_flags = get_tool(os.environ.get("LD", None)) - except ValueError: - # Get linker from linkers, or else just use $FC - linker, linker_flags = linkers.get(os.path.basename(fc), (fc, [])) - - return linker, linker_flags - - def cli_fab(folder: Optional[Path] = None, kwargs: Optional[Dict] = None): """ - Running Fab from the command line will attempt to build the project in the current or given folder. - The following params are used for testing. When run normally any parameters will be caught - by a common_arg_parser. + Running Fab from the command line will attempt to build the project in the current or + given folder. The following params are used for testing. When run normally any parameters + will be caught by a common_arg_parser. :param folder: source folder (Testing Only) @@ -88,8 +78,9 @@ def cli_fab(folder: Optional[Path] = None, kwargs: Optional[Dict] = None): """ kwargs = kwargs or {} - # We check if 'fab' was called directly. As it can be called by other things like 'pytest', the cli arguments - # may not apply to 'fab' which will cause arg_parser to fail with an invalid argument message. + # We check if 'fab' was called directly. As it can be called by other things like 'pytest', + # the cli arguments may not apply to 'fab' which will cause arg_parser to fail with an + # invalid argument message. 
if Path(sys.argv[0]).parts[-1] == 'fab': arg_parser = common_arg_parser() kwargs = vars(arg_parser.parse_args()) diff --git a/source/fab/parse/__init__.py b/source/fab/parse/__init__.py index b07838b5..7d7319b1 100644 --- a/source/fab/parse/__init__.py +++ b/source/fab/parse/__init__.py @@ -39,11 +39,11 @@ def __init__(self, fpath: Union[str, Path], file_hash: Optional[int] = None): self._file_hash = file_hash @property - def file_hash(self): + def file_hash(self) -> int: if self._file_hash is None: if not self.fpath.exists(): raise ValueError(f"analysed file '{self.fpath}' does not exist") - self._file_hash: int = file_checksum(self.fpath).file_hash + self._file_hash = file_checksum(self.fpath).file_hash return self._file_hash def __eq__(self, other): diff --git a/source/fab/parse/fortran_common.py b/source/fab/parse/fortran_common.py index f35c243b..0ed4f3fe 100644 --- a/source/fab/parse/fortran_common.py +++ b/source/fab/parse/fortran_common.py @@ -59,7 +59,6 @@ def _typed_child(parent, child_type: Type, must_exist=False): # Returns the child or None. # Raises ValueError if more than one child of the given type is found. children = list(filter(lambda child: isinstance(child, child_type), parent.children)) - print(children) if len(children) > 1: raise ValueError(f"too many children found of type {child_type}") diff --git a/source/fab/steps/archive_objects.py b/source/fab/steps/archive_objects.py index c450af4b..f4d5efcf 100644 --- a/source/fab/steps/archive_objects.py +++ b/source/fab/steps/archive_objects.py @@ -16,7 +16,7 @@ from fab.constants import OBJECT_FILES, OBJECT_ARCHIVES from fab.steps import step from fab.util import log_or_dot -from fab.tools import run_command +from fab.tools import Ar, Category from fab.artefacts import ArtefactsGetter, CollectionGetter logger = logging.getLogger(__name__) @@ -30,8 +30,10 @@ # todo: all this documentation for such a simple step - should we split it up somehow? @step -def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = None, archiver='ar', - output_fpath=None, output_collection=OBJECT_ARCHIVES): +def archive_objects(config: BuildConfig, + source: Optional[ArtefactsGetter] = None, + output_fpath=None, + output_collection=OBJECT_ARCHIVES): """ Create an object archive for every build target, from their object files. @@ -75,8 +77,6 @@ def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = Non :param source: An :class:`~fab.artefacts.ArtefactsGetter` which give us our lists of objects to archive. The artefacts are expected to be of the form `Dict[root_symbol_name, list_of_object_files]`. - :param archiver: - The archiver executable. Defaults to 'ar'. :param output_fpath: The file path of the archive file to create. This string can include templating, where "$output" is replaced with the output folder. 
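For instance, the gcom configuration earlier in this series uses the
templating to place a static library in the build output:

.. code-block::

    archive_objects(state, output_fpath='$output/libgcom.a')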
@@ -91,9 +91,11 @@ def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = Non # todo: the output path should not be an abs fpath, it should be relative to the proj folder source_getter = source or DEFAULT_SOURCE_GETTER - archiver = archiver + ar = config.tool_box[Category.AR] + if not isinstance(ar, Ar): + raise RuntimeError(f"Unexpected tool '{ar.name}' of type " + f"'{type(ar)}' instead of Ar") output_fpath = str(output_fpath) if output_fpath else None - output_collection = output_collection target_objects = source_getter(config.artefact_store) assert target_objects.keys() @@ -114,14 +116,11 @@ def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = Non output_fpath = Template(str(output_fpath)).substitute( output=config.build_output) - command = [archiver] - command.extend(['cr', output_fpath]) - command.extend(map(str, sorted(objects))) - - log_or_dot(logger, 'CreateObjectArchive running command: ' + ' '.join(command)) + log_or_dot(logger, f"CreateObjectArchive running archiver for " + f"'{output_fpath}'.") try: - run_command(command) - except Exception as err: - raise Exception(f"error creating object archive:\n{err}") + ar.create(output_fpath, sorted(objects)) + except RuntimeError as err: + raise RuntimeError(f"error creating object archive:\n{err}") from err output_archives[root] = [output_fpath] diff --git a/source/fab/steps/c_pragma_injector.py b/source/fab/steps/c_pragma_injector.py index d30321d2..623172a2 100644 --- a/source/fab/steps/c_pragma_injector.py +++ b/source/fab/steps/c_pragma_injector.py @@ -9,7 +9,7 @@ """ import re from pathlib import Path -from typing import Pattern, Optional, Match +from typing import Generator, Pattern, Optional, Match from fab import FabException from fab.constants import PRAGMAD_C @@ -54,7 +54,7 @@ def _process_artefact(fpath: Path): return prag_output_fpath -def inject_pragmas(fpath): +def inject_pragmas(fpath) -> Generator: """ Reads a C source file but when encountering an #include preprocessor directive injects a special Fab-specific diff --git a/source/fab/steps/compile_c.py b/source/fab/steps/compile_c.py index ba2be4dd..81e9bef5 100644 --- a/source/fab/steps/compile_c.py +++ b/source/fab/steps/compile_c.py @@ -9,8 +9,6 @@ """ import logging import os -import warnings -import zlib from collections import defaultdict from dataclasses import dataclass from typing import List, Dict, Optional, Tuple @@ -22,7 +20,7 @@ from fab.metrics import send_metric from fab.parse.c import AnalysedC from fab.steps import check_for_errors, run_mp, step -from fab.tools import flags_checksum, run_command, get_tool, get_compiler_version +from fab.tools import Category, CCompiler, Flags from fab.util import CompiledFile, log_or_dot, Timer, by_type logger = logging.getLogger(__name__) @@ -32,11 +30,10 @@ @dataclass -class MpCommonArgs(object): +class MpCommonArgs: + '''A simple class to pass arguments to subprocesses.''' config: BuildConfig flags: FlagsConfig - compiler: str - compiler_version: str @step @@ -48,7 +45,6 @@ def compile_c(config, common_flags: Optional[List[str]] = None, This step uses multiprocessing. All C files are compiled in a single pass. - The command line compiler to is taken from the environment variable `CC`, and defaults to `gcc -c`. Uses multiprocessing, unless disabled in the *config*. @@ -66,18 +62,11 @@ def compile_c(config, common_flags: Optional[List[str]] = None, """ # todo: tell the compiler (and other steps) which artefact name to create? 
- compiler, compiler_flags = get_tool(os.getenv('CC', 'gcc -c')) - compiler_version = get_compiler_version(compiler) - logger.info(f'c compiler is {compiler} {compiler_version}') + compiler = config.tool_box[Category.C_COMPILER] + logger.info(f'C compiler is {compiler}') env_flags = os.getenv('CFLAGS', '').split() - common_flags = compiler_flags + env_flags + (common_flags or []) - - # make sure we have a -c - # todo: c compiler awareness, like we have with fortran? - if '-c' not in common_flags: - warnings.warn("Adding '-c' to C compiler flags") - common_flags = ['-c'] + common_flags + common_flags = env_flags + (common_flags or []) flags = FlagsConfig(common_flags=common_flags, path_flags=path_flags) source_getter = source or DEFAULT_SOURCE_GETTER @@ -87,7 +76,7 @@ def compile_c(config, common_flags: Optional[List[str]] = None, to_compile: list = sum(build_lists.values(), []) logger.info(f"compiling {len(to_compile)} c files") - mp_payload = MpCommonArgs(config=config, flags=flags, compiler=compiler, compiler_version=compiler_version) + mp_payload = MpCommonArgs(config=config, flags=flags) mp_items = [(fpath, mp_payload) for fpath in to_compile] # compile everything in one go @@ -121,27 +110,27 @@ def store_artefacts(compiled_files: List[CompiledFile], build_lists: Dict[str, L def _compile_file(arg: Tuple[AnalysedC, MpCommonArgs]): analysed_file, mp_payload = arg - + config = mp_payload.config + compiler = config.tool_box[Category.C_COMPILER] + if not isinstance(compiler, CCompiler): + raise RuntimeError(f"Unexpected tool '{compiler.name}' of type " + f"'{type(compiler)}' instead of CCompiler") with Timer() as timer: - flags = mp_payload.flags.flags_for_path(path=analysed_file.fpath, config=mp_payload.config) - obj_combo_hash = _get_obj_combo_hash(mp_payload.compiler, mp_payload.compiler_version, analysed_file, flags) + flags = Flags(mp_payload.flags.flags_for_path(path=analysed_file.fpath, + config=config)) + obj_combo_hash = _get_obj_combo_hash(compiler, analysed_file, flags) - obj_file_prebuild = mp_payload.config.prebuild_folder / f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o' + obj_file_prebuild = config.prebuild_folder / f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o' # prebuild available? 
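+        # (The combo hash folds together the source file hash, the flag
+        #  checksum and the compiler hash, so a change to any of them
+        #  yields a new '<stem>.<hash>.o' prebuild name.)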
if obj_file_prebuild.exists(): log_or_dot(logger, f'CompileC using prebuild: {analysed_file.fpath}') else: obj_file_prebuild.parent.mkdir(parents=True, exist_ok=True) - - command = mp_payload.compiler.split() # type: ignore - command.extend(flags) - command.append(str(analysed_file.fpath)) - command.extend(['-o', str(obj_file_prebuild)]) - log_or_dot(logger, f'CompileC compiling {analysed_file.fpath}') try: - run_command(command) + compiler.compile_file(analysed_file.fpath, obj_file_prebuild, + add_flags=flags) except Exception as err: return FabException(f"error compiling {analysed_file.fpath}:\n{err}") @@ -152,14 +141,13 @@ def _compile_file(arg: Tuple[AnalysedC, MpCommonArgs]): return CompiledFile(input_fpath=analysed_file.fpath, output_fpath=obj_file_prebuild) -def _get_obj_combo_hash(compiler, compiler_version, analysed_file, flags): +def _get_obj_combo_hash(compiler, analysed_file, flags: Flags): # get a combo hash of things which matter to the object file we define try: obj_combo_hash = sum([ analysed_file.file_hash, - flags_checksum(flags), - zlib.crc32(compiler.encode()), - zlib.crc32(compiler_version.encode()), + flags.checksum(), + compiler.get_hash(), ]) except TypeError: raise ValueError("could not generate combo hash for object file") diff --git a/source/fab/steps/compile_fortran.py b/source/fab/steps/compile_fortran.py index 706e598e..8b3fa632 100644 --- a/source/fab/steps/compile_fortran.py +++ b/source/fab/steps/compile_fortran.py @@ -8,28 +8,24 @@ """ -# TODO: This has become too complicated. Refactor. - - import logging import os import shutil -import zlib from collections import defaultdict from dataclasses import dataclass from itertools import chain from pathlib import Path from typing import List, Set, Dict, Tuple, Optional, Union -from fab.artefacts import ArtefactsGetter, FilterBuildTrees +from fab.artefacts import ArtefactsGetter, ArtefactStore, FilterBuildTrees from fab.build_config import BuildConfig, FlagsConfig from fab.constants import OBJECT_FILES from fab.metrics import send_metric from fab.parse.fortran import AnalysedFortran from fab.steps import check_for_errors, run_mp, step -from fab.tools import COMPILERS, remove_managed_flags, flags_checksum, run_command, get_tool, get_compiler_version -from fab.util import CompiledFile, log_or_dot_finish, log_or_dot, Timer, by_type, \ - file_checksum +from fab.tools import Category, Compiler, Flags, FortranCompiler +from fab.util import (CompiledFile, log_or_dot_finish, log_or_dot, Timer, + by_type, file_checksum) logger = logging.getLogger(__name__) @@ -37,15 +33,12 @@ @dataclass -class MpCommonArgs(object): +class MpCommonArgs: """Arguments to be passed into the multiprocessing function, alongside the filenames.""" config: BuildConfig flags: FlagsConfig - compiler: str - compiler_version: str mod_hashes: Dict[str, int] - two_stage_flag: Optional[str] - stage: Optional[int] + syntax_only: bool @step @@ -71,43 +64,42 @@ def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = Non """ - compiler, compiler_version, flags_config = handle_compiler_args(common_flags, path_flags) + compiler, flags_config = handle_compiler_args(config, common_flags, + path_flags) + # Set module output folder: + compiler.set_module_output_path(config.build_output) source_getter = source or DEFAULT_SOURCE_GETTER - - # todo: move this to the known compiler flags? 
- # todo: this is a misleading name - two_stage_flag = None - if compiler == 'gfortran' and config.two_stage: - two_stage_flag = '-fsyntax-only' - mod_hashes: Dict[str, int] = {} # get all the source to compile, for all build trees, into one big lump build_lists: Dict[str, List] = source_getter(config.artefact_store) + syntax_only = compiler.has_syntax_only and config.two_stage # build the arguments passed to the multiprocessing function mp_common_args = MpCommonArgs( - config=config, flags=flags_config, compiler=compiler, compiler_version=compiler_version, - mod_hashes=mod_hashes, two_stage_flag=two_stage_flag, stage=None) + config=config, flags=flags_config, + mod_hashes=mod_hashes, syntax_only=syntax_only) # compile everything in multiple passes compiled: Dict[Path, CompiledFile] = {} uncompiled: Set[AnalysedFortran] = set(sum(build_lists.values(), [])) logger.info(f"compiling {len(uncompiled)} fortran files") - if two_stage_flag: + if syntax_only: logger.info("Starting two-stage compile: mod files, multiple passes") - mp_common_args.stage = 1 + elif config.two_stage: + logger.info(f"Compiler {compiler.name} does not support syntax-only, " + f"disabling two-stage compile.") while uncompiled: uncompiled = compile_pass(config=config, compiled=compiled, uncompiled=uncompiled, mp_common_args=mp_common_args, mod_hashes=mod_hashes) log_or_dot_finish(logger) - if two_stage_flag: + if syntax_only: logger.info("Finalising two-stage compile: object files, single pass") - mp_common_args.stage = 2 + mp_common_args.syntax_only = False # a single pass should now compile all the object files in one go uncompiled = set(sum(build_lists.values(), [])) # todo: order by last compile duration @@ -122,32 +114,22 @@ def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = Non store_artefacts(compiled, build_lists, config.artefact_store) -def handle_compiler_args(common_flags=None, path_flags=None): +def handle_compiler_args(config: BuildConfig, common_flags=None, + path_flags=None): # Command line tools are sometimes specified with flags attached. - compiler, compiler_flags = get_fortran_compiler() - - compiler_version = get_compiler_version(compiler) - logger.info(f'fortran compiler is {compiler} {compiler_version}') + compiler = config.tool_box[Category.FORTRAN_COMPILER] + if not isinstance(compiler, FortranCompiler): + raise RuntimeError(f"Unexpected tool '{compiler.name}' of type " + f"'{type(compiler)}' instead of FortranCompiler") + logger.info(f'Fortran compiler is {compiler} {compiler.get_version()}') - # collate the flags from 1) compiler env, 2) flags env and 3) params + # Collate the flags from 1) flags env and 2) parameters. env_flags = os.getenv('FFLAGS', '').split() - common_flags = compiler_flags + env_flags + (common_flags or []) - - # Do we know this compiler? If so we can manage the flags a little, to avoid duplication or misconfiguration. - # todo: This has been raised for discussion - we might never want to modify incoming flags... - known_compiler = COMPILERS.get(os.path.basename(compiler)) - if known_compiler: - common_flags = remove_managed_flags(compiler, common_flags) - else: - logger.warning(f"Unknown compiler {compiler}. Fab cannot control certain flags." - "Please ensure you specify the flag `-c` equivalent flag to only compile." - "Please ensure the module output folder is set to your config's build_output folder." 
- "or please extend fab.tools.COMPILERS in your build script.") - + common_flags = env_flags + (common_flags or []) flags_config = FlagsConfig(common_flags=common_flags, path_flags=path_flags) - return compiler, compiler_version, flags_config + return compiler, flags_config def compile_pass(config, compiled: Dict[Path, CompiledFile], uncompiled: Set[AnalysedFortran], @@ -209,7 +191,9 @@ def get_compile_next(compiled: Dict[Path, CompiledFile], uncompiled: Set[Analyse return compile_next -def store_artefacts(compiled_files: Dict[Path, CompiledFile], build_lists: Dict[str, List], artefact_store): +def store_artefacts(compiled_files: Dict[Path, CompiledFile], + build_lists: Dict[str, List], + artefact_store: ArtefactStore): """ Create our artefact collection; object files for each compiled file, per root symbol. @@ -244,10 +228,18 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ """ with Timer() as timer: analysed_file, mp_common_args = arg - - flags = mp_common_args.flags.flags_for_path(path=analysed_file.fpath, config=mp_common_args.config) - mod_combo_hash = _get_mod_combo_hash(analysed_file, mp_common_args=mp_common_args) - obj_combo_hash = _get_obj_combo_hash(analysed_file, mp_common_args=mp_common_args, flags=flags) + config = mp_common_args.config + compiler = config.tool_box[Category.FORTRAN_COMPILER] + if not isinstance(compiler, FortranCompiler): + raise RuntimeError(f"Unexpected tool '{compiler.name}' of type " + f"'{type(compiler)}' instead of " + f"FortranCompiler") + flags = Flags(mp_common_args.flags.flags_for_path(path=analysed_file.fpath, config=config)) + + mod_combo_hash = _get_mod_combo_hash(analysed_file, compiler=compiler) + obj_combo_hash = _get_obj_combo_hash(analysed_file, + mp_common_args=mp_common_args, + compiler=compiler, flags=flags) # calculate the incremental/prebuild artefact filenames obj_file_prebuild = mp_common_args.config.prebuild_folder / f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o' @@ -262,7 +254,9 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ # compile try: logger.debug(f'CompileFortran compiling {analysed_file.fpath}') - compile_file(analysed_file, flags, output_fpath=obj_file_prebuild, mp_common_args=mp_common_args) + compile_file(analysed_file.fpath, flags, + output_fpath=obj_file_prebuild, + mp_common_args=mp_common_args) except Exception as err: return Exception(f"Error compiling {analysed_file.fpath}:\n{err}"), None @@ -288,8 +282,10 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ compiled_file = CompiledFile(input_fpath=analysed_file.fpath, output_fpath=obj_file_prebuild) artefacts = [obj_file_prebuild] + mod_file_prebuilds - # todo: probably better to record both mod and obj metrics - metric_name = "compile fortran" + (f' stage {mp_common_args.stage}' if mp_common_args.stage else '') + metric_name = "compile fortran" + if mp_common_args.syntax_only: + metric_name += " syntax-only" + send_metric( group=metric_name, name=str(analysed_file.fpath), @@ -298,7 +294,8 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ return compiled_file, artefacts -def _get_obj_combo_hash(analysed_file, mp_common_args: MpCommonArgs, flags): +def _get_obj_combo_hash(analysed_file, mp_common_args: MpCommonArgs, + compiler: Compiler, flags: Flags): # get a combo hash of things which matter to the object file we define # todo: don't just silently use 0 for a missing dep hash mod_deps_hashes = { @@ -306,23 +303,21 @@ def _get_obj_combo_hash(analysed_file, mp_common_args: MpCommonArgs, flags): try: 
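        # The Fortran object hash additionally folds in the hashes of the
        # module files this source depends on; compiler.get_hash() covers
        # both the compiler name and its version.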
obj_combo_hash = sum([ analysed_file.file_hash, - flags_checksum(flags), + flags.checksum(), sum(mod_deps_hashes.values()), - zlib.crc32(mp_common_args.compiler.encode()), - zlib.crc32(mp_common_args.compiler_version.encode()), + compiler.get_hash(), ]) except TypeError: raise ValueError("could not generate combo hash for object file") return obj_combo_hash -def _get_mod_combo_hash(analysed_file, mp_common_args: MpCommonArgs): +def _get_mod_combo_hash(analysed_file, compiler: Compiler): # get a combo hash of things which matter to the mod files we define try: mod_combo_hash = sum([ analysed_file.file_hash, - zlib.crc32(mp_common_args.compiler.encode()), - zlib.crc32(mp_common_args.compiler_version.encode()), + compiler.get_hash(), ]) except TypeError: raise ValueError("could not generate combo hash for mod files") @@ -333,84 +328,21 @@ def compile_file(analysed_file, flags, output_fpath, mp_common_args): """ Call the compiler. - The current working folder for the command is set to the folder where the source file lives. - This is done to stop the compiler inserting folder information into the mod files, - which would cause them to have different checksums depending on where they live. + The current working folder for the command is set to the folder where the + source file lives when compile_file is called. This is done to stop the + compiler inserting folder information into the mod files, which would + cause them to have different checksums depending on where they live. """ output_fpath.parent.mkdir(parents=True, exist_ok=True) # tool - command = [mp_common_args.compiler] - known_compiler = COMPILERS.get(os.path.basename(mp_common_args.compiler)) - - # Compile flag. - # If it's an unknown compiler, we rely on the user config to specify this. - if known_compiler: - command.append(known_compiler.compile_flag) - - # flags - command.extend(flags) - if mp_common_args.two_stage_flag and mp_common_args.stage == 1: - command.append(mp_common_args.two_stage_flag) - - # Module folder. - # If it's an unknown compiler, we rely on the user config to specify this. - if known_compiler: - command.extend([known_compiler.module_folder_flag, str(mp_common_args.config.build_output)]) - - # files - command.append(analysed_file.fpath.name) - command.extend(['-o', str(output_fpath)]) + config = mp_common_args.config + compiler = config.tool_box[Category.FORTRAN_COMPILER] - run_command(command, cwd=analysed_file.fpath.parent) - - -# todo: move this - - -def get_fortran_compiler(compiler: Optional[str] = None): - """ - Get the fortran compiler specified by the `$FC` environment variable, - or overridden by the optional `compiler` argument. - - Separates the tool and flags for the sort of value we see in environment variables, e.g. `gfortran -c`. - - :param compiler: - Use this string instead of the $FC environment variable. - - Returns the tool and a list of flags. 
- - """ - fortran_compiler = None - try: - fortran_compiler = get_tool(compiler or os.getenv('FC', '')) # type: ignore - except ValueError: - # tool not specified - pass - - if not fortran_compiler: - try: - run_command(['gfortran', '--help']) - fortran_compiler = 'gfortran', [] - logger.info('detected gfortran') - except RuntimeError: - # gfortran not available - pass - - if not fortran_compiler: - try: - run_command(['ifort', '--help']) - fortran_compiler = 'ifort', [] - logger.info('detected ifort') - except RuntimeError: - # gfortran not available - pass - - if not fortran_compiler: - raise RuntimeError('no fortran compiler specified or discovered') - - return fortran_compiler + compiler.compile_file(input_file=analysed_file, output_file=output_fpath, + add_flags=flags, + syntax_only=mp_common_args.syntax_only) def get_mod_hashes(analysed_files: Set[AnalysedFortran], config) -> Dict[str, int]: diff --git a/source/fab/steps/grab/__init__.py b/source/fab/steps/grab/__init__.py index ed4bcf91..eeb7b359 100644 --- a/source/fab/steps/grab/__init__.py +++ b/source/fab/steps/grab/__init__.py @@ -8,21 +8,5 @@ """ import logging -import os -from pathlib import Path -from typing import Union - -from fab.tools import run_command - logger = logging.getLogger(__name__) - - -def call_rsync(src: Union[str, Path], dst: Union[str, Path]): - # we want the source folder to end with a / for rsync because we don't want it to create a sub folder - src = os.path.expanduser(str(src)) - if not src.endswith('/'): - src += '/' - - command = ['rsync', '--times', '--links', '--stats', '-ru', src, str(dst)] - return run_command(command) diff --git a/source/fab/steps/grab/fcm.py b/source/fab/steps/grab/fcm.py index 718e3ed8..24cb5850 100644 --- a/source/fab/steps/grab/fcm.py +++ b/source/fab/steps/grab/fcm.py @@ -3,30 +3,40 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## -from typing import Optional + +'''This file contains the various fcm steps. They are not +decorated with @steps since all functions here just call the +corresponding svn steps. +''' + +from typing import Optional, Union from fab.steps.grab.svn import svn_export, svn_checkout, svn_merge +from fab.tools import Category -def fcm_export(config, src: str, dst_label: Optional[str] = None, revision=None): +def fcm_export(config, src: str, dst_label: Optional[str] = None, + revision: Optional[Union[int, str]] = None): """ - Params as per :func:`~fab.steps.svn.svn_export`. + Params as per :func:`~fab.steps.grab.svn.svn_export`. """ - svn_export(config, src, dst_label, revision, tool='fcm') + svn_export(config, src, dst_label, revision, category=Category.FCM) -def fcm_checkout(config, src: str, dst_label: Optional[str] = None, revision=None): +def fcm_checkout(config, src: str, dst_label: Optional[str] = None, + revision: Optional[str] = None): """ - Params as per :func:`~fab.steps.svn.svn_checkout`. + Params as per :func:`~fab.steps.grab.svn.svn_checkout`. """ - svn_checkout(config, src, dst_label, revision, tool='fcm') + svn_checkout(config, src, dst_label, revision, category=Category.FCM) -def fcm_merge(config, src: str, dst_label: Optional[str] = None, revision=None): +def fcm_merge(config, src: str, dst_label: Optional[str] = None, + revision: Optional[str] = None): """ - Params as per :func:`~fab.steps.svn.svn_merge`. + Params as per :func:`~fab.steps.grab.svn.svn_merge`. 
""" - svn_merge(config, src, dst_label, revision, tool='fcm') + svn_merge(config, src, dst_label, revision, category=Category.FCM) diff --git a/source/fab/steps/grab/folder.py b/source/fab/steps/grab/folder.py index 85d50bf5..d745a3c5 100644 --- a/source/fab/steps/grab/folder.py +++ b/source/fab/steps/grab/folder.py @@ -7,7 +7,7 @@ from typing import Union from fab.steps import step -from fab.steps.grab import call_rsync +from fab.tools import Category @step @@ -27,4 +27,5 @@ def grab_folder(config, src: Union[Path, str], dst_label: str = ''): """ _dst = config.source_root / dst_label _dst.mkdir(parents=True, exist_ok=True) - call_rsync(src=src, dst=_dst) + rsync = config.tool_box[Category.RSYNC] + rsync.execute(src=src, dst=_dst) diff --git a/source/fab/steps/grab/git.py b/source/fab/steps/grab/git.py index 77e2b9b3..a5e5d248 100644 --- a/source/fab/steps/grab/git.py +++ b/source/fab/steps/grab/git.py @@ -3,46 +3,14 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## -import warnings -from pathlib import Path -from typing import Union - -from fab.steps import step -from fab.tools import run_command - - -def current_commit(folder=None): - folder = folder or '.' - output = run_command(['git', 'log', '--oneline', '-n', '1'], cwd=folder) - commit = output.split()[0] - return commit - -def tool_available() -> bool: - """Is the command line git tool available?""" - try: - run_command(['git', 'help']) - except FileNotFoundError: - return False - return True - - -def is_working_copy(dst: Union[str, Path]) -> bool: - """Is the given path is a working copy?""" - try: - run_command(['git', 'status'], cwd=dst) - except RuntimeError: - return False - return True +'''This module contains the git related steps. +''' +import warnings -def fetch(src, revision, dst): - # todo: allow shallow fetch with --depth 1 - command = ['git', 'fetch', src] - if revision: - command.append(revision) - - run_command(command, cwd=str(dst)) +from fab.steps import step +from fab.tools import Category # todo: allow cli args, e.g to set the depth @@ -52,24 +20,20 @@ def git_checkout(config, src: str, dst_label: str = '', revision=None): Checkout or update a Git repo. """ - _dst = config.source_root / dst_label + git = config.tool_box[Category.GIT] + dst = config.source_root / dst_label # create folder? - if not _dst.exists(): - _dst.mkdir(parents=True) - run_command(['git', 'init', '.'], cwd=_dst) - - elif not is_working_copy(_dst): # type: ignore - raise ValueError(f"destination exists but is not a working copy: '{_dst}'") - - fetch(src, revision, _dst) - run_command(['git', 'checkout', 'FETCH_HEAD'], cwd=_dst) + if not dst.exists(): + dst.mkdir(parents=True) + git.init(dst) + git.checkout(src, dst, revision=revision) try: - _dst.relative_to(config.project_workspace) - run_command(['git', 'clean', '-f'], cwd=_dst) - except ValueError: - warnings.warn(f'not safe to clean git source in {_dst}') + dst.relative_to(config.project_workspace) + git.clean(dst) + except RuntimeError: + warnings.warn(f'not safe to clean git source in {dst}') @step @@ -78,15 +42,7 @@ def git_merge(config, src: str, dst_label: str = '', revision=None): Merge a git repo into a local working copy. 
""" - _dst = config.source_root / dst_label - - if not _dst or not is_working_copy(_dst): - raise ValueError(f"destination is not a working copy: '{_dst}'") - - fetch(src=src, revision=revision, dst=_dst) - - try: - run_command(['git', 'merge', 'FETCH_HEAD'], cwd=_dst) - except RuntimeError as err: - run_command(['git', 'merge', '--abort'], cwd=_dst) - raise RuntimeError(f"Error merging {revision}. Merge aborted.\n{err}") + git = config.tool_box[Category.GIT] + dst = config.source_root / dst_label + git.fetch(src=src, dst=dst, revision=revision) + git.merge(dst=dst, revision=revision) diff --git a/source/fab/steps/grab/prebuild.py b/source/fab/steps/grab/prebuild.py index 7d79cf05..75ad8ff5 100644 --- a/source/fab/steps/grab/prebuild.py +++ b/source/fab/steps/grab/prebuild.py @@ -4,18 +4,21 @@ # which you should have received as part of this distribution # ############################################################################## from fab.steps import step -from fab.steps.grab import call_rsync, logger +from fab.steps.grab import logger +from fab.tools import Category @step -def grab_pre_build(config, path, objects=True, allow_fail=False): +def grab_pre_build(config, path, allow_fail=False): """ - Copy the contents of another project's prebuild folder into our local prebuild folder. + Copy the contents of another project's prebuild folder into our + local prebuild folder. """ dst = config.prebuild_folder + rsync = config.tool_box[Category.RSYNC] try: - res = call_rsync(src=path, dst=dst) + res = rsync.execute(src=path, dst=dst) # log the number of files transferred to_print = [line for line in res.splitlines() if 'Number of' in line] @@ -25,4 +28,4 @@ def grab_pre_build(config, path, objects=True, allow_fail=False): msg = f"could not grab pre-build '{path}':\n{err}" logger.warning(msg) if not allow_fail: - raise RuntimeError(msg) + raise RuntimeError(msg) from err diff --git a/source/fab/steps/grab/svn.py b/source/fab/steps/grab/svn.py index e0d46694..b49c4652 100644 --- a/source/fab/steps/grab/svn.py +++ b/source/fab/steps/grab/svn.py @@ -3,12 +3,18 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## + +'''This file contains the steps related to SVN. It is also used by the various +fcm steps, which call the functions here with just a different category (FCM) +from the tool box. +''' + from pathlib import Path from typing import Optional, Union, Tuple import xml.etree.ElementTree as ET from fab.steps import step -from fab.tools import run_command +from fab.tools import Category, Versioning def _get_revision(src, revision=None) -> Tuple[str, Union[str, None]]: @@ -30,7 +36,8 @@ def _get_revision(src, revision=None) -> Tuple[str, Union[str, None]]: if len(at_split) == 2: url_revision = at_split[1] if url_revision and revision and url_revision != revision: - raise ValueError('Conflicting revisions in url and argument. Please provide as argument only.') + raise ValueError('Conflicting revisions in url and argument. 
' + 'Please provide as argument only.') src = at_split[0] else: assert len(at_split) == 1 @@ -38,31 +45,10 @@ def _get_revision(src, revision=None) -> Tuple[str, Union[str, None]]: return src, revision or url_revision -def tool_available(command) -> bool: - """Is the command line tool available?""" - try: - run_command([command, 'help']) - except FileNotFoundError: - return False - return True - - -def _cli_revision_parts(revision): - # return the command line argument to specif the revision, if there is one - return ['--revision', str(revision)] if revision is not None else [] - - -def is_working_copy(tool, dst: Union[str, Path]) -> bool: - # is the given path is a working copy? - try: - run_command([tool, 'info'], cwd=dst) - except RuntimeError: - return False - return True - - -def _svn_prep_common(config, src: str, dst_label: Optional[str], revision: Optional[str]) -> \ - Tuple[str, Path, Optional[str]]: +def _svn_prep_common(config, src: str, + dst_label: Optional[str], + revision: Optional[str]) -> Tuple[str, Path, + Optional[str]]: src, revision = _get_revision(src, revision) if not config.source_root.exists(): config.source_root.mkdir(parents=True, exist_ok=True) @@ -72,77 +58,61 @@ def _svn_prep_common(config, src: str, dst_label: Optional[str], revision: Optio @step -def svn_export(config, src: str, dst_label: Optional[str] = None, revision=None, tool='svn'): +def svn_export(config, src: str, + dst_label: Optional[str] = None, + revision=None, + category=Category.SUBVERSION): # todo: params in docstrings """ Export an FCM repo folder to the project workspace. """ + svn = config.tool_box[category] src, dst, revision = _svn_prep_common(config, src, dst_label, revision) - - run_command([ - tool, 'export', '--force', - *_cli_revision_parts(revision), - src, - str(dst) - ]) + svn.export(src, dst, revision) @step -def svn_checkout(config, src: str, dst_label: Optional[str] = None, revision=None, tool='svn'): +def svn_checkout(config, src: str, dst_label: Optional[str] = None, + revision=None, category=Category.SUBVERSION): """ Checkout or update an FCM repo. .. note:: - If the destination is a working copy, it will be updated to the given revision, **ignoring the source url**. - As such, the revision should be provided via the argument, not as part of the url. + If the destination is a working copy, it will be updated to the given + revision, **ignoring the source url**. As such, the revision should + be provided via the argument, not as part of the url. """ + svn = config.tool_box[category] src, dst, revision = _svn_prep_common(config, src, dst_label, revision) # new folder? if not dst.exists(): # type: ignore - run_command([ - tool, 'checkout', - *_cli_revision_parts(revision), - src, str(dst) - ]) - + svn.checkout(src, dst, revision) else: - # working copy? - if is_working_copy(tool, dst): # type: ignore - # update - # todo: ensure the existing checkout is from self.src? - run_command([tool, 'update', *_cli_revision_parts(revision)], cwd=dst) # type: ignore - else: - # we can't deal with an existing folder that isn't a working copy - raise ValueError(f"destination exists but is not an fcm working copy: '{dst}'") + # update + # todo: ensure the existing checkout is from self.src? + svn.update(dst, revision) -def svn_merge(config, src: str, dst_label: Optional[str] = None, revision=None, tool='svn'): +def svn_merge(config, src: str, dst_label: Optional[str] = None, revision=None, + category=Category.SUBVERSION): """ Merge an FCM repo into a local working copy. 
""" + svn = config.tool_box[category] src, dst, revision = _svn_prep_common(config, src, dst_label, revision) - if not dst or not is_working_copy(tool, dst): - raise ValueError(f"destination is not a working copy: '{dst}'") - - # We seem to need the url and version combined for this operation. - # The help for fcm merge says it accepts the --revision param, like other commands, - # but it doesn't seem to be recognised. - rev_url = f'{src}' - if revision is not None: - rev_url += f'@{revision}' - - run_command([tool, 'merge', '--non-interactive', rev_url], cwd=dst) - check_conflict(tool, dst) + svn.merge(src, dst, revision) + check_conflict(svn, dst) -def check_conflict(tool, dst): - # check if there's a conflict - xml_str = run_command([tool, 'status', '--xml'], cwd=dst) +def check_conflict(tool: Versioning, dst: Union[str, Path]): + '''Check if there's a conflict + ''' + xml_str = tool.run(['status', '--xml'], cwd=dst, capture_output=True) root = ET.fromstring(xml_str) for target in root: @@ -152,6 +122,8 @@ def check_conflict(tool, dst): if entry.tag != 'entry': continue for element in entry: - if element.tag == 'wc-status' and element.attrib['item'] == 'conflicted': - raise RuntimeError(f'{tool} merge encountered a conflict:\n{xml_str}') + if (element.tag == 'wc-status' and + element.attrib['item'] == 'conflicted'): + raise RuntimeError(f'{tool} merge encountered a ' + f'conflict:\n{xml_str}') return False diff --git a/source/fab/steps/link.py b/source/fab/steps/link.py index 571ff6be..693ea0ab 100644 --- a/source/fab/steps/link.py +++ b/source/fab/steps/link.py @@ -8,14 +8,12 @@ """ import logging -import os from string import Template from typing import Optional from fab.constants import OBJECT_FILES, OBJECT_ARCHIVES, EXECUTABLES from fab.steps import step -from fab.util import log_or_dot -from fab.tools import run_command +from fab.tools import Category from fab.artefacts import ArtefactsGetter, CollectionGetter logger = logging.getLogger(__name__) @@ -33,24 +31,8 @@ def __call__(self, artefact_store): or CollectionGetter(OBJECT_FILES)(artefact_store) -def call_linker(linker, flags, filename, objects): - assert isinstance(linker, str) - command = linker.split() - command.extend(['-o', filename]) - # todo: we need to be able to specify flags which appear before the object files - command.extend(map(str, sorted(objects))) - # note: this must this come after the list of object files? - command.extend(os.getenv('LDFLAGS', '').split()) - command.extend(flags) - log_or_dot(logger, 'Link running command: ' + ' '.join(command)) - try: - run_command(command) - except Exception as err: - raise Exception(f"error linking:\n{err}") - - @step -def link_exe(config, linker: Optional[str] = None, flags=None, source: Optional[ArtefactsGetter] = None): +def link_exe(config, flags=None, source: Optional[ArtefactsGetter] = None): """ Link object files into an executable for every build target. @@ -63,17 +45,15 @@ def link_exe(config, linker: Optional[str] = None, flags=None, source: Optional[ :param config: The :class:`fab.build_config.BuildConfig` object where we can read settings such as the project workspace folder or the multiprocessing flag. - :param linker: - E.g 'gcc' or 'ld'. :param flags: A list of flags to pass to the linker. :param source: - An optional :class:`~fab.artefacts.ArtefactsGetter`. - Typically not required, as there is a sensible default. + An optional :class:`~fab.artefacts.ArtefactsGetter`. 
It defaults to the + output from compiler steps, which typically is the expected behaviour. """ - linker = linker or os.getenv('LD', 'ld') - logger.info(f'linker is {linker}') + linker = config.tool_box[Category.LINKER] + logger.info(f'Linker is {linker.name}') flags = flags or [] source_getter = source or DefaultLinkerSource() @@ -81,13 +61,13 @@ def link_exe(config, linker: Optional[str] = None, flags=None, source: Optional[ target_objects = source_getter(config.artefact_store) for root, objects in target_objects.items(): exe_path = config.project_workspace / f'{root}' - call_linker(linker=linker, flags=flags, filename=str(exe_path), objects=objects) + linker.link(objects, exe_path, flags) config.artefact_store.setdefault(EXECUTABLES, []).append(exe_path) # todo: the bit about Dict[None, object_files] seems too obscure - try to rethink this. @step -def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, flags=None, +def link_shared_object(config, output_fpath: str, flags=None, source: Optional[ArtefactsGetter] = None): """ Produce a shared object (*.so*) file from the given build target. @@ -102,8 +82,6 @@ def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, such as the project workspace folder or the multiprocessing flag. :param output_fpath: File path of the shared object to create. - :param linker: - E.g 'gcc' or 'ld'. :param flags: A list of flags to pass to the linker. :param source: @@ -111,7 +89,7 @@ def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, Typically not required, as there is a sensible default. """ - linker = linker or os.getenv('LD', 'ld') + linker = config.tool_box[Category.LINKER] logger.info(f'linker is {linker}') flags = flags or [] @@ -127,7 +105,5 @@ def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, assert list(target_objects.keys()) == [None] objects = target_objects[None] - call_linker( - linker=linker, flags=flags, - filename=Template(output_fpath).substitute(output=config.build_output), - objects=objects) + out_name = Template(output_fpath).substitute(output=config.build_output) + linker.link(objects, out_name, add_libs=flags) diff --git a/source/fab/steps/preprocess.py b/source/fab/steps/preprocess.py index ffc3d406..11777e96 100644 --- a/source/fab/steps/preprocess.py +++ b/source/fab/steps/preprocess.py @@ -8,7 +8,6 @@ """ import logging -import os import shutil from dataclasses import dataclass from pathlib import Path @@ -19,24 +18,24 @@ from fab.metrics import send_metric from fab.util import log_or_dot_finish, input_to_output_fpath, log_or_dot, suffix_filter, Timer, by_type -from fab.tools import get_tool, run_command from fab.steps import check_for_errors, run_mp, step +from fab.tools import Category, Cpp, CppFortran, Preprocessor from fab.artefacts import ArtefactsGetter, SuffixFilter, CollectionGetter logger = logging.getLogger(__name__) @dataclass -class MpCommonArgs(object): +class MpCommonArgs(): """Common args for calling process_artefact() using multiprocessing.""" config: BuildConfig output_suffix: str - preprocessor: str + preprocessor: Preprocessor flags: FlagsConfig name: str -def pre_processor(config: BuildConfig, preprocessor: str, +def pre_processor(config: BuildConfig, preprocessor: Preprocessor, files: Collection[Path], output_collection, output_suffix, common_flags: Optional[List[str]] = None, path_flags: Optional[List] = None, @@ -68,7 +67,7 @@ def pre_processor(config: BuildConfig, preprocessor: str, common_flags = 
common_flags or []

     flags = FlagsConfig(common_flags=common_flags, path_flags=path_flags)

-    logger.info(f'preprocessor is {preprocessor}')
+    logger.info(f"preprocessor is '{preprocessor.name}'.")

     logger.info(f'preprocessing {len(files)} files')

@@ -97,84 +96,33 @@ def process_artefact(arg: Tuple[Path, MpCommonArgs]):

     Writes the output file to the output folder, with a lower case extension.

     """
-    fpath, args = arg
+    input_fpath, args = arg
     with Timer() as timer:
-
-        # output_fpath = input_to_output_fpath(config=self._config, input_path=fpath).with_suffix(self.output_suffix)
-        output_fpath = input_to_output_fpath(config=args.config, input_path=fpath).with_suffix(args.output_suffix)
+        output_fpath = (input_to_output_fpath(config=args.config,
+                                              input_path=input_fpath)
+                        .with_suffix(args.output_suffix))

         # already preprocessed?
-        # todo: remove reuse_artefacts eveywhere!
+        # todo: remove reuse_artefacts everywhere!
         if args.config.reuse_artefacts and output_fpath.exists():
-            log_or_dot(logger, f'Preprocessor skipping: {fpath}')
+            log_or_dot(logger, f'Preprocessor skipping: {input_fpath}')
         else:
             output_fpath.parent.mkdir(parents=True, exist_ok=True)

-            command = [args.preprocessor]
-            command.extend(args.flags.flags_for_path(path=fpath, config=args.config))
-            command.append(str(fpath))
-            command.append(str(output_fpath))
+            params = args.flags.flags_for_path(path=input_fpath, config=args.config)

-            log_or_dot(logger, 'PreProcessor running command: ' + ' '.join(command))
+            log_or_dot(logger, f"PreProcessor running with parameters: "
+                               f"'{' '.join(params)}'.")
             try:
-                run_command(command)
+                args.preprocessor.preprocess(input_fpath, output_fpath, params)
             except Exception as err:
-                raise Exception(f"error preprocessing {fpath}:\n{err}")
+                raise Exception(f"error preprocessing {input_fpath}:\n{err}") from err

-    send_metric(args.name, str(fpath), {'time_taken': timer.taken, 'start': timer.start})
+    send_metric(args.name, str(input_fpath), {'time_taken': timer.taken, 'start': timer.start})
     return output_fpath


-def get_fortran_preprocessor():
-    """
-    Identify the fortran preprocessor and any flags from the environment.
-
-    Initially looks for the `FPP` environment variable, then tries to call the `fpp` and `cpp` command line tools.
-
-    Returns the executable and flags.
-
-    The returned flags will always include `-P` to suppress line numbers.
-    This fparser ticket requests line number handling https://github.com/stfc/fparser/issues/390 .
- - """ - fpp: Optional[str] = None - fpp_flags: Optional[List[str]] = None - - try: - fpp, fpp_flags = get_tool(os.getenv('FPP')) - logger.info(f"The environment defined FPP as '{fpp}'") - except ValueError: - pass - - if not fpp: - try: - run_command(['which', 'fpp']) - fpp, fpp_flags = 'fpp', ['-P'] - logger.info('detected fpp') - except RuntimeError: - # fpp not available - pass - - if not fpp: - try: - run_command(['which', 'cpp']) - fpp, fpp_flags = 'cpp', ['-traditional-cpp', '-P'] - logger.info('detected cpp') - except RuntimeError: - # fpp not available - pass - - if not fpp: - raise RuntimeError('no fortran preprocessor specified or discovered') - - assert fpp_flags is not None - if '-P' not in fpp_flags: - fpp_flags.append('-P') - - return fpp, fpp_flags - - # todo: rename preprocess_fortran @step def preprocess_fortran(config: BuildConfig, source: Optional[ArtefactsGetter] = None, **kwargs): @@ -196,17 +144,16 @@ def preprocess_fortran(config: BuildConfig, source: Optional[ArtefactsGetter] = F90s = suffix_filter(source_files, '.F90') f90s = suffix_filter(source_files, '.f90') - # get the tool from FPP - fpp, fpp_flags = get_fortran_preprocessor() + fpp = config.tool_box[Category.FORTRAN_PREPROCESSOR] + if not isinstance(fpp, CppFortran): + raise RuntimeError(f"Unexpected tool '{fpp.name}' of type " + f"'{type(fpp)}' instead of CppFortran") # make sure any flags from FPP are included in any common flags specified by the config try: common_flags = kwargs.pop('common_flags') except KeyError: common_flags = [] - for fpp_flag in fpp_flags: - if fpp_flag not in common_flags: - common_flags.append(fpp_flag) # preprocess big F90s pre_processor( @@ -258,10 +205,14 @@ def preprocess_c(config: BuildConfig, source=None, **kwargs): """ source_getter = source or DefaultCPreprocessorSource() source_files = source_getter(config.artefact_store) + cpp = config.tool_box[Category.C_PREPROCESSOR] + if not isinstance(cpp, Cpp): + raise RuntimeError(f"Unexpected tool '{cpp.name}' of type " + f"'{type(cpp)}' instead of Cpp") pre_processor( config, - preprocessor=os.getenv('CPP', 'cpp'), + preprocessor=cpp, files=source_files, output_collection='preprocessed_c', output_suffix='.c', name='preprocess c', diff --git a/source/fab/steps/psyclone.py b/source/fab/steps/psyclone.py index b6671bc5..0db38b3d 100644 --- a/source/fab/steps/psyclone.py +++ b/source/fab/steps/psyclone.py @@ -15,41 +15,28 @@ import warnings from itertools import chain from pathlib import Path -from typing import Dict, List, Optional, Set, Union, Tuple, Callable +from typing import Dict, List, Optional, Set, Tuple, Union, Callable from fab.build_config import BuildConfig -from fab.tools import run_command from fab.artefacts import ArtefactsGetter, CollectionConcat, SuffixFilter from fab.parse.fortran import FortranAnalyser, AnalysedFortran from fab.parse.x90 import X90Analyser, AnalysedX90 from fab.steps import run_mp, check_for_errors, step -from fab.steps.preprocess import get_fortran_preprocessor, pre_processor +from fab.steps.preprocess import pre_processor +from fab.tools import Category, Psyclone from fab.util import log_or_dot, input_to_output_fpath, file_checksum, file_walk, TimerLogger, \ string_checksum, suffix_filter, by_type, log_or_dot_finish logger = logging.getLogger(__name__) -def tool_available() -> bool: - """Check if the psyclone tool is available at the command line.""" - try: - run_command(['psyclone', '-h']) - except (RuntimeError, FileNotFoundError): - return False - return True - - # todo: should this be part 
of the psyclone step? def preprocess_x90(config, common_flags: Optional[List[str]] = None): common_flags = common_flags or [] # get the tool from FPP - fpp, fpp_flags = get_fortran_preprocessor() - for fpp_flag in fpp_flags: - if fpp_flag not in common_flags: - common_flags.append(fpp_flag) - + fpp = config.tool_box[Category.FORTRAN_PREPROCESSOR] source_files = SuffixFilter('all_source', '.X90')(config.artefact_store) pre_processor( @@ -74,7 +61,7 @@ class MpCommonArgs: config: BuildConfig analysed_x90: Dict[Path, AnalysedX90] - kernel_roots: List[Path] + kernel_roots: List[Union[str, Path]] transformation_script: Optional[Callable[[Path, BuildConfig], Path]] cli_args: List[str] @@ -131,8 +118,6 @@ def psyclone(config, kernel_roots: Optional[List[Path]] = None, cli_args = cli_args or [] source_getter = source_getter or DEFAULT_SOURCE_GETTER - overrides_folder = overrides_folder - x90s = source_getter(config.artefact_store) # analyse the x90s @@ -174,7 +159,7 @@ def psyclone(config, kernel_roots: Optional[List[Path]] = None, def _generate_mp_payload(config, analysed_x90, all_kernel_hashes, overrides_folder, kernel_roots, - transformation_script, cli_args): + transformation_script, cli_args) -> MpCommonArgs: override_files: List[str] = [] if overrides_folder: override_files = [f.name for f in file_walk(overrides_folder)] @@ -289,38 +274,49 @@ def do_one_file(arg: Tuple[Path, MpCommonArgs]): prebuild_hash = _gen_prebuild_hash(x90_file, mp_payload) # These are the filenames we expect to be output for this x90 input file. - # There will always be one modified_alg, and 0-1 generated. + # There will always be one modified_alg, and 0-1 generated psy file. modified_alg: Path = x90_file.with_suffix('.f90') modified_alg = input_to_output_fpath(config=mp_payload.config, input_path=modified_alg) - generated: Path = x90_file.parent / (str(x90_file.stem) + '_psy.f90') - generated = input_to_output_fpath(config=mp_payload.config, input_path=generated) + psy_file: Path = x90_file.parent / (str(x90_file.stem) + '_psy.f90') + psy_file = input_to_output_fpath(config=mp_payload.config, input_path=psy_file) - generated.parent.mkdir(parents=True, exist_ok=True) + psy_file.parent.mkdir(parents=True, exist_ok=True) # do we already have prebuilt results for this x90 file? 
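    # The prebuild file names embed the hash computed above, e.g.
    # my_prog.<hash>.f90 and my_prog_psy.<hash>.f90 (the stem here is
    # illustrative; see _get_prebuild_paths below).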
prebuilt_alg, prebuilt_gen = _get_prebuild_paths( - mp_payload.config.prebuild_folder, modified_alg, generated, prebuild_hash) + mp_payload.config.prebuild_folder, modified_alg, psy_file, prebuild_hash) if prebuilt_alg.exists(): # todo: error handling in here msg = f'found prebuilds for {x90_file}:\n {prebuilt_alg}' shutil.copy2(prebuilt_alg, modified_alg) if prebuilt_gen.exists(): msg += f'\n {prebuilt_gen}' - shutil.copy2(prebuilt_gen, generated) + shutil.copy2(prebuilt_gen, psy_file) log_or_dot(logger=logger, msg=msg) else: + config = mp_payload.config + psyclone = config.tool_box[Category.PSYCLONE] + if not isinstance(psyclone, Psyclone): + raise RuntimeError(f"Unexpected tool '{psyclone.name}' of type " + f"'{type(psyclone)}' instead of Psyclone") try: - # logger.info(f'running psyclone on {x90_file}') - run_psyclone(generated, modified_alg, x90_file, - mp_payload.kernel_roots, mp_payload.transformation_script, - mp_payload.cli_args, mp_payload.config) + transformation_script = mp_payload.transformation_script + logger.info(f"running psyclone on '{x90_file}'.") + psyclone.process(config=mp_payload.config, + api="dynamo0.3", + x90_file=x90_file, + psy_file=psy_file, + alg_file=modified_alg, + transformation_script=transformation_script, + kernel_roots=mp_payload.kernel_roots, + additional_parameters=mp_payload.cli_args) shutil.copy2(modified_alg, prebuilt_alg) msg = f'created prebuilds for {x90_file}:\n {prebuilt_alg}' - if Path(generated).exists(): + if Path(psy_file).exists(): msg += f'\n {prebuilt_gen}' - shutil.copy2(generated, prebuilt_gen) + shutil.copy2(psy_file, prebuilt_gen) log_or_dot(logger=logger, msg=msg) except Exception as err: @@ -329,12 +325,12 @@ def do_one_file(arg: Tuple[Path, MpCommonArgs]): # do we have handwritten overrides for either of the files we just created? modified_alg = _check_override(modified_alg, mp_payload) - generated = _check_override(generated, mp_payload) + psy_file = _check_override(psy_file, mp_payload) # return the output files from psyclone result: List[Path] = [modified_alg] - if Path(generated).exists(): - result.append(generated) + if Path(psy_file).exists(): + result.append(psy_file) # we also want to return the prebuild artefact files we created, # which are just copies, in the prebuild folder, with hashes in the filenames. 
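
The new `Psyclone.process` call above replaces the hand-built command line in
the `run_psyclone` helper, which is deleted below. As a reference for what
`process` is expected to assemble, here is a minimal sketch of the equivalent
invocation; the function name `equivalent_psyclone_command` is illustrative
only and not part of the patch:

def equivalent_psyclone_command(x90_file, psy_file, alg_file, kernel_roots,
                                transformation_script, cli_args, config):
    # -d specifies a root directory structure containing kernel source.
    kernel_args = sum([['-d', str(k)] for k in kernel_roots], [])
    # Optional transformation (python) script, resolved per x90 file.
    transform_options = []
    if transformation_script:
        script = transformation_script(x90_file, config)
        if script:
            transform_options = ['-s', str(script)]
    return ['psyclone', '-api', 'dynamo0.3', '-l', 'all',
            *kernel_args,
            '-opsy', str(psy_file),   # file name of the generated PSy code
            '-oalg', str(alg_file),   # file name of the transformed algorithm
            *transform_options,
            *cli_args,
            str(x90_file)]
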
@@ -390,38 +386,12 @@ def _gen_prebuild_hash(x90_file: Path, mp_payload: MpCommonArgs): return prebuild_hash -def _get_prebuild_paths(prebuild_folder, modified_alg, generated, prebuild_hash): +def _get_prebuild_paths(prebuild_folder, modified_alg, psy_file, prebuild_hash): prebuilt_alg = Path(prebuild_folder / f'{modified_alg.stem}.{prebuild_hash}{modified_alg.suffix}') - prebuilt_gen = Path(prebuild_folder / f'{generated.stem}.{prebuild_hash}{generated.suffix}') + prebuilt_gen = Path(prebuild_folder / f'{psy_file.stem}.{prebuild_hash}{psy_file.suffix}') return prebuilt_alg, prebuilt_gen -def run_psyclone(generated, modified_alg, x90_file, kernel_roots, transformation_script, cli_args, config): - - # -d specifies "a root directory structure containing kernel source" - kernel_args: Union[List[str], list] = sum([['-d', k] for k in kernel_roots], []) - - # transformation python script - transform_options = [] - if transformation_script: - transformation_script_return_path = transformation_script(x90_file, config) - if transformation_script_return_path: - transform_options = ['-s', transformation_script_return_path] - - command = [ - 'psyclone', '-api', 'dynamo0.3', - '-l', 'all', - *kernel_args, - '-opsy', generated, # filename of generated PSy code - '-oalg', modified_alg, # filename of transformed algorithm code - *transform_options, - *cli_args, - x90_file, - ] - - run_command(command) - - def _check_override(check_path: Path, mp_payload: MpCommonArgs): """ Delete the file if there's an override for it. diff --git a/source/fab/steps/root_inc_files.py b/source/fab/steps/root_inc_files.py index 2bc9999a..9ed53df4 100644 --- a/source/fab/steps/root_inc_files.py +++ b/source/fab/steps/root_inc_files.py @@ -15,6 +15,7 @@ import warnings from pathlib import Path +from fab.build_config import BuildConfig from fab.steps import step from fab.util import suffix_filter @@ -22,7 +23,7 @@ @step -def root_inc_files(config): +def root_inc_files(config: BuildConfig): """ Copy inc files into the workspace output root. diff --git a/source/fab/tools.py b/source/fab/tools.py deleted file mode 100644 index a6ccf60a..00000000 --- a/source/fab/tools.py +++ /dev/null @@ -1,173 +0,0 @@ -# ############################################################################## -# (c) Crown copyright Met Office. All rights reserved. -# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -# ############################################################################## -""" -Known command line tools whose flags we wish to manage. - -""" -import logging -from pathlib import Path -import subprocess -import warnings -from typing import Dict, List, Optional, Tuple, Union - -from fab.util import string_checksum - -logger = logging.getLogger(__name__) - - -class Compiler(object): - """ - A command-line compiler whose flags we wish to manage. - - """ - def __init__(self, exe, compile_flag, module_folder_flag): - self.exe = exe - self.compile_flag = compile_flag - self.module_folder_flag = module_folder_flag - # We should probably extend this for fPIC, two-stage and optimisation levels. - - -COMPILERS: Dict[str, Compiler] = { - 'gfortran': Compiler(exe='gfortran', compile_flag='-c', module_folder_flag='-J'), - 'ifort': Compiler(exe='ifort', compile_flag='-c', module_folder_flag='-module'), -} - - -# todo: We're not sure we actually want to do modify incoming flags. Discuss... -# todo: this is compiler specific, rename - and do we want similar functions for other steps? 
-def remove_managed_flags(compiler, flags_in): - """ - Remove flags which Fab manages. - - Fab prefers to specify a few compiler flags itself. - For example, Fab wants to place module files in the `build_output` folder. - The flag to do this differs with compiler. - - We don't want duplicate, possibly conflicting flags in our tool invocation so this function is used - to remove any flags which Fab wants to manage. - - If the compiler is not known to Fab, we rely on the user to specify these flags in their config. - - .. note:: - - This approach is due for discussion. It might not be desirable to modify user flags at all. - - """ - def remove_flag(flags: List[str], flag: str, len): - while flag in flags: - warnings.warn(f'removing managed flag {flag} for compiler {compiler}') - flag_index = flags.index(flag) - for _ in range(len): - flags.pop(flag_index) - - known_compiler = COMPILERS.get(compiler) - if not known_compiler: - logger.warning('Unable to remove managed flags for unknown compiler. User config must specify managed flags.') - return flags_in - - flags_out = [*flags_in] - remove_flag(flags_out, known_compiler.compile_flag, 1) - remove_flag(flags_out, known_compiler.module_folder_flag, 2) - return flags_out - - -def flags_checksum(flags: List[str]): - """ - Return a checksum of the flags. - - """ - return string_checksum(str(flags)) - - -def run_command(command: List[str], env=None, cwd: Optional[Union[Path, str]] = None, capture_output=True): - """ - Run a CLI command. - - :param command: - List of strings to be sent to :func:`subprocess.run` as the command. - :param env: - Optional env for the command. By default it will use the current session's environment. - :param capture_output: - If True, capture and return stdout. If False, the command will print its output directly to the console. - - """ - command = list(map(str, command)) - logger.debug(f'run_command: {" ".join(command)}') - res = subprocess.run(command, capture_output=capture_output, env=env, cwd=cwd) - if res.returncode != 0: - msg = f'Command failed with return code {res.returncode}:\n{command}' - if res.stdout: - msg += f'\n{res.stdout.decode()}' - if res.stderr: - msg += f'\n{res.stderr.decode()}' - raise RuntimeError(msg) - - if capture_output: - return res.stdout.decode() - - -def get_tool(tool_str: Optional[str] = None) -> Tuple[str, List[str]]: - """ - Get the compiler, preprocessor, etc, from the given string. - - Separate the tool and flags for the sort of value we see in environment variables, e.g. `gfortran -c`. - - Returns the tool and a list of flags. - - :param env_var: - The environment variable from which to find the tool. - - """ - tool_str = tool_str or '' - - tool_split = tool_str.split() - if not tool_split: - raise ValueError(f"Tool not specified in '{tool_str}'. Cannot continue.") - return tool_split[0], tool_split[1:] - - -# todo: add more compilers and test with more versions of compilers -def get_compiler_version(compiler: str) -> str: - """ - Try to get the version of the given compiler. - - Expects a version in a certain part of the --version output, - which must adhere to the n.n.n format, with at least 2 parts. - - Returns a version string, e.g '6.10.1', or empty string. - - :param compiler: - The command line tool for which we want a version. 
- - """ - try: - res = run_command([compiler, '--version']) - except FileNotFoundError: - raise ValueError(f'Compiler not found: {compiler}') - except RuntimeError as err: - logger.warning(f"Error asking for version of compiler '{compiler}': {err}") - return '' - - # Pull the version string from the command output. - # All the versions of gfortran and ifort we've tried follow the same pattern, it's after a ")". - try: - version = res.split(')')[1].split()[0] - except IndexError: - logger.warning(f"Unexpected version response from compiler '{compiler}': {res}") - return '' - - # expect major.minor[.patch, ...] - # validate - this may be overkill - split = version.split('.') - if len(split) < 2: - logger.warning(f"unhandled compiler version format for compiler '{compiler}' is not : {version}") - return '' - - # todo: do we care if the parts are integers? Not all will be, but perhaps major and minor? - - logger.info(f'Found compiler version for {compiler} = {version}') - - return version diff --git a/source/fab/tools/__init__.py b/source/fab/tools/__init__.py new file mode 100644 index 00000000..18244e0b --- /dev/null +++ b/source/fab/tools/__init__.py @@ -0,0 +1,50 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''A simple init file to make it shorter to import tools. +''' + +from fab.tools.ar import Ar +from fab.tools.category import Category +from fab.tools.compiler import (CCompiler, Compiler, FortranCompiler, Gcc, + Gfortran, Icc, Ifort) +from fab.tools.flags import Flags +from fab.tools.linker import Linker +from fab.tools.psyclone import Psyclone +from fab.tools.rsync import Rsync +from fab.tools.preprocessor import Cpp, CppFortran, Fpp, Preprocessor +from fab.tools.tool import Tool, CompilerSuiteTool +# Order here is important to avoid a circular import +from fab.tools.tool_repository import ToolRepository +from fab.tools.tool_box import ToolBox +from fab.tools.versioning import Fcm, Git, Subversion, Versioning + +__all__ = ["Ar", + "Category", + "CCompiler", + "Compiler", + "CompilerSuiteTool", + "Cpp", + "CppFortran", + "Fcm", + "Flags", + "FortranCompiler", + "Fpp", + "Gcc", + "Gfortran", + "Git", + "Icc", + "Ifort", + "Linker", + "Preprocessor", + "Psyclone", + "Rsync", + "Subversion", + "Tool", + "ToolBox", + "ToolRepository", + "Versioning", + ] diff --git a/source/fab/tools/ar.py b/source/fab/tools/ar.py new file mode 100644 index 00000000..54a1a881 --- /dev/null +++ b/source/fab/tools/ar.py @@ -0,0 +1,35 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +"""This file contains the Ar class for archiving files. +""" + +from pathlib import Path +from typing import List, Union + +from fab.tools.category import Category +from fab.tools.tool import Tool + + +class Ar(Tool): + '''This is the base class for `ar`. 
+ ''' + + def __init__(self): + super().__init__("ar", "ar", Category.AR) + + def create(self, output_fpath: Path, + members: List[Union[Path, str]]): + '''Create the archive with the specified name, containing the + listed members. + + :param output_fpath: the output path. + :param members: the list of objects to be added to the archive. + ''' + # Explicit type is required to avoid mypy errors :( + parameters: List[Union[Path, str]] = ["cr", output_fpath] + parameters.extend(map(str, members)) + return self.run(additional_parameters=parameters) diff --git a/source/fab/tools/category.py b/source/fab/tools/category.py new file mode 100644 index 00000000..6eab9b9d --- /dev/null +++ b/source/fab/tools/category.py @@ -0,0 +1,38 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''This simple module defines an Enum for all allowed categories. +''' + +from enum import auto, Enum + + +class Category(Enum): + '''This class defines the allowed tool categories.''' + # TODO 311: Allow support for users to add their own tools. + + C_COMPILER = auto() + C_PREPROCESSOR = auto() + FORTRAN_COMPILER = auto() + FORTRAN_PREPROCESSOR = auto() + LINKER = auto() + PSYCLONE = auto() + FCM = auto() + GIT = auto() + SUBVERSION = auto() + AR = auto() + RSYNC = auto() + MISC = auto() + + def __str__(self): + '''Simplify the str output by using only the name (e.g. `C_COMPILER` + instead of `Category.C_COMPILER)`.''' + return str(self.name) + + @property + def is_compiler(self): + '''Returns if the category is either a C or a Fortran compiler.''' + return self in [Category.FORTRAN_COMPILER, Category.C_COMPILER] diff --git a/source/fab/tools/compiler.py b/source/fab/tools/compiler.py new file mode 100644 index 00000000..b7ec8541 --- /dev/null +++ b/source/fab/tools/compiler.py @@ -0,0 +1,309 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +"""This file contains the base class for any compiler, and derived +classes for gcc, gfortran, icc, ifort +""" + +import os +from pathlib import Path +from typing import List, Optional, Union +import zlib + +from fab.tools.category import Category +from fab.tools.flags import Flags +from fab.tools.tool import CompilerSuiteTool + + +class Compiler(CompilerSuiteTool): + '''This is the base class for any compiler. It provides flags for + + - compilation only (-c), + - naming the output file (-o), + - OpenMP + + :param name: name of the compiler. + :param exec_name: name of the executable to start. + :param suite: name of the compiler suite this tool belongs to. + :param category: the Category (C_COMPILER or FORTRAN_COMPILER). + :param compile_flag: the compilation flag to use when only requesting + compilation (not linking). 
+    :param output_flag: the compilation flag to use to indicate the name
+        of the output file
+    :param omp_flag: the flag to use to enable OpenMP
+    '''
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, name: str,
+                 exec_name: Union[str, Path],
+                 suite: str,
+                 category: Category,
+                 compile_flag: Optional[str] = None,
+                 output_flag: Optional[str] = None,
+                 omp_flag: Optional[str] = None):
+        super().__init__(name, exec_name, suite, category)
+        self._version = None
+        self._compile_flag = compile_flag if compile_flag else "-c"
+        self._output_flag = output_flag if output_flag else "-o"
+        self._omp_flag = omp_flag
+        self.flags.extend(os.getenv("FFLAGS", "").split())
+
+    def get_hash(self) -> int:
+        ''':returns: a hash based on the compiler name and version.
+        '''
+        return (zlib.crc32(self.name.encode()) +
+                zlib.crc32(str(self.get_version()).encode()))
+
+    def compile_file(self, input_file: Path, output_file: Path,
+                     add_flags: Union[None, List[str]] = None):
+        '''Compiles a file. It will add the flag for compilation-only
+        automatically, as well as the output directives. The current working
+        directory for the command is set to the folder where the source file
+        lives when compile_file is called. This is done to stop the compiler
+        inserting folder information into the mod files, which would cause
+        them to have different checksums depending on where they live.
+
+        :param input_file: the path of the input file.
+        :param output_file: the path of the output file.
+        :param add_flags: additional compiler flags.
+        '''
+
+        params: List[Union[Path, str]] = [self._compile_flag]
+        if add_flags:
+            params += add_flags
+
+        params.extend([input_file.name,
+                       self._output_flag, str(output_file)])
+
+        return self.run(cwd=input_file.parent,
+                        additional_parameters=params)
+
+    def check_available(self) -> bool:
+        '''Checks if the compiler is available. While the method in
+        the Tools base class would be sufficient (when using --version),
+        in case of a compiler we also want to store the compiler version.
+        So, re-implement check_available in a way that will automatically
+        store the compiler version for later usage.
+
+        :returns: whether the compiler is available or not. We do
+            this by requesting the compiler version.
+        '''
+        try:
+            version = self.get_version()
+        except RuntimeError:
+            # Compiler does not exist:
+            return False
+
+        # An empty string is returned if some other error occurred when trying
+        # to get the compiler version.
+        return version != ""
+
+    def get_version(self):
+        """
+        Try to get the version of the given compiler.
+        # TODO: why return "" when an error happened?
+        # TODO: we need to properly create integers for compiler versions
+        # to (later) allow less and greater than comparisons.
+
+        Expects a version in a certain part of the --version output,
+        which must adhere to the n.n.n format, with at least 2 parts.
+
+        :Returns: a version string, e.g. '6.10.1', or an empty string if
+            a different error happened when trying to get the compiler
+            version.
+
+        :raises RuntimeError: if the compiler was not found.
+        """
+        if self._version:
+            return self._version
+
+        try:
+            res = self.run("--version", capture_output=True)
+        except FileNotFoundError as err:
+            raise RuntimeError(f'Compiler not found: {self.name}') from err
+        except RuntimeError as err:
+            self.logger.warning(f"Error asking for version of compiler "
+                                f"'{self.name}': {err}")
+            return ''
+
+        # Pull the version string from the command output.
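+        # (For example, a banner such as "GNU Fortran (GCC) 12.2.0" yields
+        # "12.2.0": split on ")" and take the first token of the remainder.
+        # The exact banner text is illustrative.)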
+        # All the versions of gfortran and ifort we've tried follow the
+        # same pattern, it's after a ")".
+        try:
+            version = res.split(')')[1].split()[0]
+        except IndexError:
+            self.logger.warning(f"Unexpected version response from "
+                                f"compiler '{self.name}': {res}")
+            return ''
+
+        # expect major.minor[.patch, ...]
+        # validate - this may be overkill
+        split = version.split('.')
+        if len(split) < 2:
+            self.logger.warning(f"Unhandled version format for compiler "
+                                f"'{self.name}': {version}")
+            return ''
+
+        # todo: do we care if the parts are integers? Not all will be,
+        # but perhaps major and minor?
+
+        self.logger.info(f'Found compiler version for {self.name} = {version}')
+        self._version = version
+        return version
+
+
+# ============================================================================
+class CCompiler(Compiler):
+    '''This is the base class for a C compiler. It just sets the category
+    of the compiler as a convenience.
+
+    :param name: name of the compiler.
+    :param exec_name: name of the executable to start.
+    :param suite: name of the compiler suite.
+    :param compile_flag: the compilation flag to use when only requesting
+        compilation (not linking).
+    :param output_flag: the compilation flag to use to indicate the name
+        of the output file.
+    :param omp_flag: the flag to use to enable OpenMP.
+    '''
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, name: str, exec_name: str, suite: str,
+                 compile_flag=None, output_flag=None, omp_flag=None):
+        super().__init__(name, exec_name, suite, Category.C_COMPILER,
+                         compile_flag, output_flag, omp_flag)
+
+
+# ============================================================================
+class FortranCompiler(Compiler):
+    '''This is the base class for a Fortran compiler. It is a compiler
+    that needs to support a module output path and, optionally, syntax-only
+    compilation (which will only generate the .mod files).
+
+    :param name: name of the compiler.
+    :param exec_name: name of the executable to start.
+    :param suite: name of the compiler suite.
+    :param module_folder_flag: the compiler flag to indicate where to
+        store created module files.
+    :param syntax_only_flag: flag to indicate to only do a syntax check.
+        The side effect is that the module files are created.
+    :param compile_flag: the compilation flag to use when only requesting
+        compilation (not linking).
+    :param output_flag: the compilation flag to use to indicate the name
+        of the output file.
+    :param omp_flag: the flag to use to enable OpenMP.
+    '''
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, name: str, exec_name: str, suite: str,
+                 module_folder_flag: str, syntax_only_flag=None,
+                 compile_flag=None, output_flag=None, omp_flag=None):
+
+        super().__init__(name, exec_name, suite, Category.FORTRAN_COMPILER,
+                         compile_flag, output_flag, omp_flag)
+        self._module_folder_flag = module_folder_flag
+        self._module_output_path = ""
+        self._syntax_only_flag = syntax_only_flag
+
+    @property
+    def has_syntax_only(self) -> bool:
+        ''':returns: whether this compiler supports a syntax-only feature.'''
+        return self._syntax_only_flag is not None
+
+    def set_module_output_path(self, path: Path):
+        '''Sets the output path for modules.
+
+        :param path: the path to the output directory.
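+
+        Example (illustrative; `config` is assumed to be a BuildConfig)::
+
+            compiler.set_module_output_path(config.build_output)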
+        '''
+        self._module_output_path = str(path)
+
+    def compile_file(self, input_file: Path, output_file: Path,
+                     add_flags: Union[None, List[str]] = None,
+                     syntax_only: bool = False):
+        '''Compiles a file.
+
+        :param input_file: the name of the input file.
+        :param output_file: the name of the output file.
+        :param add_flags: additional flags for the compiler.
+        :param syntax_only: if set, the compiler will only do
+            a syntax check.
+        '''
+
+        params: List[str] = []
+        if add_flags:
+            new_flags = Flags(add_flags)
+            new_flags.remove_flag(self._module_folder_flag, has_parameter=True)
+            new_flags.remove_flag(self._compile_flag, has_parameter=False)
+            params += new_flags
+
+        if syntax_only and self._syntax_only_flag:
+            params.append(self._syntax_only_flag)
+
+        # Append module output path
+        if self._module_folder_flag and self._module_output_path:
+            params.append(self._module_folder_flag)
+            params.append(self._module_output_path)
+        super().compile_file(input_file, output_file, params)
+
+
+# ============================================================================
+class Gcc(CCompiler):
+    '''Class for GNU's gcc compiler.
+
+    :param name: name of this compiler.
+    :param exec_name: name of the executable.
+    '''
+    def __init__(self,
+                 name: str = "gcc",
+                 exec_name: str = "gcc"):
+        super().__init__(name, exec_name, "gnu", omp_flag="-fopenmp")
+
+
+# ============================================================================
+class Gfortran(FortranCompiler):
+    '''Class for GNU's gfortran compiler.
+
+    :param name: name of this compiler.
+    :param exec_name: name of the executable.
+    '''
+    def __init__(self,
+                 name: str = "gfortran",
+                 exec_name: str = "gfortran"):
+        super().__init__(name, exec_name, "gnu",
+                         module_folder_flag="-J",
+                         omp_flag="-fopenmp",
+                         syntax_only_flag="-fsyntax-only")
+
+
+# ============================================================================
+class Icc(CCompiler):
+    '''Class for Intel's icc compiler.
+
+    :param name: name of this compiler.
+    :param exec_name: name of the executable.
+    '''
+    def __init__(self,
+                 name: str = "icc",
+                 exec_name: str = "icc"):
+        super().__init__(name, exec_name, "intel-classic",
+                         omp_flag="-qopenmp")
+
+
+# ============================================================================
+class Ifort(FortranCompiler):
+    '''Class for Intel's ifort compiler.
+
+    :param name: name of this compiler.
+    :param exec_name: name of the executable.
+    '''
+    def __init__(self,
+                 name: str = "ifort",
+                 exec_name: str = "ifort"):
+        super().__init__(name, exec_name, "intel-classic",
+                         module_folder_flag="-module",
+                         omp_flag="-qopenmp",
+                         syntax_only_flag="-syntax-only")
diff --git a/source/fab/tools/flags.py b/source/fab/tools/flags.py
new file mode 100644
index 00000000..6303a754
--- /dev/null
+++ b/source/fab/tools/flags.py
@@ -0,0 +1,81 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+'''This file contains a simple Flags class to manage tool flags.
+It will need to be combined with build_config.FlagsConfig in a follow-up
+PR.
+'''
+
+import logging
+from typing import List, Optional
+import warnings
+
+from fab.util import string_checksum
+
+
+class Flags(list):
+    '''This class represents a list of parameters for a tool. It is a
+    list with some additional functionality.
+
+    TODO #22: This class and build_config.FlagsConfig should be combined.
+
+    :param list_of_flags: List of parameters to initialise this object with.
+    '''
+
+    def __init__(self, list_of_flags: Optional[List[str]] = None):
+        self._logger = logging.getLogger(__name__)
+        super().__init__()
+        if list_of_flags:
+            self.extend(list_of_flags)
+
+    def checksum(self) -> str:
+        """
+        :returns: a checksum of the flags.
+        """
+        return string_checksum(str(self))
+
+    def remove_flag(self, remove_flag: str, has_parameter: bool = False):
+        '''Removes all occurrences of `remove_flag` in flags.
+        If has_parameter is set, the next entry in flags will also be
+        removed, and if this object contains this flag+parameter without a
+        space (e.g. `-J/tmp`), it will be correctly removed. Note that only
+        the flag itself must be specified; you cannot remove a flag only when
+        a specific parameter is given (i.e. `remove_flag="-J/tmp"` will not
+        work if this object contains `[..., "-J", "/tmp"]`).
+
+        :param remove_flag: the flag to remove.
+        :param has_parameter: if the flag to remove takes a parameter.
+        '''
+
+        # TODO #313: Check if we can use an OrderedDict and get O(1)
+        # behaviour here (since ordering of flags can be important)
+        i = 0
+        flag_len = len(remove_flag)
+        while i < len(self):
+            flag = self[i]
+            # First check for the flag stand-alone, i.e. if it has a
+            # parameter, it will be the next entry: [... "-J", "/tmp"]:
+            if flag == remove_flag:
+                if has_parameter and i + 1 == len(self):
+                    # We have a flag which takes a parameter, but there is no
+                    # parameter. Issue a warning:
+                    self._logger.warning(f"Flags '{' '.join(self)}' contain "
+                                         f"'{remove_flag}' but no parameter.")
+                    del self[i]
+                else:
+                    # Delete the argument and if required its parameter
+                    del self[i:i+(2 if has_parameter else 1)]
+                warnings.warn(f"Removing managed flag '{remove_flag}'.")
+                continue
+            # Now check if it has flag and parameter as one argument (-J/tmp)
+            # ['-J/tmp'] and remove_flag('-J', True)
+            if has_parameter and flag[:flag_len] == remove_flag:
+                # No space between flag and parameter, remove this one flag
+                warnings.warn(f"Removing managed flag '{remove_flag}'.")
+                del self[i]
+                continue
+            i += 1
diff --git a/source/fab/tools/linker.py b/source/fab/tools/linker.py
new file mode 100644
index 00000000..06bb5cfa
--- /dev/null
+++ b/source/fab/tools/linker.py
@@ -0,0 +1,85 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the base class for any linker.
+"""
+
+import os
+from pathlib import Path
+from typing import cast, List, Optional
+
+from fab.tools.category import Category
+from fab.tools.compiler import Compiler
+from fab.tools.tool import CompilerSuiteTool
+
+
+class Linker(CompilerSuiteTool):
+    '''This is the base class for any linker. If a compiler is specified,
+    its name, executable, and compiler suite will be used for the linker (if
+    not explicitly set in the constructor).
+
+    :param name: the name of the linker.
+    :param exec_name: the name of the executable.
+    :param suite: optional, the name of the suite.
+    :param compiler: optional, a compiler instance.
+    :param output_flag: flag to use to specify the output name.
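+
+    A minimal usage sketch (assumes gcc and gfortran are installed; the
+    object file names are purely illustrative)::
+
+        linker = Linker(compiler=Gfortran())
+        linker.link([Path("a.o"), Path("b.o")], Path("prog"),
+                    add_libs=["-lgfortran"])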
+    '''
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, name: Optional[str] = None,
+                 exec_name: Optional[str] = None,
+                 suite: Optional[str] = None,
+                 compiler: Optional[Compiler] = None,
+                 output_flag: str = "-o"):
+        if (not name or not exec_name or not suite) and not compiler:
+            raise RuntimeError("Either specify name, exec name, and suite "
+                               "or a compiler when creating Linker.")
+        # Make mypy happy, since it can't work out otherwise if these string
+        # variables might still be None :(
+        compiler = cast(Compiler, compiler)
+        if not name:
+            name = compiler.name
+        if not exec_name:
+            exec_name = compiler.exec_name
+        if not suite:
+            suite = compiler.suite
+        self._output_flag = output_flag
+        super().__init__(name, exec_name, suite, Category.LINKER)
+        self._compiler = compiler
+        self.flags.extend(os.getenv("LDFLAGS", "").split())
+
+    def check_available(self) -> bool:
+        '''
+        :returns: whether the linker is available or not. We do this
+            by requesting the linker version.
+        '''
+        if self._compiler:
+            return self._compiler.check_available()
+
+        return super().check_available()
+
+    def link(self, input_files: List[Path], output_file: Path,
+             add_libs: Optional[List[str]] = None) -> str:
+        '''Executes the linker with the specified input files,
+        creating `output_file`.
+
+        :param input_files: list of input files to link.
+        :param output_file: output file.
+        :param add_libs: additional linker flags.
+
+        :returns: the stdout of the link command.
+        '''
+        if self._compiler:
+            # Create a copy:
+            params = self._compiler.flags[:]
+        else:
+            params = []
+        # TODO: why are the .o files sorted? That shouldn't matter
+        params.extend(sorted(map(str, input_files)))
+        if add_libs:
+            params += add_libs
+        params.extend([self._output_flag, str(output_file)])
+        return self.run(params)
diff --git a/source/fab/tools/preprocessor.py b/source/fab/tools/preprocessor.py
new file mode 100644
index 00000000..be9f9d43
--- /dev/null
+++ b/source/fab/tools/preprocessor.py
@@ -0,0 +1,77 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the base class for any preprocessor, and the derived
+classes Cpp, CppFortran and Fpp.
+
+"""
+
+from pathlib import Path
+from typing import List, Optional, Union
+
+from fab.tools.category import Category
+from fab.tools.tool import Tool
+
+
+class Preprocessor(Tool):
+    '''This is the base class for any preprocessor.
+
+    :param name: the name of the preprocessor.
+    :param exec_name: the name of the executable.
+    :param category: the category (C_PREPROCESSOR or FORTRAN_PREPROCESSOR).
+    :param availability_option: an optional command line option used to
+        check whether the tool is available; it is passed through to the
+        Tool base class.
+    '''
+
+    def __init__(self, name: str, exec_name: Union[str, Path],
+                 category: Category,
+                 availability_option: Optional[str] = None):
+        super().__init__(name, exec_name, category,
+                         availability_option=availability_option)
+        self._version = None
+
+    def preprocess(self, input_file: Path, output_file: Path,
+                   add_flags: Union[None, List[Union[Path, str]]] = None):
+        '''Calls the preprocessor to process the specified input file,
+        creating the requested output file.
+
+        :param input_file: input file.
+        :param output_file: the output filename.
+        :param add_flags: List with additional flags to be used.
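+
+        Example (a sketch; assumes cpp is installed and the file names
+        are illustrative)::
+
+            Cpp().preprocess(Path("in.c"), Path("out.c"),
+                             add_flags=["-DUSE_MPI"])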
+        '''
+        params: List[Union[str, Path]] = []
+        if add_flags:
+            # Make a copy to avoid modifying the caller's list
+            params = add_flags[:]
+        # Input and output files come as the last two parameters
+        params.extend([input_file, output_file])
+
+        return self.run(additional_parameters=params)
+
+
+# ============================================================================
+class Cpp(Preprocessor):
+    '''Class for cpp.
+    '''
+    def __init__(self):
+        super().__init__("cpp", "cpp", Category.C_PREPROCESSOR)
+
+
+# ============================================================================
+class CppFortran(Preprocessor):
+    '''Class for cpp when used as a Fortran preprocessor.
+    '''
+    def __init__(self):
+        super().__init__("cpp", "cpp", Category.FORTRAN_PREPROCESSOR)
+        self.flags.extend(["-traditional-cpp", "-P"])
+
+
+# ============================================================================
+class Fpp(Preprocessor):
+    '''Class for Intel's Fortran-specific preprocessor.
+    '''
+    def __init__(self):
+        # fpp -V prints version information, but then hangs (i.e. reading
+        # from stdin), so use -what to see if it is available
+        super().__init__("fpp", "fpp", Category.FORTRAN_PREPROCESSOR,
+                         availability_option="-what")
diff --git a/source/fab/tools/psyclone.py b/source/fab/tools/psyclone.py
new file mode 100644
index 00000000..af453178
--- /dev/null
+++ b/source/fab/tools/psyclone.py
@@ -0,0 +1,68 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the tool class for PSyclone.
+"""
+
+from pathlib import Path
+from typing import Callable, List, Optional, TYPE_CHECKING, Union
+
+from fab.tools.category import Category
+from fab.tools.tool import Tool
+
+if TYPE_CHECKING:
+    # TODO 314: see if this circular dependency can be broken
+    # Otherwise we have a circular dependency:
+    # BuildConfig needs ToolBox which imports __init__ which imports this
+    from fab.build_config import BuildConfig
+
+
+class Psyclone(Tool):
+    '''This is the base class for `PSyclone`.
+    '''
+
+    def __init__(self):
+        super().__init__("psyclone", "psyclone", Category.PSYCLONE)
+
+    def process(self, api: str,
+                config: "BuildConfig",
+                x90_file: Path,
+                psy_file: Path,
+                alg_file: Union[Path, str],
+                transformation_script: Optional[Callable[[Path, "BuildConfig"],
+                                                         Path]] = None,
+                additional_parameters: Optional[List[str]] = None,
+                kernel_roots: Optional[List[Union[str, Path]]] = None
+                ):
+        # pylint: disable=too-many-arguments
+        '''Run PSyclone with the specified parameters.
+
+        :param api: the PSyclone API.
+        :param config: the BuildConfig, passed to the optional
+            transformation script.
+        :param x90_file: the input file for PSyclone.
+        :param psy_file: the output PSy-layer file.
+        :param alg_file: the output modified algorithm file.
+        :param transformation_script: an optional transformation script.
+        :param additional_parameters: optional additional parameters
+            for PSyclone.
+        :param kernel_roots: optional directories with kernels.
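+
+        A usage sketch (the API name and paths are illustrative only, and
+        `config` is assumed to be a BuildConfig)::
+
+            Psyclone().process(api="dynamo0.3", config=config,
+                               x90_file=Path("alg.x90"),
+                               psy_file=Path("alg_psy.f90"),
+                               alg_file=Path("alg.f90"))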
+        '''
+
+        parameters: List[Union[str, Path]] = [
+            "-api", api, "-l", "all", "-opsy", psy_file, "-oalg", alg_file]
+        if transformation_script:
+            transformation_script_return_path = \
+                transformation_script(x90_file, config)
+            if transformation_script_return_path:
+                parameters.extend(['-s', transformation_script_return_path])
+
+        if additional_parameters:
+            parameters.extend(additional_parameters)
+        if kernel_roots:
+            roots_with_dash_d: List[str] = sum([['-d', str(k)]
+                                                for k in kernel_roots], [])
+            parameters.extend(roots_with_dash_d)
+        parameters.append(str(x90_file))
+        return self.run(additional_parameters=parameters)
diff --git a/source/fab/tools/rsync.py b/source/fab/tools/rsync.py
new file mode 100644
index 00000000..a072345f
--- /dev/null
+++ b/source/fab/tools/rsync.py
@@ -0,0 +1,40 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the Rsync class for synchronising file trees.
+"""
+
+import os
+from pathlib import Path
+from typing import List, Union
+
+from fab.tools.category import Category
+from fab.tools.tool import Tool
+
+
+class Rsync(Tool):
+    '''This is the base class for `rsync`.
+    '''
+
+    def __init__(self):
+        super().__init__("rsync", "rsync", Category.RSYNC)
+
+    def execute(self, src: Path,
+                dst: Path):
+        '''Execute an rsync command from src to dst. It supports
+        ~ expansion for src, and makes sure that `src` ends with a `/`
+        so that rsync does not create a sub-directory.
+
+        :param src: the input path.
+        :param dst: destination path.
+        '''
+        src_str = os.path.expanduser(str(src))
+        if not src_str.endswith('/'):
+            src_str += '/'
+
+        parameters: List[Union[str, Path]] = [
+            '--times', '--links', '--stats', '-ru', src_str, dst]
+        return self.run(additional_parameters=parameters)
diff --git a/source/fab/tools/tool.py b/source/fab/tools/tool.py
new file mode 100644
index 00000000..4fe97de4
--- /dev/null
+++ b/source/fab/tools/tool.py
@@ -0,0 +1,192 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the base class for all tools, i.e. compiler,
+preprocessor, linker, archiver, PSyclone, rsync, versioning tools.
+
+Each tool belongs to one category (e.g. FORTRAN_COMPILER). This category
+is used when adding a tool to a ToolRepository or ToolBox.
+It provides basic support for running a binary, and keeps track of
+whether a tool is actually available.
+"""
+
+import logging
+from pathlib import Path
+import subprocess
+from typing import Dict, List, Optional, Union
+
+from fab.tools.category import Category
+from fab.tools.flags import Flags
+
+
+class Tool:
+    '''This is the base class for all tools. It stores the name of the tool,
+    the name of the executable, and provides a `run` method.
+
+    :param name: name of the tool.
+    :param exec_name: name or full path of the executable to start.
+    :param category: the Category to which this tool belongs.
+    :param availability_option: a command line option for the tool to test
+        if the tool is available on the current system. Defaults to
+        `--version`.
+    '''
+
+    def __init__(self, name: str, exec_name: Union[str, Path],
+                 category: Category = Category.MISC,
+                 availability_option: Optional[str] = None):
+        self._logger = logging.getLogger(__name__)
+        self._name = name
+        self._exec_name = str(exec_name)
+        self._flags = Flags()
+        self._category = category
+        if availability_option:
+            self._availability_option = availability_option
+        else:
+            self._availability_option = "--version"
+
+        # This flag keeps track of whether a tool is available on the system
+        # or not. A value of `None` means that it has not yet been tested
+        # whether the tool works. It will be set to the output of
+        # `check_available` when querying the `is_available` property.
+        # If `_is_available` is False, any call to `run` will immediately
+        # raise a RuntimeError. As long as it is still set to None (or True),
+        # the `run` method will work, allowing the `check_available` method
+        # to use `run` to determine if a tool is available or not.
+        self._is_available: Optional[bool] = None
+
+    def check_available(self) -> bool:
+        '''Run a 'test' command to check if this tool is available in the
+        system.
+
+        :returns: whether the tool is working (True) or not.
+        '''
+        try:
+            self.run(self._availability_option)
+        except (RuntimeError, FileNotFoundError):
+            return False
+        return True
+
+    @property
+    def is_available(self) -> bool:
+        '''Checks if the tool is available or not. It will call a
+        tool-specific function check_available to determine this, but will
+        cache the result to avoid testing a tool more than once.
+
+        :returns: whether the tool is available (i.e. installed and
+            working).
+        '''
+        if self._is_available is None:
+            self._is_available = self.check_available()
+        return self._is_available
+
+    @property
+    def is_compiler(self) -> bool:
+        '''Returns whether this tool is a (Fortran or C) compiler or not.'''
+        return self._category.is_compiler
+
+    @property
+    def exec_name(self) -> str:
+        ''':returns: the name of the executable.'''
+        return self._exec_name
+
+    @property
+    def name(self) -> str:
+        ''':returns: the name of the tool.'''
+        return self._name
+
+    @property
+    def category(self) -> Category:
+        ''':returns: the category of this tool.'''
+        return self._category
+
+    @property
+    def flags(self) -> Flags:
+        ''':returns: the flags to be used with this tool.'''
+        return self._flags
+
+    @property
+    def logger(self) -> logging.Logger:
+        ''':returns: a logger object for convenience.'''
+        return self._logger
+
+    def __str__(self):
+        return f"{type(self).__name__} - {self._name}: {self._exec_name}"
+
+    def run(self,
+            additional_parameters: Optional[
+                Union[str, List[Union[Path, str]]]] = None,
+            env: Optional[Dict[str, str]] = None,
+            cwd: Optional[Union[Path, str]] = None,
+            capture_output=True) -> str:
+        """
+        Run the binary as a subprocess.
+
+        :param additional_parameters:
+            List of strings to be sent to :func:`subprocess.run` as the
+            command.
+        :param env:
+            Optional env for the command. By default it will use the current
+            session's environment.
+        :param cwd:
+            Optional working directory for the command.
+        :param capture_output:
+            If True, capture and return stdout. If False, the command will
+            print its output directly to the console.
+
+        :raises RuntimeError: if the tool is not available.
+        :raises RuntimeError: if the return code of the executable is not 0.
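+
+        Example (illustrative; `-print-search-dirs` is a gcc option)::
+
+            gcc = Tool("gcc", "gcc")
+            search_dirs = gcc.run(["-print-search-dirs"])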
+ """ + + command = [self.exec_name] + self.flags + if additional_parameters: + if isinstance(additional_parameters, str): + command.append(additional_parameters) + else: + # Convert everything to a str, this is useful for supporting + # paths as additional parameter + command.extend(str(i) for i in additional_parameters) + + # self._is_available is None when it is not known yet whether a tool + # is available or not. Testing for `False` only means this `run` + # function can be used to test if a tool is available. + if self._is_available is False: + raise RuntimeError(f"Tool '{self.name}' is not available to run " + f"'{command}'.") + self._logger.debug(f'run_command: {" ".join(command)}') + try: + res = subprocess.run(command, capture_output=capture_output, + env=env, cwd=cwd, check=False) + except FileNotFoundError as err: + raise RuntimeError(f"Command '{command}' could not be " + f"executed.") from err + if res.returncode != 0: + msg = (f'Command failed with return code {res.returncode}:\n' + f'{command}') + if res.stdout: + msg += f'\n{res.stdout.decode()}' + if res.stderr: + msg += f'\n{res.stderr.decode()}' + raise RuntimeError(msg) + if capture_output: + return res.stdout.decode() + return "" + + +class CompilerSuiteTool(Tool): + '''A tool that is part of a compiler suite (typically compiler + and linker). + + :param name: name of the tool. + :param exec_name: name of the executable to start. + :param suite: name of the compiler suite. + :param category: the Category to which this tool belongs. + ''' + def __init__(self, name: str, exec_name: Union[str, Path], suite: str, + category: Category): + super().__init__(name, exec_name, category) + self._suite = suite + + @property + def suite(self) -> str: + ''':returns: the compiler suite of this tool.''' + return self._suite diff --git a/source/fab/tools/tool_box.py b/source/fab/tools/tool_box.py new file mode 100644 index 00000000..7704feeb --- /dev/null +++ b/source/fab/tools/tool_box.py @@ -0,0 +1,64 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''This file contains the ToolBox class. +''' + +import warnings +from typing import Dict + +from fab.tools.category import Category +from fab.tools.tool import Tool +from fab.tools.tool_repository import ToolRepository + + +class ToolBox: + '''This class implements the tool box. It stores one tool for each + category to be used in a FAB build. + ''' + + def __init__(self) -> None: + self._all_tools: Dict[Category, Tool] = {} + + def __getitem__(self, category: Category) -> Tool: + '''A convenience function for get_tool.''' + return self.get_tool(category) + + def add_tool(self, tool: Tool, + silent_replace: bool = False) -> None: + '''Adds a tool for a given category. + + :param tool: the tool to add. + :param silent_replace: if set, no warning will be printed + if an existing tool is replaced. + + :raises RuntimeError: if the tool to be added is not available. 
+ ''' + if not tool.is_available: + raise RuntimeError(f"Tool '{tool}' is not available.") + + if tool.category in self._all_tools and not silent_replace: + warnings.warn(f"Replacing existing tool " + f"'{self._all_tools[tool.category]}' with " + f"'{tool}'.") + self._all_tools[tool.category] = tool + + def get_tool(self, category: Category) -> Tool: + '''Returns the tool for the specified category. + + :param category: the name of the category in which to look + for the tool. + + :raises KeyError: if the category is not known. + ''' + + if category in self._all_tools: + return self._all_tools[category] + + # No tool was specified for this category, get the default tool + # from the ToolRepository: + tr = ToolRepository() + return tr.get_default(category) diff --git a/source/fab/tools/tool_repository.py b/source/fab/tools/tool_repository.py new file mode 100644 index 00000000..36aaa514 --- /dev/null +++ b/source/fab/tools/tool_repository.py @@ -0,0 +1,142 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''This file contains the ToolRepository class. +''' + +# We can't declare _singleton and __new__() using ToolRepository, but +# it is allowed if we use this import: +from __future__ import annotations + +import logging +from typing import Any, Type + +from fab.tools.tool import Tool +from fab.tools.category import Category +from fab.tools.linker import Linker +from fab.tools.versioning import Fcm, Git, Subversion + + +class ToolRepository(dict): + '''This class implements the tool repository. It stores a list of + tools for various categories. For each compiler, it will automatically + create a tool called "linker-{compiler-name}" which can be used for + linking with the specified compiler. + ''' + + _singleton: None | ToolRepository = None + + def __new__(cls) -> ToolRepository: + '''Singleton access. Changes the value of _singleton so that the + constructor can verify that it is indeed called from here. + ''' + if not cls._singleton: + cls._singleton = super().__new__(cls) + + return cls._singleton + + def __init__(self): + # Note that in this singleton pattern the constructor is called each + # time the instance is requested (since we overwrite __new__). But + # we only want to initialise the instance once, so let the constructor + # not do anything if the singleton already exists: + if ToolRepository._singleton: + return + + self._logger = logging.getLogger(__name__) + super().__init__() + + # Create the list that stores all tools for each category: + for category in Category: + self[category] = [] + + # Add the FAB default tools: + # TODO: sort the defaults so that they actually work (since not all + # tools FAB knows about are available). For now, disable Fpp: + # We get circular dependencies if imported at top of the file: + # pylint: disable=import-outside-toplevel + from fab.tools import (Ar, Cpp, CppFortran, Gcc, Gfortran, + Icc, Ifort, Psyclone, Rsync) + + for cls in [Gcc, Icc, Gfortran, Ifort, Cpp, CppFortran, + Fcm, Git, Subversion, Ar, Psyclone, Rsync]: + self.add_tool(cls) + + def add_tool(self, cls: Type[Any]): + '''Creates an instance of the specified class and adds it + to the tool repository. + + :param cls: the tool to instantiate. 
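+
+        Example (illustrative; note that a class, not an instance, is
+        passed in)::
+
+            ToolRepository().add_tool(Gfortran)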
+        '''
+
+        # Note that we cannot declare `cls` to be `Type[Tool]`, since the
+        # Tool constructor requires arguments, but the classes used here are
+        # derived from Tool which do not require any arguments (e.g. Ifort)
+
+        tool = cls()
+        # We do not test if a tool is actually available. The ToolRepository
+        # contains the tools that FAB knows about. It is the responsibility
+        # of the ToolBox to make sure only available tools are added.
+        self[tool.category].append(tool)
+
+        # If we have a compiler, add the compiler as linker as well
+        if tool.is_compiler:
+            linker = Linker(name=f"linker-{tool.name}", compiler=tool)
+            self[linker.category].append(linker)
+
+    def get_tool(self, category: Category, name: str) -> Tool:
+        ''':returns: the tool with a given name in the specified category.
+
+        :param category: the name of the category in which to look
+            for the tool.
+        :param name: the name of the tool to find.
+
+        :raises KeyError: if the category is not known.
+        :raises KeyError: if no tool in the given category has the
+            requested name.
+        '''
+
+        if category not in self:
+            raise KeyError(f"Unknown category '{category}' "
+                           f"in ToolRepository.get_tool().")
+        all_tools = self[category]
+        for tool in all_tools:
+            if tool.name == name:
+                return tool
+        raise KeyError(f"Unknown tool '{name}' in category '{category}' "
+                       f"in ToolRepository.")
+
+    def set_default_compiler_suite(self, suite: str):
+        '''Sets the default for linker and compilers to be of the
+        given compiler suite.
+
+        :param suite: the name of the compiler suite to make the default.
+        '''
+        for category in [Category.FORTRAN_COMPILER, Category.C_COMPILER,
+                         Category.LINKER]:
+            all_members = [tool for tool in self[category]
+                           if tool.suite == suite]
+            if len(all_members) == 0:
+                raise RuntimeError(f"Cannot find '{category}' "
+                                   f"in the suite '{suite}'.")
+            tool = all_members[0]
+            if tool != self[category][0]:
+                self[category].remove(tool)
+                self[category].insert(0, tool)
+
+    def get_default(self, category: Category):
+        '''Returns the default tool for a given category, which is just
+        the first tool in the category.
+
+        :param category: the category for which to return the default tool.
+
+        :raises RuntimeError: if the argument is not a Category instance.
+        '''
+
+        if not isinstance(category, Category):
+            raise RuntimeError(f"Invalid category type "
+                               f"'{type(category).__name__}'.")
+        return self[category][0]
diff --git a/source/fab/tools/versioning.py b/source/fab/tools/versioning.py
new file mode 100644
index 00000000..0ed6ae96
--- /dev/null
+++ b/source/fab/tools/versioning.py
@@ -0,0 +1,217 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+"""This file contains the base class for versioning tools like git and
+subversion. It also contains derived classes Git, Subversion, and Fcm.
+"""
+
+from pathlib import Path
+from typing import Dict, List, Optional, Union
+
+from fab.tools.category import Category
+from fab.tools.tool import Tool
+
+
+class Versioning(Tool):
+    '''This is the base class for versioning tools like git and svn.
+
+    :param name: the name of the tool.
+    :param exec_name: the name of the executable of this tool.
+    :param category: the category to which this tool belongs.
+    '''
+
+    def __init__(self, name: str,
+                 exec_name: Union[str, Path],
+                 category: Category):
+        super().__init__(name, exec_name, category,
+                         availability_option="help")
+
+
+# =============================================================================
+class Git(Versioning):
+    '''This is the base class for git.
+    '''
+
+    def __init__(self):
+        super().__init__("git", "git",
+                         category=Category.GIT)
+
+    def current_commit(self, folder: Optional[Union[Path, str]] = None) -> str:
+        ''':returns: the hash of the current commit.
+
+        :param folder: the folder for which to determine the current commit
+            (defaults to .).
+        '''
+        folder = folder or '.'
+        output = self.run(['log', '--oneline', '-n', '1'], cwd=folder)
+        commit = output.split()[0]
+        return commit
+
+    def init(self, folder: Union[Path, str]):
+        '''Initialises a directory.
+
+        :param folder: the directory to initialise.
+        '''
+        self.run(['init', '.'], cwd=folder)
+
+    def clean(self, folder: Union[Path, str]):
+        '''Removes all non-versioned files in a directory.
+
+        :param folder: the directory to clean.
+        '''
+        self.run(['clean', '-f'], cwd=folder)
+
+    def fetch(self, src: Union[str, Path],
+              dst: Union[str, Path],
+              revision: Union[None, str]):
+        '''Runs `git fetch` in the specified directory.
+
+        :param src: the source directory from which to fetch.
+        :param dst: the directory in which to run fetch.
+        :param revision: the revision to fetch (can be "" for the latest
+            revision).
+        '''
+        # todo: allow shallow fetch with --depth 1
+        command: List[Union[str, Path]] = ['fetch', str(src)]
+        if revision:
+            command.append(revision)
+        self.run(command, cwd=str(dst), capture_output=False)
+
+    def checkout(self, src: str,
+                 dst: str = '',
+                 revision: Optional[str] = None):
+        """Checkout or update a Git repo.
+
+        :param src: the source directory from which to checkout.
+        :param dst: the directory in which to run checkout.
+        :param revision: the revision to check out (can be "" for
+            the latest revision).
+        """
+        self.fetch(src, dst, revision)
+        self.run(['checkout', 'FETCH_HEAD'], cwd=dst, capture_output=False)
+
+    def merge(self, dst: Union[str, Path],
+              revision: Optional[str] = None):
+        """Merge a git repo into a local working copy. If the merge fails,
+        it will run `git merge --abort` to clean the directory.
+
+        :param dst: the directory to merge in.
+        :param revision: the revision number (only used for the error
+            message; it relies on git fetch having run previously).
+        """
+        try:
+            self.run(['merge', 'FETCH_HEAD'], cwd=dst, capture_output=False)
+        except RuntimeError as err:
+            self.run(['merge', '--abort'], cwd=dst, capture_output=False)
+            raise RuntimeError(f"Error merging {revision}. "
+                               f"Merge aborted.\n{err}") from err
+
+
+# =============================================================================
+class Subversion(Versioning):
+    '''This is the base class for subversion. Note that this is also the
+    base class for FCM, so it allows overwriting name, exec_name and
+    category, but will default to using svn.
+
+    :param name: name of the tool, defaults to subversion.
+    :param exec_name: name of the executable, defaults to "svn".
+    :param category: the category, FCM or SUBVERSION (the latter is
+        the default).
+    '''
+
+    def __init__(self, name: Optional[str] = None,
+                 exec_name: Optional[Union[str, Path]] = None,
+                 category: Category = Category.SUBVERSION):
+        name = name or "subversion"
+        exec_name = exec_name or "svn"
+        super().__init__(name, exec_name, category=category)
+
+    # pylint: disable-next=too-many-arguments
+    def execute(self, pre_commands: Optional[List[str]] = None,
+                revision: Optional[Union[int, str]] = None,
+                post_commands: Optional[List[str]] = None,
+                env: Optional[Dict[str, str]] = None,
+                cwd: Optional[Union[Path, str]] = None,
+                capture_output=True) -> str:
+        '''Executes a svn command.
+
+        :param pre_commands: List of strings to be sent to
+            :func:`subprocess.run` as the command.
+        :param revision: optional revision number as argument.
+        :param post_commands: List of additional strings to be sent to
+            :func:`subprocess.run` after the optional revision number.
+        :param env: Optional env for the command. By default it will use
+            the current session's environment.
+        :param cwd: Optional working directory for the command.
+        :param capture_output: If True, capture and return stdout. If False,
+            the command will print its output directly to the console.
+        '''
+        command: List[Union[str, Path]] = []
+        if pre_commands:
+            command.extend(pre_commands)
+        if revision:
+            command.extend(["--revision", f"{revision}"])
+        if post_commands:
+            command.extend(post_commands)
+        return super().run(command, env=env, cwd=cwd,
+                           capture_output=capture_output)
+
+    def export(self, src: Union[str, Path],
+               dst: Union[str, Path],
+               revision: Optional[str] = None):
+        '''Runs svn export.
+
+        :param src: from where to export.
+        :param dst: destination path.
+        :param revision: revision to export.
+        '''
+        self.execute(['export', '--force'], revision, [str(src), str(dst)])
+
+    def checkout(self, src: Union[str, Path],
+                 dst: Union[str, Path],
+                 revision: Optional[str] = None):
+        '''Runs svn checkout.
+
+        :param src: from where to check out.
+        :param dst: destination path.
+        :param revision: revision to check out.
+        '''
+        self.execute(["checkout"], revision, [str(src), str(dst)])
+
+    def update(self, dst: Union[str, Path],
+               revision: Optional[str] = None):
+        '''Runs svn update.
+
+        :param dst: destination path.
+        :param revision: revision to update to.
+        '''
+        self.execute(['update'], revision, cwd=dst)
+
+    def merge(self, src: Union[str, Path],
+              dst: Union[str, Path],
+              revision: Optional[str] = None):
+        '''Runs svn merge.
+
+        :param src: the src URI.
+        :param dst: destination path.
+        :param revision: revision to merge.
+        '''
+        # We seem to need the url and version combined for this operation.
+        # The help for fcm merge says it accepts the --revision param, like
+        # other commands, but it doesn't seem to be recognised.
+        rev_url = f'{src}'
+        if revision is not None:
+            rev_url += f'@{revision}'
+
+        self.execute(['merge', '--non-interactive', rev_url], cwd=dst)
+
+
+# =============================================================================
+class Fcm(Subversion):
+    '''This is the base class for FCM. All commands will be mapped back
+    to the corresponding subversion commands.
+    '''
+
+    def __init__(self):
+        super().__init__("fcm", "fcm", Category.FCM)
diff --git a/source/fab/util.py b/source/fab/util.py
index 53a26476..95cb87d3 100644
--- a/source/fab/util.py
+++ b/source/fab/util.py
@@ -110,12 +110,12 @@ def file_walk(path: Union[str, Path], ignore_folders: Optional[List[Path]] = Non
         yield i
 
 
-class Timer(object):
+class Timer:
     """
     A simple timing context manager.
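+
+    Example (illustrative)::
+
+        with Timer() as timer:
+            do_something()
+        print(timer.taken)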
""" - def __init__(self): + def __init__(self) -> None: self.start: Optional[float] = None self.taken: Optional[float] = None @@ -160,7 +160,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): # todo: move this -class CompiledFile(object): +class CompiledFile: """ A Fortran or C file which has been compiled. diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..b8a95011 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,60 @@ +# ############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +# ############################################################################## + +'''This file is read by pytest and provides common fixtures. +''' + +from unittest import mock + +import pytest + +from fab.tools import Category, CCompiler, FortranCompiler, Linker, ToolBox + + +# This avoids pylint warnings about Redefining names from outer scope +@pytest.fixture(name="mock_c_compiler") +def fixture_mock_c_compiler(): + '''Provides a mock C-compiler.''' + mock_compiler = CCompiler("mock_c_compiler", "mock_exec", "suite") + mock_compiler.run = mock.Mock() + mock_compiler._version = "1.2.3" + mock_compiler._name = "mock_c_compiler" + mock_compiler._exec_name = "mock_c_compiler.exe" + return mock_compiler + + +@pytest.fixture(name="mock_fortran_compiler") +def fixture_mock_fortran_compiler(): + '''Provides a mock Fortran-compiler.''' + mock_compiler = FortranCompiler("mock_fortran_compiler", "mock_exec", + "suite", module_folder_flag="", + syntax_only_flag=None, compile_flag=None, + output_flag=None, omp_flag=None) + mock_compiler.run = mock.Mock() + mock_compiler._name = "mock_fortran_compiler" + mock_compiler._exec_name = "mock_fortran_compiler.exe" + mock_compiler._version = "1.2.3" + return mock_compiler + + +@pytest.fixture(name="mock_linker") +def fixture_mock_linker(): + '''Provides a mock linker.''' + mock_linker = Linker("mock_linker", "mock_linker.exe", + Category.FORTRAN_COMPILER) + mock_linker.run = mock.Mock() + mock_linker._version = "1.2.3" + return mock_linker + + +@pytest.fixture(name="tool_box") +def fixture_tool_box(mock_c_compiler, mock_fortran_compiler, mock_linker): + '''Provides a tool box with a mock Fortran and a mock C compiler.''' + tool_box = ToolBox() + tool_box.add_tool(mock_c_compiler) + tool_box.add_tool(mock_fortran_compiler) + tool_box.add_tool(mock_linker) + return tool_box diff --git a/tests/system_tests/CFortranInterop/test_CFortranInterop.py b/tests/system_tests/CFortranInterop/test_CFortranInterop.py index ec708e68..cc5632ae 100644 --- a/tests/system_tests/CFortranInterop/test_CFortranInterop.py +++ b/tests/system_tests/CFortranInterop/test_CFortranInterop.py @@ -16,6 +16,7 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran, preprocess_c +from fab.tools import ToolBox import pytest @@ -25,21 +26,18 @@ def test_CFortranInterop(tmp_path): # build - with BuildConfig(fab_workspace=tmp_path, project_label='foo', multiprocessing=False) as config, \ - pytest.warns(UserWarning, match="removing managed flag"): - - grab_folder(config, src=PROJECT_SOURCE), - find_source_files(config), - - c_pragma_injector(config), - preprocess_c(config), - preprocess_fortran(config), - - analyse(config, root_symbol='main'), - - compile_c(config, common_flags=['-c', '-std=c99']), - 
compile_fortran(config, common_flags=['-c']), - link_exe(config, linker='gcc', flags=['-lgfortran']), + with BuildConfig(fab_workspace=tmp_path, project_label='foo', + tool_box=ToolBox(), multiprocessing=False) as config: + grab_folder(config, src=PROJECT_SOURCE) + find_source_files(config) + c_pragma_injector(config) + preprocess_c(config) + preprocess_fortran(config) + analyse(config, root_symbol='main') + compile_c(config, common_flags=['-c', '-std=c99']) + with pytest.warns(UserWarning, match="Removing managed flag"): + compile_fortran(config, common_flags=['-c']) + link_exe(config, flags=['-lgfortran']) # todo: on an ubuntu vm, we needed these before the object files - investigate further # [ # '/lib/x86_64-linux-gnu/libc.so.6', diff --git a/tests/system_tests/CUserHeader/test_CUserHeader.py b/tests/system_tests/CUserHeader/test_CUserHeader.py index 04dac386..98d2ccb5 100644 --- a/tests/system_tests/CUserHeader/test_CUserHeader.py +++ b/tests/system_tests/CUserHeader/test_CUserHeader.py @@ -15,6 +15,7 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_c +from fab.tools import ToolBox PROJECT_SOURCE = Path(__file__).parent / 'project-source' @@ -22,18 +23,16 @@ def test_CUseHeader(tmp_path): # build - with BuildConfig(fab_workspace=tmp_path, project_label='foo', multiprocessing=False) as config: - - grab_folder(config, PROJECT_SOURCE), - - find_source_files(config), - - c_pragma_injector(config), - preprocess_c(config), - analyse(config, root_symbol='main'), - compile_c(config, common_flags=['-c', '-std=c99']), - - link_exe(config, linker='gcc', flags=['-lgfortran']), + with BuildConfig(fab_workspace=tmp_path, tool_box=ToolBox(), + project_label='foo', multiprocessing=False) as config: + + grab_folder(config, PROJECT_SOURCE) + find_source_files(config) + c_pragma_injector(config) + preprocess_c(config) + analyse(config, root_symbol='main') + compile_c(config, common_flags=['-c', '-std=c99']) + link_exe(config, flags=['-lgfortran']) assert len(config.artefact_store[EXECUTABLES]) == 1 diff --git a/tests/system_tests/FortranDependencies/test_FortranDependencies.py b/tests/system_tests/FortranDependencies/test_FortranDependencies.py index e5d22f2b..6ee57e3e 100644 --- a/tests/system_tests/FortranDependencies/test_FortranDependencies.py +++ b/tests/system_tests/FortranDependencies/test_FortranDependencies.py @@ -16,22 +16,24 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran +from fab.tools import ToolBox import pytest -def test_FortranDependencies(tmp_path): +def test_fortran_dependencies(tmp_path): # build - with BuildConfig(fab_workspace=tmp_path, project_label='foo', multiprocessing=False) as config, \ - pytest.warns(UserWarning, match="removing managed flag"): - grab_folder(config, src=Path(__file__).parent / 'project-source'), - find_source_files(config), - preprocess_fortran(config), # nothing to preprocess, actually, it's all little f90 files - analyse(config, root_symbol=['first', 'second']), - compile_c(config, common_flags=['-c', '-std=c99']), - compile_fortran(config, common_flags=['-c']), - link_exe(config, linker='gcc', flags=['-lgfortran']), + with BuildConfig(fab_workspace=tmp_path, tool_box=ToolBox(), + project_label='foo', multiprocessing=False) as config: + grab_folder(config, src=Path(__file__).parent / 'project-source') + find_source_files(config) + preprocess_fortran(config) # nothing to preprocess, 
actually, it's all little f90 files + analyse(config, root_symbol=['first', 'second']) + compile_c(config, common_flags=['-c', '-std=c99']) + with pytest.warns(UserWarning, match="Removing managed flag"): + compile_fortran(config, common_flags=['-c']) + link_exe(config, flags=['-lgfortran']) assert len(config.artefact_store[EXECUTABLES]) == 2 diff --git a/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py b/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py index 0888f536..cd22f528 100644 --- a/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py +++ b/tests/system_tests/FortranPreProcess/test_FortranPreProcess.py @@ -14,19 +14,22 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran +from fab.tools import ToolBox + import pytest def build(fab_workspace, fpp_flags=None): - with BuildConfig(fab_workspace=fab_workspace, project_label='foo', multiprocessing=False) as config, \ - pytest.warns(UserWarning, match="removing managed flag"): - grab_folder(config, Path(__file__).parent / 'project-source'), - find_source_files(config), - preprocess_fortran(config, common_flags=fpp_flags), - analyse(config, root_symbol=['stay_or_go_now']), - compile_fortran(config, common_flags=['-c']), - link_exe(config, linker='gcc', flags=['-lgfortran']), + with BuildConfig(fab_workspace=fab_workspace, tool_box=ToolBox(), + project_label='foo', multiprocessing=False) as config: + grab_folder(config, Path(__file__).parent / 'project-source') + find_source_files(config) + preprocess_fortran(config, common_flags=fpp_flags) + analyse(config, root_symbol=['stay_or_go_now']) + with pytest.warns(UserWarning, match="Removing managed flag"): + compile_fortran(config, common_flags=['-c']) + link_exe(config, flags=['-lgfortran']) return config diff --git a/tests/system_tests/MinimalC/test_MinimalC.py b/tests/system_tests/MinimalC/test_MinimalC.py index 36a32b0b..4d32751e 100644 --- a/tests/system_tests/MinimalC/test_MinimalC.py +++ b/tests/system_tests/MinimalC/test_MinimalC.py @@ -15,24 +15,24 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_c +from fab.tools import ToolBox PROJECT_SOURCE = Path(__file__).parent / 'project-source' -def test_MinimalC(tmp_path): +def test_minimal_c(tmp_path): # build - with BuildConfig(fab_workspace=tmp_path, project_label='foo', multiprocessing=False) as config: - - grab_folder(config, PROJECT_SOURCE), - find_source_files(config), - - c_pragma_injector(config), - preprocess_c(config), - analyse(config, root_symbol='main'), - compile_c(config, common_flags=['-c', '-std=c99']), - - link_exe(config, linker='gcc'), + with BuildConfig(fab_workspace=tmp_path, tool_box=ToolBox(), + project_label='foo', multiprocessing=False) as config: + + grab_folder(config, PROJECT_SOURCE) + find_source_files(config) + c_pragma_injector(config) + preprocess_c(config) + analyse(config, root_symbol='main') + compile_c(config, common_flags=['-c', '-std=c99']) + link_exe(config) assert len(config.artefact_store[EXECUTABLES]) == 1 diff --git a/tests/system_tests/MinimalFortran/test_MinimalFortran.py b/tests/system_tests/MinimalFortran/test_MinimalFortran.py index 455755cd..4d0efaab 100644 --- a/tests/system_tests/MinimalFortran/test_MinimalFortran.py +++ b/tests/system_tests/MinimalFortran/test_MinimalFortran.py @@ -14,23 +14,25 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from 
fab.steps.preprocess import preprocess_fortran +from fab.tools import ToolBox import pytest PROJECT_SOURCE = Path(__file__).parent / 'project-source' -def test_MinimalFortran(tmp_path): +def test_minimal_fortran(tmp_path): # build - with BuildConfig(fab_workspace=tmp_path, project_label='foo', multiprocessing=False) as config, \ - pytest.warns(UserWarning, match="removing managed flag"): - grab_folder(config, PROJECT_SOURCE), - find_source_files(config), - preprocess_fortran(config), - analyse(config, root_symbol='test'), - compile_fortran(config, common_flags=['-c']), - link_exe(config, linker='gcc', flags=['-lgfortran']), + with BuildConfig(fab_workspace=tmp_path, tool_box=ToolBox(), + project_label='foo', multiprocessing=False) as config: + grab_folder(config, PROJECT_SOURCE) + find_source_files(config) + preprocess_fortran(config) + analyse(config, root_symbol='test') + with pytest.warns(UserWarning, match="Removing managed flag"): + compile_fortran(config, common_flags=['-c']) + link_exe(config, flags=['-lgfortran']) assert len(config.artefact_store[EXECUTABLES]) == 1 diff --git a/tests/system_tests/git/test_git.py b/tests/system_tests/git/test_git.py index 32895dfe..d343c7e8 100644 --- a/tests/system_tests/git/test_git.py +++ b/tests/system_tests/git/test_git.py @@ -23,46 +23,51 @@ import pytest from fab.build_config import BuildConfig -from fab.steps.grab.git import current_commit, git_checkout, git_merge +from fab.steps.grab.git import git_checkout, git_merge +from fab.tools import Git, ToolBox @pytest.fixture def config(tmp_path): - return BuildConfig('proj', fab_workspace=tmp_path) + return BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) -class TestGitCheckout(object): +class TestGitCheckout: # Check we can fetch from github. @pytest.fixture def url(self): return 'https://github.com/metomi/fab-test-data.git' def test_checkout_url(self, tmp_path, url, config): + git = Git() with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): git_checkout(config, src=url, dst_label='tiny_fortran') # todo: The commit will keep changing. 
Perhaps make a non-changing branch - assert current_commit(config.source_root / 'tiny_fortran') == '3cba55e' + assert git.current_commit(config.source_root / 'tiny_fortran') == '3cba55e' def test_checkout_branch(self, tmp_path, url, config): + git = Git() with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): git_checkout(config, src=url, dst_label='tiny_fortran', revision='main') - assert current_commit(config.source_root / 'tiny_fortran') == '3cba55e' + assert git.current_commit(config.source_root / 'tiny_fortran') == '3cba55e' def test_checkout_tag(self, tmp_path, url, config): + git = Git() with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): git_checkout(config, src=url, dst_label='tiny_fortran', revision='early') - assert current_commit(config.source_root / 'tiny_fortran') == 'ee56489' + assert git.current_commit(config.source_root / 'tiny_fortran') == 'ee56489' def test_checkout_commit(self, tmp_path, url, config): + git = Git() with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): git_checkout(config, src=url, dst_label='tiny_fortran', revision='ee5648928893701c5dbccdbf0561c0038352a5ff') - assert current_commit(config.source_root / 'tiny_fortran') == 'ee56489' + assert git.current_commit(config.source_root / 'tiny_fortran') == 'ee56489' # todo: we could do with a test to ensure left-over files from previous fetches are cleaned away -class TestGitMerge(object): +class TestGitMerge: @pytest.fixture def repo_url(self, tmp_path): diff --git a/tests/system_tests/incremental_fortran/test_incremental_fortran.py b/tests/system_tests/incremental_fortran/test_incremental_fortran.py index 56fcc5aa..b7b9b4a1 100644 --- a/tests/system_tests/incremental_fortran/test_incremental_fortran.py +++ b/tests/system_tests/incremental_fortran/test_incremental_fortran.py @@ -15,6 +15,7 @@ from fab.steps.grab.folder import grab_folder from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran +from fab.tools import ToolBox from fab.util import file_walk, get_prebuild_file_groups PROJECT_LABEL = 'tiny_project' @@ -36,19 +37,23 @@ class TestIncremental(object): def config(self, tmp_path): # tmp_path is a pytest fixture which differs per test, per run logging.getLogger('fab').setLevel(logging.WARNING) - with BuildConfig(project_label=PROJECT_LABEL, fab_workspace=tmp_path, multiprocessing=False) as grab_config: + with BuildConfig(project_label=PROJECT_LABEL, + tool_box=ToolBox(), fab_workspace=tmp_path, + multiprocessing=False) as grab_config: grab_folder(grab_config, Path(__file__).parent / 'project-source', dst_label='src') - build_config = BuildConfig(project_label=PROJECT_LABEL, fab_workspace=tmp_path, multiprocessing=False) + build_config = BuildConfig(project_label=PROJECT_LABEL, + tool_box=ToolBox(), fab_workspace=tmp_path, + multiprocessing=False) return build_config def run_steps(self, build_config): - find_source_files(build_config), - preprocess_fortran(build_config), - analyse(build_config, root_symbol='my_prog'), - compile_fortran(build_config), - link_exe(build_config, linker='gcc', flags=['-lgfortran']), + find_source_files(build_config) + preprocess_fortran(build_config) + analyse(build_config, root_symbol='my_prog') + compile_fortran(build_config) + link_exe(build_config, flags=['-lgfortran']) # Add a permissive cleanup step because we want to know about every file which is created, # across multiple runs of the build. 
Otherwise, an aggressive cleanup will be automatically added. cleanup_prebuilds(build_config, older_than=timedelta(weeks=1)) @@ -238,7 +243,9 @@ class TestCleanupPrebuilds(object): @pytest.mark.parametrize("kwargs,expect", in_out) def test_clean(self, tmp_path, kwargs, expect): - with BuildConfig(project_label=PROJECT_LABEL, fab_workspace=tmp_path, multiprocessing=False) as config: + with BuildConfig(project_label=PROJECT_LABEL, + tool_box=ToolBox(), + fab_workspace=tmp_path, multiprocessing=False) as config: remaining = self._prune(config, kwargs=kwargs) assert sorted(remaining) == expect @@ -246,7 +253,9 @@ def test_clean(self, tmp_path, kwargs, expect): def test_prune_unused(self, tmp_path): # pruning everything not current - with BuildConfig(project_label=PROJECT_LABEL, fab_workspace=tmp_path, multiprocessing=False) as config: + with BuildConfig(project_label=PROJECT_LABEL, + tool_box=ToolBox(), fab_workspace=tmp_path, + multiprocessing=False) as config: config._artefact_store = {CURRENT_PREBUILDS: { tmp_path / PROJECT_LABEL / BUILD_OUTPUT / PREBUILD / 'a.123.foo', tmp_path / PROJECT_LABEL / BUILD_OUTPUT / PREBUILD / 'a.456.foo', diff --git a/tests/system_tests/prebuild/test_prebuild.py b/tests/system_tests/prebuild/test_prebuild.py index 92e7ef2c..492a4832 100644 --- a/tests/system_tests/prebuild/test_prebuild.py +++ b/tests/system_tests/prebuild/test_prebuild.py @@ -12,6 +12,7 @@ from fab.steps.grab.prebuild import grab_pre_build from fab.steps.link import link_exe from fab.steps.preprocess import preprocess_fortran +from fab.tools import ToolBox from fab.util import file_walk @@ -26,16 +27,18 @@ def build_config(self, fab_workspace, grab_prebuild_folder=None): logging.getLogger('fab').setLevel(logging.WARNING) with BuildConfig( - project_label='test_prebuild', fab_workspace=fab_workspace, multiprocessing=False) as config: - grab_folder(config, Path(__file__).parent / 'project-source', dst_label='src'), + project_label='test_prebuild', tool_box=ToolBox(), + fab_workspace=fab_workspace, multiprocessing=False) as config: + grab_folder(config, Path(__file__).parent / 'project-source', + dst_label='src') # insert a prebuild grab step or don't insert anything if grab_prebuild_folder: grab_pre_build(config, grab_prebuild_folder) - find_source_files(config), - preprocess_fortran(config), - analyse(config, root_symbol='my_prog'), - compile_fortran(config), - link_exe(config, linker='gcc', flags=['-lgfortran']), + find_source_files(config) + preprocess_fortran(config) + analyse(config, root_symbol='my_prog') + compile_fortran(config) + link_exe(config, flags=['-lgfortran']) return config diff --git a/tests/system_tests/psyclone/test_psyclone_system_test.py b/tests/system_tests/psyclone/test_psyclone_system_test.py index 3d1f6e21..325e5d2e 100644 --- a/tests/system_tests/psyclone/test_psyclone_system_test.py +++ b/tests/system_tests/psyclone/test_psyclone_system_test.py @@ -17,8 +17,10 @@ from fab.steps.find_source_files import find_source_files from fab.steps.grab.folder import grab_folder from fab.steps.preprocess import preprocess_fortran -from fab.steps.psyclone import _analyse_x90s, _analyse_kernels, make_parsable_x90, preprocess_x90, \ - psyclone, tool_available, run_psyclone +from fab.steps.psyclone import (_analyse_x90s, _analyse_kernels, + make_parsable_x90, preprocess_x90, + psyclone) +from fab.tools import ToolBox, Psyclone from fab.util import file_checksum SAMPLE_KERNEL = Path(__file__).parent / 'kernel.f90' @@ -47,7 +49,7 @@ def test_make_parsable_x90(tmp_path): parsable_x90_path 
= make_parsable_x90(input_x90_path) x90_analyser = X90Analyser() - with BuildConfig('proj', fab_workspace=tmp_path) as config: + with BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) as config: x90_analyser._config = config # todo: code smell x90_analyser.run(parsable_x90_path) @@ -61,7 +63,7 @@ def test_make_parsable_x90(tmp_path): unlink(parsable_x90_path) -class TestX90Analyser(object): +class TestX90Analyser: expected_analysis_result = AnalysedX90( fpath=EXPECT_PARSABLE_X90, @@ -71,7 +73,7 @@ class TestX90Analyser(object): def run(self, tmp_path): parsable_x90_path = self.expected_analysis_result.fpath x90_analyser = X90Analyser() - with BuildConfig('proj', fab_workspace=tmp_path) as config: + with BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) as config: x90_analyser._config = config analysed_x90, _ = x90_analyser.run(parsable_x90_path) # type: ignore # don't delete the prebuild @@ -93,10 +95,11 @@ def test_prebuild(self, tmp_path): assert analysed_x90 == self.expected_analysis_result -class Test_analysis_for_x90s_and_kernels(object): +class Test_analysis_for_x90s_and_kernels: def test_analyse(self, tmp_path): - with BuildConfig('proj', fab_workspace=tmp_path) as config: + with BuildConfig('proj', fab_workspace=tmp_path, + tool_box=ToolBox()) as config: analysed_x90 = _analyse_x90s(config, x90s=[SAMPLE_X90]) all_kernel_hashes = _analyse_kernels(config, kernel_roots=[Path(__file__).parent]) @@ -116,15 +119,16 @@ def test_analyse(self, tmp_path): } -@pytest.mark.skipif(not tool_available(), reason="psyclone cli tool not available") -class TestPsyclone(object): +@pytest.mark.skipif(not Psyclone().is_available, reason="psyclone cli tool not available") +class TestPsyclone: """ Basic run of the psyclone step. """ @pytest.fixture def config(self, tmp_path): - config = BuildConfig('proj', fab_workspace=tmp_path, multiprocessing=False) + config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path, + multiprocessing=False) return config def steps(self, config): @@ -176,43 +180,44 @@ def test_prebuild(self, tmp_path, config): self.steps(config) # make sure no work gets done the second time round - with mock.patch('fab.parse.x90.X90Analyser.walk_nodes') as mock_x90_walk: - with mock.patch('fab.parse.fortran.FortranAnalyser.walk_nodes') as mock_fortran_walk: - with mock.patch('fab.steps.psyclone.run_psyclone') as mock_run: - with config, pytest.warns(UserWarning, match="no transformation script specified"): - self.steps(config) + with mock.patch('fab.parse.x90.X90Analyser.walk_nodes') as mock_x90_walk, \ + mock.patch('fab.parse.fortran.FortranAnalyser.walk_nodes') as mock_fortran_walk, \ + mock.patch('fab.tools.psyclone.Psyclone.process') as mock_run, \ + config, pytest.warns(UserWarning, match="no transformation script specified"): + self.steps(config) mock_x90_walk.assert_not_called() mock_fortran_walk.assert_not_called() mock_run.assert_not_called() -class TestTransformationScript(object): +class TestTransformationScript: """ Check whether transformation script is called with x90 file once and whether transformation script is passed to psyclone after '-s'. 
""" def test_transformation_script(self): + psyclone_tool = Psyclone() mock_transformation_script = mock.Mock(return_value=__file__) - with mock.patch('fab.steps.psyclone.run_command') as mock_run_command: + with mock.patch('fab.tools.psyclone.Psyclone.run') as mock_run_command: mock_transformation_script.return_value = Path(__file__) - run_psyclone(generated=Path(__file__), - modified_alg=Path(__file__), - x90_file=Path(__file__), - kernel_roots=[], - transformation_script=mock_transformation_script, - cli_args=[], - config=None, # type: ignore[arg-type] - ) + psyclone_tool.process(api="dynamo0.3", + x90_file=Path(__file__), + psy_file=Path(__file__), + alg_file=Path(__file__), + kernel_roots=[], + transformation_script=mock_transformation_script, + additional_parameters=[], + config=None, # type: ignore[arg-type] + ) # check whether x90 is passed to transformation_script mock_transformation_script.assert_called_once_with(Path(__file__), None) # check transformation_script is passed to psyclone command with '-s' - mock_run_command.assert_called_with(['psyclone', '-api', 'dynamo0.3', - '-l', 'all', - '-opsy', Path(__file__), - '-oalg', Path(__file__), - '-s', Path(__file__), - Path(__file__), - ]) + mock_run_command.assert_called_with( + additional_parameters=['-api', 'dynamo0.3', '-l', 'all', + '-opsy', Path(__file__), + '-oalg', Path(__file__), + '-s', Path(__file__), + __file__]) diff --git a/tests/system_tests/svn_fcm/test_svn_fcm_system_test.py b/tests/system_tests/svn_fcm/test_svn_fcm_system_test.py index da2de348..3e52e711 100644 --- a/tests/system_tests/svn_fcm/test_svn_fcm_system_test.py +++ b/tests/system_tests/svn_fcm/test_svn_fcm_system_test.py @@ -16,8 +16,10 @@ import pytest import fab +from fab.build_config import BuildConfig +from fab.tools import Fcm, Subversion, ToolBox from fab.steps.grab.fcm import fcm_checkout, fcm_export, fcm_merge -from fab.steps.grab.svn import svn_checkout, svn_export, svn_merge, tool_available +from fab.steps.grab.svn import svn_checkout, svn_export, svn_merge # Fcm isn't available in the github test images...unless we install it from github. @@ -26,12 +28,14 @@ checkout_funcs = [] merge_funcs: List[Callable] = [] -if tool_available('svn'): +svn = Subversion() +if svn.is_available: export_funcs.append(svn_export) checkout_funcs.append(svn_checkout) merge_funcs.append(svn_merge) -if tool_available('fcm'): +fcm = Fcm() +if fcm.is_available: export_funcs.append(fcm_export) checkout_funcs.append(fcm_checkout) merge_funcs.append(fcm_merge) @@ -40,45 +44,51 @@ warnings.warn('Neither svn not fcm are available for testing') -@pytest.fixture -def config(tmp_path): - return mock.Mock(source_root=tmp_path / 'fab_proj/source') +@pytest.fixture(name="config") +def config_fixture(tmp_path: Path) -> BuildConfig: + ''':Returns: a mock BuildConfig object.''' + return mock.Mock(source_root=tmp_path / 'fab_proj/source', + tool_box=ToolBox()) -@pytest.fixture -def repo_url(tmp_path): +@pytest.fixture(name="repo_url") +def repo_url_fixture(tmp_path: str) -> str: + '''Unpacks a gzip'ed repository into tmp_path and returns + its location.''' shutil.unpack_archive( Path(__file__).parent / 'repo.tar.gz', tmp_path) return f'file://{tmp_path}/repo' -@pytest.fixture -def trunk(repo_url): - # URL of the main branch. +@pytest.fixture(name="trunk") +def trunk_fixture(repo_url: str) -> str: + ''':returns:URL of the main branch. ''' return f'{repo_url}/proj/main/trunk' -@pytest.fixture -def file1_experiment_a(repo_url): - # A branch which modifies file 1. 
+@pytest.fixture(name="file1_experiment_a")
+def file1_experiment_a_fixture(repo_url: str) -> str:
+    ''':returns: a branch which modifies file 1.'''
     return f'{repo_url}/proj/main/branches/dev/person_a/file1_experiment_a'


-@pytest.fixture
-def file1_experiment_b(repo_url):
-    # Another branch which modifies file 1. It should conflict with experiment a.
+@pytest.fixture(name="file1_experiment_b")
+def file1_experiment_b_fixture(repo_url: str) -> str:
+    '''Another branch which modifies file 1. It should conflict
+    with experiment a.'''
     return f'{repo_url}/proj/main/branches/dev/person_a/file1_experiment_b'


-@pytest.fixture
-def file2_experiment(repo_url):
-    # A branch which modifies file 2.
-    # It has two revisions, with different versions of the modification in r7 and r8.
+@pytest.fixture(name="file2_experiment")
+def file2_experiment_fixture(repo_url: str) -> str:
+    '''A branch which modifies file 2. It has two revisions, with different
+    versions of the modification in r7 and r8.'''
     return f'{repo_url}/proj/main/branches/dev/person_b/file2_experiment'


-def confirm_trunk(config) -> bool:
+def confirm_trunk(config: BuildConfig) -> bool:
+    ''':returns: whether the source directory is at trunk or not.'''
     file1_txt = (config.source_root / 'proj/file1.txt').read_text()
     file2_txt = (config.source_root / 'proj/file2.txt').read_text()
     if not file1_txt.startswith("This is sentence one in file one."):
@@ -89,40 +99,46 @@ def confirm_trunk(config) -> bool:


 def confirm_file1_experiment_a(config) -> bool:
-    # Have we got the revision 7 text in file 2?
+    ''':returns: whether file 2 contains the Experiment A modification.'''
     file1_txt = (config.source_root / 'proj/file2.txt').read_text()
     return file1_txt.startswith("This is sentence one, with Experiment A modification.")


 def confirm_file2_experiment_r7(config) -> bool:
-    # Have we got the revision 7 text in file 2?
+    ''':returns: whether we got the revision 7 text in file 2.'''
     file2_txt = (config.source_root / 'proj/file2.txt').read_text()
     return file2_txt.strip().endswith("This is sentence two, with experimental modification.")


 def confirm_file2_experiment_r8(config) -> bool:
-    # Have we got the revision 7 text in file 2?
+    ''':returns: whether we got the revision 8 text in file 2.'''
     file2_txt = (config.source_root / 'proj/file2.txt').read_text()
-    return file2_txt.strip().endswith("This is sentence two, with further experimental modification.")
+    return file2_txt.strip().endswith("This is sentence two, with "
+                                      "further experimental modification.")


-class TestExport(object):
-
-    # Run the test twice, once with SvnExport and once with FcmExport - depending on which tools are available.
+class TestExport():
+    '''Test export related functionality.
+    '''
+    # Run the test twice, once with SvnExport and once with FcmExport -
+    # depending on which tools are available.

     @pytest.mark.parametrize('export_func', export_funcs)
     @pytest.mark.filterwarnings("ignore: Python 3.14 will, "
                                 "by default, filter extracted tar archives "
                                 "and reject files or modify their metadata. 
" "Use the filter argument to control this behavior.") def test_export(self, file2_experiment, config, export_func): - # Export the "file 2 experiment" branch, which has different sentence from trunk in r1 and r2 + '''Export the "file 2 experiment" branch, which has different sentence + from trunk in r1 and r2.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): export_func(config, src=file2_experiment, dst_label='proj', revision=7) assert confirm_file2_experiment_r7(config) # Make sure we can export twice into the same folder. - # Todo: should the export step wipe the destination first? To remove residual, orphaned files? - with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): + # Todo: should the export step wipe the destination first? + # To remove residual, orphaned files? + with pytest.warns(UserWarning, match="_metric_send_conn not set, " + "cannot send metrics"): export_func(config, src=file2_experiment, dst_label='proj', revision=8) assert confirm_file2_experiment_r8(config) @@ -131,22 +147,26 @@ def test_export(self, file2_experiment, config, export_func): "by default, filter extracted tar archives " "and reject files or modify their metadata. " "Use the filter argument to control this behavior.") -class TestCheckout(object): +class TestCheckout(): + '''Checkout related tests.''' @pytest.mark.parametrize('checkout_func', checkout_funcs) def test_new_folder(self, trunk, config, checkout_func): + '''Tests that a new folder is created if required.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): checkout_func(config, src=trunk, dst_label='proj') assert confirm_trunk(config) @pytest.mark.parametrize('checkout_func', checkout_funcs) def test_working_copy(self, file2_experiment, config, checkout_func): - # Make sure we can checkout into a working copy. - # The scenario we're testing here is checking out across multiple builds. - # This will usually be the same revision. The first run in a new folder will be a checkout, - # and subsequent runs will use update, which can handle a version bump. - # Since we can change the revision and expect it to work, let's test that while we're here. - + '''Make sure we can checkout into a working copy. The scenario + we're testing here is checking out across multiple builds. This will + usually be the same revision. The first run in a new folder will be a + checkout, and subsequent runs will use update, which can handle a + version bump. 
Since we can change the revision and expect it to work, + let's test that while we're here.''' + + # pylint: disable=comparison-with-callable if checkout_func == svn_checkout: expect_tool = 'svn' elif checkout_func == fcm_checkout: @@ -154,29 +174,32 @@ def test_working_copy(self, file2_experiment, config, checkout_func): else: assert False - with mock.patch('fab.steps.grab.svn.run_command', wraps=fab.steps.grab.svn.run_command) as wrap, \ + with mock.patch('fab.tools.tool.subprocess.run', + wraps=fab.tools.tool.subprocess.run) as wrap, \ pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): checkout_func(config, src=file2_experiment, dst_label='proj', revision='7') assert confirm_file2_experiment_r7(config) wrap.assert_called_with([ expect_tool, 'checkout', '--revision', '7', - file2_experiment, str(config.source_root / 'proj')]) + file2_experiment, str(config.source_root / 'proj')], + capture_output=True, env=None, cwd=None, check=False) checkout_func(config, src=file2_experiment, dst_label='proj', revision='8') assert confirm_file2_experiment_r8(config) wrap.assert_called_with( [expect_tool, 'update', '--revision', '8'], - cwd=config.source_root / 'proj') + capture_output=True, env=None, + cwd=config.source_root / 'proj', check=False) @pytest.mark.parametrize('export_func,checkout_func', zip(export_funcs, checkout_funcs)) def test_not_working_copy(self, trunk, config, export_func, checkout_func): - # the export command just makes files, not a working copy + '''Test that the export command just makes files, not a working copy. ''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): export_func(config, src=trunk, dst_label='proj') # if we try to checkout into that folder, it should fail - with pytest.raises(ValueError): + with pytest.raises(RuntimeError): checkout_func(config, src=trunk, dst_label='proj') @@ -184,10 +207,12 @@ def test_not_working_copy(self, trunk, config, export_func, checkout_func): "by default, filter extracted tar archives " "and reject files or modify their metadata. 
" "Use the filter argument to control this behavior.") -class TestMerge(object): +class TestMerge(): + '''Various merge related tests.''' @pytest.mark.parametrize('checkout_func,merge_func', zip(checkout_funcs, merge_funcs)) def test_vanilla(self, trunk, file2_experiment, config, checkout_func, merge_func): + '''Test generic merging.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): # something to merge into; checkout trunk checkout_func(config, src=trunk, dst_label='proj') @@ -199,6 +224,7 @@ def test_vanilla(self, trunk, file2_experiment, config, checkout_func, merge_fun @pytest.mark.parametrize('checkout_func,merge_func', zip(checkout_funcs, merge_funcs)) def test_revision(self, trunk, file2_experiment, config, checkout_func, merge_func): + '''Test merging a specific revision.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): # something to merge into; checkout trunk checkout_func(config, src=trunk, dst_label='proj') @@ -210,15 +236,18 @@ def test_revision(self, trunk, file2_experiment, config, checkout_func, merge_fu @pytest.mark.parametrize('export_func,merge_func', zip(export_funcs, merge_funcs)) def test_not_working_copy(self, trunk, file2_experiment, config, export_func, merge_func): + '''Test error handling when merging into an exported file.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): export_func(config, src=trunk, dst_label='proj') # try to merge into an export - with pytest.raises(ValueError): + with pytest.raises(RuntimeError): merge_func(config, src=file2_experiment, dst_label='proj', revision=7) @pytest.mark.parametrize('checkout_func,merge_func', zip(checkout_funcs, merge_funcs)) - def test_conflict(self, file1_experiment_a, file1_experiment_b, config, checkout_func, merge_func): + def test_conflict(self, file1_experiment_a, file1_experiment_b, config, + checkout_func, merge_func): + '''Test conflict andling with a checkout.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): checkout_func(config, src=file1_experiment_a, dst_label='proj') confirm_file1_experiment_a(config) @@ -227,8 +256,11 @@ def test_conflict(self, file1_experiment_a, file1_experiment_b, config, checkout with pytest.raises(RuntimeError): merge_func(config, src=file1_experiment_b, dst_label='proj') - @pytest.mark.parametrize('checkout_func,merge_func', zip(checkout_funcs, merge_funcs)) - def test_multiple_merges(self, trunk, file1_experiment_a, file2_experiment, config, checkout_func, merge_func): + @pytest.mark.parametrize('checkout_func,merge_func', + zip(checkout_funcs, merge_funcs)) + def test_multiple_merges(self, trunk, file1_experiment_a, file2_experiment, + config, checkout_func, merge_func): + '''Check that multiple versions can be merged.''' with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): checkout_func(config, src=trunk, dst_label='proj') confirm_trunk(config) diff --git a/tests/system_tests/zero_config/test_zero_config.py b/tests/system_tests/zero_config/test_zero_config.py index 5ae56b3d..ad03d409 100644 --- a/tests/system_tests/zero_config/test_zero_config.py +++ b/tests/system_tests/zero_config/test_zero_config.py @@ -1,14 +1,12 @@ from pathlib import Path -from fab.cli import cli_fab -import shutil -import os -from unittest import mock - import pytest +from fab.cli import cli_fab +from fab.tools import ToolRepository + -class TestZeroConfig(object): +class TestZeroConfig: def 
test_fortran_dependencies(self, tmp_path): # test the sample project in the fortran dependencies system test @@ -37,11 +35,17 @@ def test_fortran_explicit_gfortran(self, tmp_path): # test the sample project in the fortran dependencies system test kwargs = {'project_label': 'fortran explicit gfortran', 'fab_workspace': tmp_path, 'multiprocessing': False} - cc = shutil.which('gcc') - fc = shutil.which('gfortran') + tr = ToolRepository() + tr.set_default_compiler_suite("gnu") + + # TODO: If the intel compiler should be used here, the linker will + # need an additional flag (otherwise duplicated `main` symbols will + # occur). The following code can be used e.g. in cli.py: + # + # if config.tool_box.get_tool(Category.LINKER).name == "linker-ifort": + # flags = ["-nofor-main"] - with mock.patch.dict(os.environ, CC=cc, FC=fc, LD=fc), \ - pytest.warns(DeprecationWarning, match="RootIncFiles is deprecated as .inc files are due to be removed."): + with pytest.warns(DeprecationWarning, match="RootIncFiles is deprecated as .inc files are due to be removed."): config = cli_fab( folder=Path(__file__).parent.parent / 'CFortranInterop', kwargs=kwargs) diff --git a/tests/unit_tests/parse/c/test_c_analyser.py b/tests/unit_tests/parse/c/test_c_analyser.py index 0446f4b6..934c8641 100644 --- a/tests/unit_tests/parse/c/test_c_analyser.py +++ b/tests/unit_tests/parse/c/test_c_analyser.py @@ -11,11 +11,12 @@ from fab.build_config import BuildConfig from fab.parse.c import CAnalyser, AnalysedC +from fab.tools import ToolBox def test_simple_result(tmp_path): c_analyser = CAnalyser() - c_analyser._config = BuildConfig('proj', fab_workspace=tmp_path) + c_analyser._config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) with mock.patch('fab.parse.AnalysedFile.save'): fpath = Path(__file__).parent / "test_c_analyser.c" @@ -31,9 +32,9 @@ def test_simple_result(tmp_path): assert artefact == c_analyser._config.prebuild_folder / f'test_c_analyser.{analysis.file_hash}.an' -class Test__locate_include_regions(object): +class Test__locate_include_regions: - def test_vanilla(self): + def test_vanilla(self) -> None: lines: List[Tuple[int, str]] = [ (5, "foo"), (10, "# pragma FAB SysIncludeStart"), @@ -56,7 +57,7 @@ def test_empty_file(self): self._run(lines=[], expect=[]) def _run(self, lines, expect): - class MockToken(object): + class MockToken: def __init__(self, spelling, line): self.spelling = spelling self.location = Mock(line=line) @@ -74,7 +75,7 @@ def __init__(self, spelling, line): assert analyser._include_region == expect -class Test__check_for_include(object): +class Test__check_for_include: def test_vanilla(self): analyser = CAnalyser() @@ -92,7 +93,7 @@ def test_vanilla(self): assert analyser._check_for_include(45) is None -class Test_process_symbol_declaration(object): +class Test_process_symbol_declaration: # definitions def test_external_definition(self): @@ -140,7 +141,7 @@ def _declaration(self, spelling, include_type): return usr_symbols -class Test_process_symbol_dependency(object): +class Test_process_symbol_dependency: def test_usr_symbol(self): analysed_file = self._dependency(spelling="foo", usr_symbols=["foo"]) diff --git a/tests/unit_tests/parse/fortran/test_fortran_analyser.py b/tests/unit_tests/parse/fortran/test_fortran_analyser.py index bf94aca9..6c334d5f 100644 --- a/tests/unit_tests/parse/fortran/test_fortran_analyser.py +++ b/tests/unit_tests/parse/fortran/test_fortran_analyser.py @@ -16,6 +16,7 @@ from fab.parse import EmptySourceFile from fab.parse.fortran import FortranAnalyser, 
AnalysedFortran
 from fab.parse.fortran_common import iter_content
+from fab.tools import ToolBox


 # todo: test function binding
@@ -45,7 +46,8 @@ class Test_Analyser(object):
     @pytest.fixture
     def fortran_analyser(self, tmp_path):
         fortran_analyser = FortranAnalyser()
-        fortran_analyser._config = BuildConfig('proj', fab_workspace=tmp_path)
+        fortran_analyser._config = BuildConfig('proj', ToolBox(),
+                                               fab_workspace=tmp_path)
         return fortran_analyser

     def test_empty_file(self, fortran_analyser):
diff --git a/tests/unit_tests/steps/test_analyse.py b/tests/unit_tests/steps/test_analyse.py
index 0e1db71b..79d0ef50 100644
--- a/tests/unit_tests/steps/test_analyse.py
+++ b/tests/unit_tests/steps/test_analyse.py
@@ -8,6 +8,7 @@
 from fab.parse.fortran import AnalysedFortran, FortranParserWorkaround
 from fab.steps.analyse import _add_manual_results, _add_unreferenced_deps, _gen_file_deps, _gen_symbol_table, \
     _parse_files
+from fab.tools import ToolBox
 from fab.util import HashedFile

@@ -119,7 +120,7 @@ def test_exceptions(self, tmp_path):
                 pytest.warns(UserWarning, match="deprecated 'DEPENDS ON:'"):
             # The warning "deprecated 'DEPENDS ON:' comment found in fortran code"
             # is in "def _parse_files" in "source/steps/analyse.py"
-            config = BuildConfig('proj', fab_workspace=tmp_path)
+            config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path)

             # the exception should be suppressed (and logged) and this step should run to completion
             _parse_files(config, files=[], fortran_analyser=mock.Mock(), c_analyser=mock.Mock())
diff --git a/tests/unit_tests/steps/test_archive_objects.py b/tests/unit_tests/steps/test_archive_objects.py
index 583e4975..805459e3 100644
--- a/tests/unit_tests/steps/test_archive_objects.py
+++ b/tests/unit_tests/steps/test_archive_objects.py
@@ -1,29 +1,48 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+"""
+Test for the archive step.
+"""
+
 from unittest import mock
 from unittest.mock import call

 from fab.build_config import BuildConfig
 from fab.constants import OBJECT_FILES, OBJECT_ARCHIVES
 from fab.steps.archive_objects import archive_objects
+from fab.tools import Category, ToolBox

 import pytest


-class Test_archive_objects(object):
+class TestArchiveObjects:
+    '''Test the archive step.
+    '''

     def test_for_exes(self):
-        # as used when archiving before linking exes
+        '''As used when archiving before linking exes. 
+        '''
         targets = ['prog1', 'prog2']

-        config = BuildConfig('proj')
-        config._artefact_store = {OBJECT_FILES: {target: [f'{target}.o', 'util.o'] for target in targets}}
+        config = BuildConfig('proj', ToolBox())
+        config._artefact_store = {OBJECT_FILES: {target: [f'{target}.o', 'util.o']
+                                                 for target in targets}}

-        with mock.patch('fab.steps.archive_objects.run_command') as mock_run_command, \
-                pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"):
+        mock_result = mock.Mock(returncode=0, return_value=123)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as mock_run_command, \
+                pytest.warns(UserWarning, match="_metric_send_conn not set, "
+                                                "cannot send metrics"):
             archive_objects(config=config)

         # ensure the correct command line calls were made
         expected_calls = [
-            call(['ar', 'cr', str(config.build_output / f'{target}.a'), f'{target}.o', 'util.o'])
+            call(['ar', 'cr', str(config.build_output / f'{target}.a'),
+                  f'{target}.o', 'util.o'],
+                 capture_output=True, env=None, cwd=None, check=False)
             for target in targets
         ]
         mock_run_command.assert_has_calls(expected_calls)
@@ -33,20 +52,42 @@ def test_for_exes(self):
             target: [str(config.build_output / f'{target}.a')] for target in targets}

     def test_for_library(self):
-        # as used when building an object archive or archiving before linking a shared library
-        pass
+        '''As used when building an object archive or archiving before linking
+        a shared library.
+        '''

-        config = BuildConfig('proj')
+        config = BuildConfig('proj', ToolBox())
         config._artefact_store = {OBJECT_FILES: {None: ['util1.o', 'util2.o']}}

-        with mock.patch('fab.steps.archive_objects.run_command') as mock_run_command, \
-                pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"):
+        mock_result = mock.Mock(returncode=0, return_value=123)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as mock_run_command, \
+                pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"):
             archive_objects(config=config, output_fpath=config.build_output / 'mylib.a')

         # ensure the correct command line calls were made
         mock_run_command.assert_called_once_with([
-            'ar', 'cr', str(config.build_output / 'mylib.a'), 'util1.o', 'util2.o'])
+            'ar', 'cr', str(config.build_output / 'mylib.a'), 'util1.o', 'util2.o'],
+            capture_output=True, env=None, cwd=None, check=False)

         # ensure the correct artefacts were created
         assert config.artefact_store[OBJECT_ARCHIVES] == {
             None: [str(config.build_output / 'mylib.a')]}
+
+    def test_incorrect_tool(self):
+        '''Test that an incorrect archive tool is detected.
+        '''
+
+        config = BuildConfig('proj', ToolBox())
+        tool_box = config.tool_box
+        cc = tool_box[Category.C_COMPILER]
+        # Set the C compiler's category to AR
+        cc._category = Category.AR
+        # and overwrite the archiver with the re-categorised C compiler
+        tool_box.add_tool(cc)
+
+        with pytest.raises(RuntimeError) as err:
+            archive_objects(config=config,
+                            output_fpath=config.build_output / 'mylib.a')
+        assert ("Unexpected tool 'gcc' of type '<class "
+                "'fab.tools.compiler.CCompiler'>' instead of Ar"
+                in str(err.value))
diff --git a/tests/unit_tests/steps/test_compile_c.py b/tests/unit_tests/steps/test_compile_c.py
index 9a58d990..93419b41 100644
--- a/tests/unit_tests/steps/test_compile_c.py
+++ b/tests/unit_tests/steps/test_compile_c.py
@@ -1,53 +1,92 @@
+# ##############################################################################
+# (c) Crown copyright Met Office. All rights reserved. 
+# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +# ############################################################################## + +'''Tests the compile_c.py step. +''' + import os from pathlib import Path from unittest import mock -from unittest.mock import DEFAULT import pytest from fab.build_config import AddFlags, BuildConfig from fab.constants import BUILD_TREES, OBJECT_FILES from fab.parse.c import AnalysedC -from fab.steps.compile_c import _get_obj_combo_hash, compile_c +from fab.steps.compile_c import _get_obj_combo_hash, _compile_file, compile_c +from fab.tools import Category, Flags -@pytest.fixture -def content(tmp_path): - config = BuildConfig('proj', multiprocessing=False, fab_workspace=tmp_path) +# This avoids pylint warnings about Redefining names from outer scope +@pytest.fixture(name="content") +def fixture_content(tmp_path, tool_box): + '''Provides a test environment consisting of a config instance, + analysed file and expected hash.''' + + config = BuildConfig('proj', tool_box, multiprocessing=False, + fab_workspace=tmp_path) analysed_file = AnalysedC(fpath=Path(f'{config.source_root}/foo.c'), file_hash=0) - config.artefact_store[BUILD_TREES] = {None: {analysed_file.fpath: analysed_file}} - expect_hash = 9120682468 + config._artefact_store[BUILD_TREES] = {None: {analysed_file.fpath: analysed_file}} + expect_hash = 7435424994 return config, analysed_file, expect_hash +def test_compile_c_wrong_compiler(content): + '''Test if a non-C compiler is specified as c compiler. + ''' + config = content[0] + tb = config.tool_box + # Take the Fortran compiler + fc = tb[Category.FORTRAN_COMPILER] + # And set its category to C_COMPILER + fc._category = Category.C_COMPILER + # So overwrite the C compiler with the re-categorised Fortran compiler + tb.add_tool(fc, silent_replace=True) + + # Now check that _compile_file detects the incorrect class of the + # C compiler + mp_common_args = mock.Mock(config=config) + with pytest.raises(RuntimeError) as err: + _compile_file((None, mp_common_args)) + assert ("Unexpected tool 'mock_fortran_compiler' of type '' instead of CCompiler" + in str(err.value)) + + # This is more of an integration test than a unit test -class Test_CompileC(object): +class TestCompileC: + '''Test various functionalities of the C compilation step.''' def test_vanilla(self, content): - # ensure the command is formed correctly - config, analysed_file, expect_hash = content - + '''Ensure the command is formed correctly.''' + config, _, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] + print("XX", compiler, type(compiler), compiler.category) # run the step - with mock.patch.multiple( - 'fab.steps.compile_c', - run_command=DEFAULT, - send_metric=DEFAULT, - get_compiler_version=mock.Mock(return_value='1.2.3')) as values: + with mock.patch("fab.steps.compile_c.send_metric") as send_metric: with mock.patch('pathlib.Path.mkdir'): - with mock.patch.dict(os.environ, {'CC': 'foo_cc', 'CFLAGS': '-Denv_flag'}), \ - pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): - compile_c( - config=config, path_flags=[AddFlags(match='$source/*', flags=['-I', 'foo/include', '-Dhello'])]) + with mock.patch.dict(os.environ, {'CFLAGS': '-Denv_flag'}), \ + pytest.warns(UserWarning, match="_metric_send_conn not set, " + "cannot send metrics"): + compile_c(config=config, + path_flags=[AddFlags(match='$source/*', + flags=['-I', 'foo/include', '-Dhello'])]) # ensure it made the 
correct command-line call from the child process - values['run_command'].assert_called_with([ - 'foo_cc', '-c', '-Denv_flag', '-I', 'foo/include', '-Dhello', - f'{config.source_root}/foo.c', '-o', str(config.prebuild_folder / f'foo.{expect_hash:x}.o'), - ]) + compiler.run.assert_called_with( + cwd=Path(config.source_root), + additional_parameters=['-c', '-Denv_flag', '-I', 'foo/include', + '-Dhello', 'foo.c', + '-o', str(config.prebuild_folder / + f'foo.{expect_hash:x}.o')], + ) # ensure it sent a metric from the child process - values['send_metric'].assert_called_once() + send_metric.assert_called_once() # ensure it created the correct artefact collection assert config.artefact_store[OBJECT_FILES] == { @@ -55,11 +94,12 @@ def test_vanilla(self, content): } def test_exception_handling(self, content): + '''Test exception handling if the compiler fails.''' config, _, _ = content - - # mock the run command to raise + compiler = config.tool_box[Category.C_COMPILER] + # mock the run command to raise an exception with pytest.raises(RuntimeError): - with mock.patch('fab.steps.compile_c.run_command', side_effect=Exception): + with mock.patch.object(compiler, "run", side_effect=Exception): with mock.patch('fab.steps.compile_c.send_metric') as mock_send_metric: with mock.patch('pathlib.Path.mkdir'): compile_c(config=config) @@ -68,35 +108,53 @@ def test_exception_handling(self, content): mock_send_metric.assert_not_called() -class Test_get_obj_combo_hash(object): +class TestGetObjComboHash: + '''Tests the object combo hash functionality.''' @pytest.fixture def flags(self): - return ['-c', '-Denv_flag', '-I', 'foo/include', '-Dhello'] + '''Returns the flag for these tests.''' + return Flags(['-Denv_flag', '-I', 'foo/include', '-Dhello']) def test_vanilla(self, content, flags): - _, analysed_file, expect_hash = content - result = _get_obj_combo_hash('foo_cc', '1.2.3', analysed_file, flags) + '''Test that we get the expected hashes in this test setup.''' + config, analysed_file, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] + result = _get_obj_combo_hash(compiler, analysed_file, flags) assert result == expect_hash def test_change_file(self, content, flags): - _, analysed_file, expect_hash = content + '''Check that a change in the file (simulated by changing + the hash) changes the obj combo hash.''' + config, analysed_file, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] analysed_file._file_hash += 1 - result = _get_obj_combo_hash('foo_cc', '1.2.3', analysed_file, flags) + result = _get_obj_combo_hash(compiler, analysed_file, flags) assert result == expect_hash + 1 def test_change_flags(self, content, flags): - _, analysed_file, expect_hash = content - flags = ['-Dfoo'] + flags - result = _get_obj_combo_hash('foo_cc', '1.2.3', analysed_file, flags) + '''Test that changing the flags changes the hash.''' + config, analysed_file, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] + flags = Flags(['-Dfoo'] + flags) + result = _get_obj_combo_hash(compiler, analysed_file, flags) assert result != expect_hash def test_change_compiler(self, content, flags): - _, analysed_file, expect_hash = content - result = _get_obj_combo_hash('ooh_cc', '1.2.3', analysed_file, flags) + '''Test that a change in the name of the compiler changes + the hash.''' + config, analysed_file, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] + # Change the name of the compiler + compiler._name = compiler.name + "XX" + result = 
_get_obj_combo_hash(compiler, analysed_file, flags) assert result != expect_hash def test_change_compiler_version(self, content, flags): - _, analysed_file, expect_hash = content - result = _get_obj_combo_hash('foo_cc', '1.2.4', analysed_file, flags) + '''Test that a change in the version number of the compiler + changes the hash.''' + config, analysed_file, expect_hash = content + compiler = config.tool_box[Category.C_COMPILER] + compiler._version = "9.8.7" + result = _get_obj_combo_hash(compiler, analysed_file, flags) assert result != expect_hash diff --git a/tests/unit_tests/steps/test_compile_fortran.py b/tests/unit_tests/steps/test_compile_fortran.py index 7f42662a..5fc6c629 100644 --- a/tests/unit_tests/steps/test_compile_fortran.py +++ b/tests/unit_tests/steps/test_compile_fortran.py @@ -1,4 +1,3 @@ -import os from pathlib import Path from typing import Dict from unittest import mock @@ -6,37 +5,66 @@ import pytest -from fab.build_config import BuildConfig +from fab.build_config import BuildConfig, FlagsConfig from fab.constants import BUILD_TREES, OBJECT_FILES from fab.parse.fortran import AnalysedFortran -from fab.steps.compile_fortran import compile_pass, get_compile_next, get_fortran_compiler, \ - get_mod_hashes, handle_compiler_args, MpCommonArgs, process_file, store_artefacts -from fab.steps.preprocess import get_fortran_preprocessor +from fab.steps.compile_fortran import ( + compile_pass, get_compile_next, + get_mod_hashes, handle_compiler_args, MpCommonArgs, process_file, + store_artefacts) +from fab.tools import Category, ToolBox from fab.util import CompiledFile -@pytest.fixture -def analysed_files(): +# This avoids pylint warnings about Redefining names from outer scope +@pytest.fixture(name="analysed_files") +def fixture_analysed_files(): a = AnalysedFortran(fpath=Path('a.f90'), file_deps={Path('b.f90')}, file_hash=0) b = AnalysedFortran(fpath=Path('b.f90'), file_deps={Path('c.f90')}, file_hash=0) c = AnalysedFortran(fpath=Path('c.f90'), file_hash=0) return a, b, c -@pytest.fixture -def artefact_store(analysed_files): +@pytest.fixture(name="artefact_store") +def fixture_artefact_store(analysed_files): build_tree = {af.fpath: af for af in analysed_files} artefact_store = {BUILD_TREES: {None: build_tree}} return artefact_store -class Test_compile_pass(object): - - def test_vanilla(self, analysed_files): +def test_compile_cc_wrong_compiler(tool_box): + '''Test if a non-C compiler is specified as c compiler. 
+    '''
+    config = BuildConfig('proj', tool_box)
+    # Take the C compiler
+    cc = tool_box[Category.C_COMPILER]
+    # and set its category to FORTRAN_COMPILER
+    cc._category = Category.FORTRAN_COMPILER
+    # so the Fortran compiler slot now holds the re-categorised C compiler
+    tool_box.add_tool(cc, silent_replace=True)
+
+    # Now check that process_file and handle_compiler_args detect the
+    # incorrect class of the Fortran compiler
+    mp_common_args = mock.Mock(config=config)
+    with pytest.raises(RuntimeError) as err:
+        process_file((None, mp_common_args))
+    assert ("Unexpected tool 'mock_c_compiler' of type '<class "
+            "'fab.tools.compiler.CCompiler'>' instead of FortranCompiler"
+            in str(err.value))
+    with pytest.raises(RuntimeError) as err:
+        handle_compiler_args(config)
+    assert ("Unexpected tool 'mock_c_compiler' of type '<class "
+            "'fab.tools.compiler.CCompiler'>' instead of FortranCompiler"
+            in str(err.value))
+
+
+class TestCompilePass:

+    def test_vanilla(self, analysed_files, tool_box: ToolBox):
         # make sure it compiles b only
         a, b, c = analysed_files
         uncompiled = {a, b}
-        compiled = {c.fpath: mock.Mock(input_fpath=c.fpath)}
+        compiled: Dict[Path, CompiledFile] = {c.fpath: mock.Mock(input_fpath=c.fpath)}

         run_mp_results = [
             (
@@ -48,18 +76,19 @@ def test_vanilla(self, analysed_files):

         # this gets filled in
         mod_hashes: Dict[str, int] = {}

-        config = BuildConfig('proj')
+        config = BuildConfig('proj', tool_box)
+        mp_common_args = MpCommonArgs(config, FlagsConfig(), {}, True)

         with mock.patch('fab.steps.compile_fortran.run_mp', return_value=run_mp_results):
             with mock.patch('fab.steps.compile_fortran.get_mod_hashes'):
                 uncompiled_result = compile_pass(config=config, compiled=compiled, uncompiled=uncompiled,
-                                                 mod_hashes=mod_hashes, mp_common_args=None)
+                                                 mod_hashes=mod_hashes, mp_common_args=mp_common_args)

         assert Path('a.f90') not in compiled
         assert Path('b.f90') in compiled
         assert list(uncompiled_result)[0].fpath == Path('a.f90')


-class Test_get_compile_next(object):
+class TestGetCompileNext:

     def test_vanilla(self, analysed_files):
         a, b, c = analysed_files
@@ -68,11 +97,11 @@ def test_vanilla(self, analysed_files):

         compile_next = get_compile_next(compiled, uncompiled)

-        assert compile_next == {b, }
+        assert compile_next == {b}

     def test_unable_to_compile_anything(self, analysed_files):
         # like vanilla, except c hasn't been compiled
-        a, b, c = analysed_files
+        a, b, _ = analysed_files
         to_compile = {a, b}
         already_compiled_files = {}

@@ -80,7 +109,7 @@ def test_unable_to_compile_anything(self, analysed_files):
         get_compile_next(already_compiled_files, to_compile)


-class Test_store_artefacts(object):
+class TestStoreArtefacts:

     def test_vanilla(self):

@@ -117,34 +146,33 @@ def test_vanilla(self):
         }


-class Test_process_file(object):
+# This avoids pylint warnings about Redefining names from outer scope
+@pytest.fixture(name="content")
+def fixture_content(tool_box):
+    flags = ['flag1', 'flag2']
+    flags_config = mock.Mock()
+    flags_config.flags_for_path.return_value = flags

-    def content(self, flags=None):
+    analysed_file = AnalysedFortran(fpath=Path('foofile'), file_hash=34567)
+    analysed_file.add_module_dep('mod_dep_1')
+    analysed_file.add_module_dep('mod_dep_2')
+    analysed_file.add_module_def('mod_def_1')
+    analysed_file.add_module_def('mod_def_2')

-        flags = flags or ['flag1', 'flag2']
-        flags_config = mock.Mock()
-        flags_config.flags_for_path.return_value = flags
+    obj_combo_hash = '17ef947fd'
+    mods_combo_hash = '10867b4f3'
+    mp_common_args = MpCommonArgs(
+        config=BuildConfig('proj', tool_box, fab_workspace=Path('/fab')),
+        flags=flags_config,
+        mod_hashes={'mod_dep_1': 12345, 'mod_dep_2': 23456},
+        
syntax_only=False, + ) + + return (mp_common_args, flags, analysed_file, obj_combo_hash, + mods_combo_hash) - analysed_file = AnalysedFortran(fpath=Path('foofile'), file_hash=34567) - analysed_file.add_module_dep('mod_dep_1') - analysed_file.add_module_dep('mod_dep_2') - analysed_file.add_module_def('mod_def_1') - analysed_file.add_module_def('mod_def_2') - - obj_combo_hash = '1eb0c2d19' - mods_combo_hash = '1747a9a0f' - - mp_common_args = MpCommonArgs( - config=BuildConfig('proj', fab_workspace=Path('/fab')), - flags=flags_config, - compiler='foo_cc', - compiler_version='1.2.3', - mod_hashes={'mod_dep_1': 12345, 'mod_dep_2': 23456}, - two_stage_flag=False, - stage=None, - ) - return mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash +class TestProcessFile: # Developer's note: If the "mods combo hash" changes you'll get an unhelpful message from pytest. # It'll come from this function but pytest won't tell you that. @@ -173,9 +201,9 @@ def ensure_mods_restored(self, mock_copy, mods_combo_hash): any_order=True, ) - def test_without_prebuild(self): + def test_without_prebuild(self, content): # call compile_file() and return a CompiledFile - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content flags_config = mock.Mock() flags_config.flags_for_path.return_value = flags @@ -192,7 +220,7 @@ def test_without_prebuild(self): # check we called the tool correctly mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) # check the correct mod files were copied to the prebuild folder self.ensure_mods_stored(mock_copy, mods_combo_hash) @@ -205,9 +233,9 @@ def test_without_prebuild(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_with_prebuild(self): + def test_with_prebuild(self, content): # If the mods and obj are prebuilt, don't compile. - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, _, analysed_file, obj_combo_hash, mods_combo_hash = content with mock.patch('pathlib.Path.exists', return_value=True): # mod def files and obj file all exist with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -228,11 +256,11 @@ def test_with_prebuild(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_file_hash(self): + def test_file_hash(self, content): # Changing the source hash must change the combo hash for the mods and obj. # Note: This test adds 1 to the analysed files hash. We're using checksums so # the resulting object file and mod file combo hashes can be expected to increase by 1 too. 
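The developer's note above leans on the combo hashes being a plain sum of checksums, which makes them shift by exactly the amount any one input shifts. A toy illustration of that additive property (not Fab's real hashing code; the actual inputs are the ones exercised by the surrounding tests):

```python
def combo_hash(*checksums: int) -> int:
    # Summing checksums makes the combined hash additive: bumping one
    # input by 1 bumps the result by exactly 1, as the tests expect.
    return sum(checksums)


file_hash, flags_hash, compiler_hash = 34567, 111, 222
before = combo_hash(file_hash, flags_hash, compiler_hash)
after = combo_hash(file_hash + 1, flags_hash, compiler_hash)
assert after == before + 1
```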
- mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content analysed_file._file_hash += 1 obj_combo_hash = f'{int(obj_combo_hash, 16) + 1:x}' @@ -247,7 +275,7 @@ def test_file_hash(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -258,10 +286,12 @@ def test_file_hash(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_flags_hash(self): + def test_flags_hash(self, content): # changing the flags must change the object combo hash, but not the mods combo hash - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content(flags=['flag1', 'flag3']) - obj_combo_hash = '1ebce92ee' + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content + flags = ['flag1', 'flag3'] + mp_common_args.flags.flags_for_path.return_value = flags + obj_combo_hash = '17fbbadd2' with mock.patch('pathlib.Path.exists', side_effect=[True, True, False]): # mod files exist, obj file doesn't with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -272,7 +302,7 @@ def test_flags_hash(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -283,11 +313,11 @@ def test_flags_hash(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_deps_hash(self): + def test_deps_hash(self, content): # Changing the checksums of any mod dependency must change the object combo hash but not the mods combo hash. # Note the difference between mods we depend on and mods we define. # The mods we define are not affected by the mods we depend on. 
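To make the comment's distinction concrete: only the object combo hash folds in the checksums of the mods a file depends on, so a changed dependency renames (and therefore rebuilds) the object file while the prebuilt .mod files keep their names. A sketch under those assumptions, with hypothetical helper names:

```python
def object_combo_hash(file_hash, flags_hash, compiler_hash, dep_mod_hashes):
    # Dependencies participate here: a changed mod dependency yields a
    # new object file name and hence a recompile.
    return file_hash + flags_hash + compiler_hash + sum(dep_mod_hashes)


def mods_combo_hash(file_hash, flags_hash, compiler_hash):
    # The mods a file defines ignore the mods it depends on, so the
    # prebuilt .mod files keep their names.
    return file_hash + flags_hash + compiler_hash


deps = {'mod_dep_1': 12345, 'mod_dep_2': 23456}
obj_before = object_combo_hash(34567, 111, 222, deps.values())
mods_before = mods_combo_hash(34567, 111, 222)

deps['mod_dep_1'] += 1  # one dependency's checksum changes
assert object_combo_hash(34567, 111, 222, deps.values()) == obj_before + 1
assert mods_combo_hash(34567, 111, 222) == mods_before
```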
- mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content mp_common_args.mod_hashes['mod_dep_1'] += 1 obj_combo_hash = f'{int(obj_combo_hash, 16) + 1:x}' @@ -300,7 +330,7 @@ def test_deps_hash(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) self.ensure_mods_stored(mock_copy, mods_combo_hash) @@ -312,13 +342,16 @@ def test_deps_hash(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_compiler_hash(self): + def test_compiler_hash(self, content): # changing the compiler must change the combo hash for the mods and obj - mp_common_args, flags, analysed_file, _, _ = self.content() + mp_common_args, flags, analysed_file, orig_obj_hash, orig_mods_hash = content + compiler = mp_common_args.config.tool_box[Category.FORTRAN_COMPILER] + compiler._name += "xx" - mp_common_args.compiler = 'bar_cc' - obj_combo_hash = '16c5a5a06' - mods_combo_hash = 'f5c8c6fc' + obj_combo_hash = '19dfa6c83' + mods_combo_hash = '12768d979' + assert obj_combo_hash != orig_obj_hash + assert mods_combo_hash != orig_mods_hash with mock.patch('pathlib.Path.exists', side_effect=[True, True, False]): # mod files exist, obj file doesn't with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -329,7 +362,7 @@ def test_compiler_hash(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -340,13 +373,16 @@ def test_compiler_hash(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_compiler_version_hash(self): + def test_compiler_version_hash(self, content): # changing the compiler version must change the combo hash for the mods and obj - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, orig_obj_hash, orig_mods_hash = content + compiler = mp_common_args.config.tool_box[Category.FORTRAN_COMPILER] + compiler._version = "9.8.7" - mp_common_args.compiler_version = '1.2.4' - obj_combo_hash = '17927b778' - mods_combo_hash = '10296246e' + obj_combo_hash = '1a87f4e07' + mods_combo_hash = '131edbafd' + assert orig_obj_hash != obj_combo_hash + assert orig_mods_hash != mods_combo_hash with mock.patch('pathlib.Path.exists', side_effect=[True, True, False]): # mod files exist, obj file doesn't with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -357,7 +393,7 @@ def test_compiler_version_hash(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, 
output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -368,9 +404,9 @@ def test_compiler_version_hash(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_mod_missing(self): + def test_mod_missing(self, content): # if one of the mods we define is not present, we must recompile - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content with mock.patch('pathlib.Path.exists', side_effect=[False, True, True]): # one mod file missing with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -381,7 +417,7 @@ def test_mod_missing(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -392,9 +428,9 @@ def test_mod_missing(self): pb / f'mod_def_1.{mods_combo_hash}.mod' } - def test_obj_missing(self): + def test_obj_missing(self, content): # the object file we define is not present, so we must recompile - mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = self.content() + mp_common_args, flags, analysed_file, obj_combo_hash, mods_combo_hash = content with mock.patch('pathlib.Path.exists', side_effect=[True, True, False]): # object file missing with mock.patch('fab.steps.compile_fortran.compile_file') as mock_compile_file: @@ -405,7 +441,7 @@ def test_obj_missing(self): expect_object_fpath = Path(f'/fab/proj/build_output/_prebuild/foofile.{obj_combo_hash}.o') assert res == CompiledFile(input_fpath=analysed_file.fpath, output_fpath=expect_object_fpath) mock_compile_file.assert_called_once_with( - analysed_file, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) + analysed_file.fpath, flags, output_fpath=expect_object_fpath, mp_common_args=mp_common_args) self.ensure_mods_stored(mock_copy, mods_combo_hash) # check the correct artefacts were returned @@ -417,63 +453,18 @@ def test_obj_missing(self): } -class Test_constructor(object): - - def test_bare(self): - with mock.patch.dict(os.environ, FC='foofc', clear=True): - with mock.patch('fab.steps.compile_fortran.get_compiler_version'): - compiler, compiler_version, flags = handle_compiler_args() - assert compiler == 'foofc' - assert flags.common_flags == [] - - def test_with_flags(self): - with mock.patch.dict(os.environ, FC='foofc -monty', FFLAGS='--foo --bar'): - with mock.patch('fab.steps.compile_fortran.get_compiler_version'): - compiler, compiler_version, flags = handle_compiler_args() - assert compiler == 'foofc' - assert flags.common_flags == ['-monty', '--foo', '--bar'] - - def test_gfortran_managed_flags(self): - with mock.patch.dict(os.environ, FC='gfortran -c', FFLAGS='-J /mods'): - with mock.patch('fab.steps.compile_fortran.get_compiler_version'), \ - pytest.warns(UserWarning, match="removing managed flag"): - compiler, compiler_version, flags = handle_compiler_args() - assert compiler == 'gfortran' - 
assert flags.common_flags == [] - - def test_ifort_managed_flags(self): - with mock.patch.dict(os.environ, FC='ifort -c', FFLAGS='-module /mods'): - with mock.patch('fab.steps.compile_fortran.get_compiler_version'), \ - pytest.warns(UserWarning, match="removing managed flag"): - compiler, compiler_version, flags = handle_compiler_args() - assert compiler == 'ifort' - assert flags.common_flags == [] - - def test_no_compiler(self): - with mock.patch.dict(os.environ, clear=True): - with mock.patch('fab.steps.compile_fortran.run_command', side_effect=RuntimeError): - with pytest.raises(RuntimeError): - handle_compiler_args() - - def test_unknown_compiler(self): - with mock.patch.dict(os.environ, FC='foofc -c', FFLAGS='-J /mods'): - with mock.patch('fab.steps.compile_fortran.get_compiler_version'): - compiler, compiler_version, flags = handle_compiler_args() - assert compiler == 'foofc' - assert flags.common_flags == ['-c', '-J', '/mods'] - - # todo: test with args - handle_compiler_args(common_flags, path_flags) - - -class Test_get_mod_hashes(object): +class TestGetModHashes: + '''Contains hashing-tests.''' - def test_vanilla(self): + def test_vanilla(self, tool_box): + '''Test hashing. ''' # get a hash value for every module in the analysed file analysed_files = { mock.Mock(module_defs=['foo', 'bar']), } - config = BuildConfig('proj', fab_workspace=Path('/fab_workspace')) + config = BuildConfig('proj', tool_box, + fab_workspace=Path('/fab_workspace')) with mock.patch('pathlib.Path.exists', side_effect=[True, True]): with mock.patch( @@ -482,73 +473,3 @@ def test_vanilla(self): result = get_mod_hashes(analysed_files=analysed_files, config=config) assert result == {'foo': 123, 'bar': 456} - - -class Test_get_fortran_preprocessor(object): - - def test_from_env(self): - with mock.patch.dict(os.environ, values={'FPP': 'foo_pp --foo'}): - fpp, fpp_flags = get_fortran_preprocessor() - - assert fpp == 'foo_pp' - assert fpp_flags == ['--foo', '-P'] - - def test_empty_env_fpp(self): - # test with an empty FPP env var, and only fpp available at the command line - def mock_run_command(command): - if 'fpp' not in command: - raise RuntimeError('foo') - - with mock.patch.dict(os.environ, clear=True): - with mock.patch('fab.steps.preprocess.run_command', side_effect=mock_run_command): - fpp, fpp_flags = get_fortran_preprocessor() - - assert fpp == 'fpp' - assert fpp_flags == ['-P'] - - def test_empty_env_cpp(self): - # test with an empty FPP env var, and only cpp available at the command line - def mock_run_command(command): - if 'cpp' not in command: - raise RuntimeError('foo') - - with mock.patch.dict(os.environ, clear=True): - with mock.patch('fab.steps.preprocess.run_command', side_effect=mock_run_command): - fpp, fpp_flags = get_fortran_preprocessor() - - assert fpp == 'cpp' - assert fpp_flags == ['-traditional-cpp', '-P'] - - -class Test_get_fortran_compiler(object): - - def test_from_env(self): - with mock.patch.dict(os.environ, values={'FC': 'foo_c --foo'}): - fc, fc_flags = get_fortran_compiler() - - assert fc == 'foo_c' - assert fc_flags == ['--foo'] - - def test_empty_env_gfortran(self): - def mock_run_command(command): - if 'gfortran' not in command: - raise RuntimeError('foo') - - with mock.patch.dict(os.environ, clear=True): - with mock.patch('fab.steps.compile_fortran.run_command', side_effect=mock_run_command): - fc, fc_flags = get_fortran_compiler() - - assert fc == 'gfortran' - assert fc_flags == [] - - def test_empty_env_ifort(self): - def mock_run_command(command): - if 'ifort' not in 
command: - raise RuntimeError('foo') - - with mock.patch.dict(os.environ, clear=True): - with mock.patch('fab.steps.compile_fortran.run_command', side_effect=mock_run_command): - fc, fc_flags = get_fortran_compiler() - - assert fc == 'ifort' - assert fc_flags == [] diff --git a/tests/unit_tests/steps/test_grab.py b/tests/unit_tests/steps/test_grab.py index cb4292db..348dc293 100644 --- a/tests/unit_tests/steps/test_grab.py +++ b/tests/unit_tests/steps/test_grab.py @@ -9,11 +9,12 @@ from fab.steps.grab.fcm import fcm_export from fab.steps.grab.folder import grab_folder +from fab.tools import ToolBox import pytest -class TestGrabFolder(object): +class TestGrabFolder: def test_trailing_slash(self): with pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): @@ -27,30 +28,35 @@ def _common(self, grab_src, expect_grab_src): source_root = Path('/workspace/source') dst = 'bar' - mock_config = SimpleNamespace(source_root=source_root) + mock_config = SimpleNamespace(source_root=source_root, + tool_box=ToolBox()) with mock.patch('pathlib.Path.mkdir'): - with mock.patch('fab.steps.grab.run_command') as mock_run: + with mock.patch('fab.tools.tool.Tool.run') as mock_run: grab_folder(mock_config, src=grab_src, dst_label=dst) expect_dst = mock_config.source_root / dst - mock_run.assert_called_once_with(['rsync', '--times', '--links', '--stats', - '-ru', expect_grab_src, str(expect_dst)]) + mock_run.assert_called_once_with( + additional_parameters=['--times', '--links', '--stats', + '-ru', expect_grab_src, expect_dst]) -class TestGrabFcm(object): +class TestGrabFcm: def test_no_revision(self): source_root = Path('/workspace/source') source_url = '/www.example.com/bar' dst_label = 'bar' - mock_config = SimpleNamespace(source_root=source_root) + mock_config = SimpleNamespace(source_root=source_root, + tool_box=ToolBox()) with mock.patch('pathlib.Path.mkdir'): - with mock.patch('fab.steps.grab.svn.run_command') as mock_run, \ + with mock.patch('fab.tools.tool.Tool.run') as mock_run, \ pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): fcm_export(config=mock_config, src=source_url, dst_label=dst_label) - mock_run.assert_called_once_with(['fcm', 'export', '--force', source_url, str(source_root / dst_label)]) + mock_run.assert_called_once_with(['export', '--force', source_url, + str(source_root / dst_label)], + env=None, cwd=None, capture_output=True) def test_revision(self): source_root = Path('/workspace/source') @@ -58,14 +64,16 @@ def test_revision(self): dst_label = 'bar' revision = '42' - mock_config = SimpleNamespace(source_root=source_root) + mock_config = SimpleNamespace(source_root=source_root, + tool_box=ToolBox()) with mock.patch('pathlib.Path.mkdir'): - with mock.patch('fab.steps.grab.svn.run_command') as mock_run, \ + with mock.patch('fab.tools.tool.Tool.run') as mock_run, \ pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): fcm_export(mock_config, src=source_url, dst_label=dst_label, revision=revision) mock_run.assert_called_once_with( - ['fcm', 'export', '--force', '--revision', '42', f'{source_url}', str(source_root / dst_label)]) + ['export', '--force', '--revision', '42', f'{source_url}', str(source_root / dst_label)], + env=None, cwd=None, capture_output=True) # todo: test missing repo # def test_missing(self): diff --git a/tests/unit_tests/steps/test_link.py b/tests/unit_tests/steps/test_link.py index 57af5dfd..60a69a7a 100644 --- a/tests/unit_tests/steps/test_link.py +++ 
b/tests/unit_tests/steps/test_link.py
@@ -9,27 +9,36 @@
 from fab.constants import OBJECT_FILES
 from fab.steps.link import link_exe
+from fab.tools import Linker
 
 import pytest
 
 
-class TestLinkExe(object):
-    def test_run(self):
-        # ensure the command is formed correctly, with the flags at the end (why?!)
+class TestLinkExe:
+    def test_run(self, tool_box):
+        # ensure the command is formed correctly, with the flags at the
+        # end (why?!)
         config = SimpleNamespace(
             project_workspace=Path('workspace'),
             artefact_store={OBJECT_FILES: {'foo': {'foo.o', 'bar.o'}}},
+            tool_box=tool_box
         )
 
         with mock.patch('os.getenv', return_value='-L/foo1/lib -L/foo2/lib'):
-            with mock.patch('fab.steps.link.run_command') as mock_run, \
-                    pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"):
-                link_exe(config, linker='foolink', flags=['-fooflag', '-barflag'])
+            # We need to create a linker here to pick up the env var:
+            linker = Linker("mock_link", "mock_link.exe", "mock-vendor")
+            # Mark the linker as available so it can be added to the tool box
+            linker._is_available = True
+            tool_box.add_tool(linker, silent_replace=True)
+            mock_result = mock.Mock(returncode=0, stdout="abc\ndef".encode())
+            with mock.patch('fab.tools.tool.subprocess.run',
+                            return_value=mock_result) as tool_run, \
+                    pytest.warns(UserWarning, match="_metric_send_conn not "
+                                                    "set, cannot send metrics"):
+                link_exe(config, flags=['-fooflag', '-barflag'])
 
-            mock_run.assert_called_with([
-                'foolink', '-o', 'workspace/foo',
-                *sorted(['foo.o', 'bar.o']),
-                '-L/foo1/lib', '-L/foo2/lib',
-                '-fooflag', '-barflag',
-            ])
+            tool_run.assert_called_with(
+                ['mock_link.exe', '-L/foo1/lib', '-L/foo2/lib', 'bar.o', 'foo.o',
+                 '-fooflag', '-barflag', '-o', 'workspace/foo'],
+                capture_output=True, env=None, cwd=None, check=False)
diff --git a/tests/unit_tests/steps/test_link_shared_object.py b/tests/unit_tests/steps/test_link_shared_object.py
new file mode 100644
index 00000000..117971d1
--- /dev/null
+++ b/tests/unit_tests/steps/test_link_shared_object.py
@@ -0,0 +1,49 @@
+# ##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+# ##############################################################################
+
+'''Tests linking a shared library.
+'''
+
+from pathlib import Path
+from types import SimpleNamespace
+from unittest import mock
+
+from fab.constants import OBJECT_FILES
+from fab.steps.link import link_shared_object
+from fab.tools import Linker
+
+import pytest
+
+
+def test_run(tool_box):
+    '''Ensure the command is formed correctly, with the flags at the
+    end since they are typically libraries.'''
+
+    config = SimpleNamespace(
+        project_workspace=Path('workspace'),
+        build_output=Path("workspace"),
+        artefact_store={OBJECT_FILES: {None: {'foo.o', 'bar.o'}}},
+        tool_box=tool_box
+    )
+
+    with mock.patch('os.getenv', return_value='-L/foo1/lib -L/foo2/lib'):
+        # We need to create a linker here to pick up the env var:
+        linker = Linker("mock_link", "mock_link.exe", "vendor")
+        # Mark the linker as available so it can be added to the tool box:
+        linker._is_available = True
+        tool_box.add_tool(linker, silent_replace=True)
+        mock_result = mock.Mock(returncode=0, stdout="abc\ndef".encode())
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run, \
+                pytest.warns(UserWarning, match="_metric_send_conn not set, "
+                                                "cannot send metrics"):
+            link_shared_object(config, "/tmp/lib_my.so",
+                               flags=['-fooflag', '-barflag'])
+
+    tool_run.assert_called_with(
+        ['mock_link.exe', '-L/foo1/lib', '-L/foo2/lib', 'bar.o', 'foo.o',
+         '-fooflag', '-barflag', '-fPIC', '-shared', '-o', '/tmp/lib_my.so'],
+        capture_output=True, env=None, cwd=None, check=False)
diff --git a/tests/unit_tests/steps/test_preprocess.py b/tests/unit_tests/steps/test_preprocess.py
index e45850f9..32e7e09f 100644
--- a/tests/unit_tests/steps/test_preprocess.py
+++ b/tests/unit_tests/steps/test_preprocess.py
@@ -6,16 +6,19 @@
 from pathlib import Path
 from unittest import mock
 
+import pytest
+
 from fab.build_config import BuildConfig
 from fab.steps.preprocess import preprocess_fortran
+from fab.tools import Category, ToolBox
 
 
-class Test_preprocess_fortran(object):
+class Test_preprocess_fortran:
 
     def test_big_little(self, tmp_path):
         # ensure big F90s are preprocessed and little f90s are copied
-        config = BuildConfig('proj', fab_workspace=tmp_path)
+        config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path)
 
         big_f90 = Path(config.source_root / 'big.F90')
         little_f90 = Path(config.source_root / 'little.f90')
 
@@ -38,3 +41,19 @@ def source_getter(artefact_store):
         )
 
         mock_copy.assert_called_once_with(str(little_f90), mock.ANY)
+
+        # Now test that an incorrect preprocessor is detected:
+        tool_box = config.tool_box
+        # Take the C preprocessor
+        cpp = tool_box[Category.C_PREPROCESSOR]
+        # And set its category to FORTRAN_PREPROCESSOR
+        cpp._category = Category.FORTRAN_PREPROCESSOR
+        # Now overwrite the Fortran preprocessor with the re-categorised
+        # C preprocessor:
+        tool_box.add_tool(cpp)
+
+        with pytest.raises(RuntimeError) as err:
+            preprocess_fortran(config=config)
+        assert ("Unexpected tool 'cpp' of type '<class "
+                "'fab.tools.preprocessor.Cpp'>' instead of CppFortran"
+                in str(err.value))
diff --git a/tests/unit_tests/steps/test_root_inc_files.py b/tests/unit_tests/steps/test_root_inc_files.py
index 9c61cb92..50466c19 100644
--- a/tests/unit_tests/steps/test_root_inc_files.py
+++ b/tests/unit_tests/steps/test_root_inc_files.py
@@ -5,15 +5,16 @@
 
 from fab.build_config import BuildConfig
 from fab.steps.root_inc_files import root_inc_files
+from fab.tools import ToolBox
 
 
-class TestRootIncFiles(object):
+class TestRootIncFiles:
 
     def test_vanilla(self):
         # ensure it copies the inc file
         inc_files = [Path('/foo/source/bar.inc')]
 
-        config = BuildConfig('proj')
+        config = BuildConfig('proj', 
ToolBox()) config.artefact_store['all_source'] = inc_files with mock.patch('fab.steps.root_inc_files.shutil') as mock_shutil: @@ -25,7 +26,7 @@ def test_vanilla(self): def test_skip_output_folder(self): # ensure it doesn't try to copy a file in the build output - config = BuildConfig('proj') + config = BuildConfig('proj', ToolBox()) inc_files = [Path('/foo/source/bar.inc'), config.build_output / 'fab.inc'] config.artefact_store['all_source'] = inc_files @@ -40,7 +41,7 @@ def test_name_clash(self): # ensure raises an exception if there is a name clash inc_files = [Path('/foo/source/bar.inc'), Path('/foo/sauce/bar.inc')] - config = BuildConfig('proj') + config = BuildConfig('proj', ToolBox()) config.artefact_store['all_source'] = inc_files with pytest.raises(FileExistsError): diff --git a/tests/unit_tests/test_build_config.py b/tests/unit_tests/test_build_config.py index 54a49ab8..b6c01fdd 100644 --- a/tests/unit_tests/test_build_config.py +++ b/tests/unit_tests/test_build_config.py @@ -3,12 +3,14 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution # ############################################################################## + from fab.build_config import BuildConfig from fab.steps import step from fab.steps.cleanup_prebuilds import CLEANUP_COUNT +from fab.tools import ToolBox -class TestBuildConfig(object): +class TestBuildConfig: def test_error_newlines(self, tmp_path): # Check cli tool errors have newlines displayed correctly. @@ -24,7 +26,7 @@ def simple_step(config): def test_add_cleanup(self): # ensure the cleanup step is added - with BuildConfig('proj') as config: + with BuildConfig('proj', ToolBox()) as config: assert CLEANUP_COUNT not in config.artefact_store - pass + assert CLEANUP_COUNT in config.artefact_store diff --git a/tests/unit_tests/test_config.py b/tests/unit_tests/test_config.py index a4334429..12357c37 100644 --- a/tests/unit_tests/test_config.py +++ b/tests/unit_tests/test_config.py @@ -1,15 +1,16 @@ from pathlib import Path from fab.build_config import AddFlags, BuildConfig - from fab.constants import SOURCE_ROOT +from fab.tools import ToolBox -class TestAddFlags(object): +class TestAddFlags: def test_run(self): add_flags = AddFlags(match="$source/foo/*", flags=['-I', '$relative/include']) - config = BuildConfig('proj', fab_workspace=Path("/fab_workspace")) + config = BuildConfig('proj', ToolBox(), + fab_workspace=Path("/fab_workspace")) # anything in $source/foo should get the include folder my_flags = ["-foo"] diff --git a/tests/unit_tests/test_tools.py b/tests/unit_tests/test_tools.py deleted file mode 100644 index 1898ff7f..00000000 --- a/tests/unit_tests/test_tools.py +++ /dev/null @@ -1,195 +0,0 @@ -# ############################################################################## -# (c) Crown copyright Met Office. All rights reserved. 
-# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -# ############################################################################## -from textwrap import dedent -from unittest import mock - -import pytest - -from fab.tools import remove_managed_flags, flags_checksum, get_tool, get_compiler_version, run_command - - -class Test_remove_managed_flags(object): - - def test_gfortran(self): - flags = ['--foo', '-J', 'nope', '--bar'] - with pytest.warns(UserWarning, match="removing managed flag"): - result = remove_managed_flags('gfortran', flags) - assert result == ['--foo', '--bar'] - - def test_ifort(self): - flags = ['--foo', '-module', 'nope', '--bar'] - with pytest.warns(UserWarning, match="removing managed flag"): - result = remove_managed_flags('ifort', flags) - assert result == ['--foo', '--bar'] - - def test_unknown_compiler(self): - flags = ['--foo', '-J', 'nope', '--bar'] - result = remove_managed_flags('foofc', flags) - assert result == ['--foo', '-J', 'nope', '--bar'] - - -class Test_flags_checksum(object): - - def test_vanilla(self): - # I think this is a poor testing pattern. - flags = ['one', 'two', 'three', 'four'] - assert flags_checksum(flags) == 3011366051 - - -class test_get_tool(object): - - def test_without_flag(self): - assert get_tool('gfortran') == ('gfortran', []) - - def test_with_flag(self): - assert get_tool('gfortran -c') == ('gfortran', ['-c']) - - -class Test_get_compiler_version(object): - - def _check(self, full_version_string, expect): - with mock.patch('fab.tools.run_command', return_value=full_version_string): - result = get_compiler_version(None) - assert result == expect - - def test_command_failure(self): - # if the command fails, we must return an empty string, not None, so it can still be hashed - with mock.patch('fab.tools.run_command', side_effect=RuntimeError()): - assert get_compiler_version(None) == '', 'expected empty string' - - def test_unknown_command_response(self): - # if the full version output is in an unknown format, we must return an empty string - self._check(full_version_string='foo fortran 1.2.3', expect='') - - def test_unknown_version_format(self): - # if the version is in an unknown format, we must return an empty string - full_version_string = dedent(""" - Foo Fortran (Foo) 5 123456 (Foo Hat 4.8.5-44) - Copyright (C) 2022 Foo Software Foundation, Inc. - """) - self._check(full_version_string=full_version_string, expect='') - - def test_2_part_version(self): - # test major.minor format - full_version_string = dedent(""" - Foo Fortran (Foo) 5.6 123456 (Foo Hat 4.8.5-44) - Copyright (C) 2022 Foo Software Foundation, Inc. - """) - self._check(full_version_string=full_version_string, expect='5.6') - - # Possibly overkill to cover so many gfortran versions but I had to go check them so might as well add them. - # Note: different sources, e.g conda, change the output slightly... - - def test_gfortran_4(self): - full_version_string = dedent(""" - GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44) - Copyright (C) 2015 Free Software Foundation, Inc. - - GNU Fortran comes with NO WARRANTY, to the extent permitted by law. - You may redistribute copies of GNU Fortran - under the terms of the GNU General Public License. 
- For more information about these matters, see the file named COPYING - - """) - - self._check(full_version_string=full_version_string, expect='4.8.5') - - def test_gfortran_6(self): - full_version_string = dedent(""" - GNU Fortran (GCC) 6.1.0 - Copyright (C) 2016 Free Software Foundation, Inc. - This is free software; see the source for copying conditions. There is NO - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - """) - - self._check(full_version_string=full_version_string, expect='6.1.0') - - def test_gfortran_8(self): - full_version_string = dedent(""" - GNU Fortran (conda-forge gcc 8.5.0-16) 8.5.0 - Copyright (C) 2018 Free Software Foundation, Inc. - This is free software; see the source for copying conditions. There is NO - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - """) - - self._check(full_version_string=full_version_string, expect='8.5.0') - - def test_gfortran_10(self): - full_version_string = dedent(""" - GNU Fortran (conda-forge gcc 10.4.0-16) 10.4.0 - Copyright (C) 2020 Free Software Foundation, Inc. - This is free software; see the source for copying conditions. There is NO - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - """) - - self._check(full_version_string=full_version_string, expect='10.4.0') - - def test_gfortran_12(self): - full_version_string = dedent(""" - GNU Fortran (conda-forge gcc 12.1.0-16) 12.1.0 - Copyright (C) 2022 Free Software Foundation, Inc. - This is free software; see the source for copying conditions. There is NO - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - """) - - self._check(full_version_string=full_version_string, expect='12.1.0') - - def test_ifort_14(self): - full_version_string = dedent(""" - ifort (IFORT) 14.0.3 20140422 - Copyright (C) 1985-2014 Intel Corporation. All rights reserved. - - """) - - self._check(full_version_string=full_version_string, expect='14.0.3') - - def test_ifort_15(self): - full_version_string = dedent(""" - ifort (IFORT) 15.0.2 20150121 - Copyright (C) 1985-2015 Intel Corporation. All rights reserved. - - """) - - self._check(full_version_string=full_version_string, expect='15.0.2') - - def test_ifort_17(self): - full_version_string = dedent(""" - ifort (IFORT) 17.0.7 20180403 - Copyright (C) 1985-2018 Intel Corporation. All rights reserved. - - """) - - self._check(full_version_string=full_version_string, expect='17.0.7') - - def test_ifort_19(self): - full_version_string = dedent(""" - ifort (IFORT) 19.0.0.117 20180804 - Copyright (C) 1985-2018 Intel Corporation. All rights reserved. 
- - """) - - self._check(full_version_string=full_version_string, expect='19.0.0.117') - - -class Test_run_command(object): - - def test_no_error(self): - mock_result = mock.Mock(returncode=0) - with mock.patch('fab.tools.subprocess.run', return_value=mock_result): - run_command([]) - - def test_error(self): - mock_result = mock.Mock(returncode=1) - mocked_error_message = 'mocked error message' - mock_result.stderr.decode = mock.Mock(return_value=mocked_error_message) - with mock.patch('fab.tools.subprocess.run', return_value=mock_result): - with pytest.raises(RuntimeError) as err: - run_command([]) - assert mocked_error_message in str(err.value) diff --git a/tests/unit_tests/tools/test_ar.py b/tests/unit_tests/tools/test_ar.py new file mode 100644 index 00000000..6307705b --- /dev/null +++ b/tests/unit_tests/tools/test_ar.py @@ -0,0 +1,51 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the ar implementation. +''' + +from pathlib import Path +from unittest import mock + +from fab.tools import Category, Ar + + +def test_ar_constructor(): + '''Test the ar constructor.''' + ar = Ar() + assert ar.category == Category.AR + assert ar.name == "ar" + assert ar.exec_name == "ar" + assert ar.flags == [] + + +def test_ar_check_available(): + '''Tests the is_available functionality.''' + ar = Ar() + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + assert ar.check_available() + tool_run.assert_called_once_with( + ["ar", "--version"], capture_output=True, env=None, + cwd=None, check=False) + + # Test behaviour if a runtime error happens: + with mock.patch("fab.tools.tool.Tool.run", + side_effect=RuntimeError("")) as tool_run: + assert not ar.check_available() + + +def test_ar_create(): + '''Test creating an archive.''' + ar = Ar() + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + ar.create(Path("out.a"), [Path("a.o"), "b.o"]) + tool_run.assert_called_with(['ar', 'cr', 'out.a', 'a.o', 'b.o'], + capture_output=True, env=None, cwd=None, + check=False) diff --git a/tests/unit_tests/tools/test_categories.py b/tests/unit_tests/tools/test_categories.py new file mode 100644 index 00000000..4df0e089 --- /dev/null +++ b/tests/unit_tests/tools/test_categories.py @@ -0,0 +1,27 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''This module tests the Categories. +''' + +from fab.tools import Category + + +def test_category(): + '''Tests the categories.''' + # Make sure that str of a category only prints the name (which is more + # useful for error messages). 
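+    # A hypothetical sketch (not the real definition) of how Category
+    # presumably achieves this, by overriding the default Enum string
+    # conversion:
+    #
+    #     class Category(Enum):
+    #         def __str__(self):
+    #             return self.name
+    #
+    # A plain Enum would instead print e.g. 'Category.C_COMPILER'.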
+ for cat in list(Category): + assert str(cat) == cat.name + + +def test_is_compiler(): + '''Tests that compiler correctly sets the `is_compiler` property.''' + for cat in Category: + if cat in [Category.FORTRAN_COMPILER, Category.C_COMPILER]: + assert cat.is_compiler + else: + assert not cat.is_compiler diff --git a/tests/unit_tests/tools/test_compiler.py b/tests/unit_tests/tools/test_compiler.py new file mode 100644 index 00000000..22814c71 --- /dev/null +++ b/tests/unit_tests/tools/test_compiler.py @@ -0,0 +1,336 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the compiler implementation. +''' + +import os +from pathlib import Path, PosixPath +from textwrap import dedent +from unittest import mock + +import pytest + +from fab.tools import (Category, CCompiler, Compiler, FortranCompiler, + Gcc, Gfortran, Icc, Ifort) + + +def test_compiler(): + '''Test the compiler constructor.''' + cc = CCompiler("gcc", "gcc", "gnu") + assert cc.category == Category.C_COMPILER + assert cc._compile_flag == "-c" + assert cc._output_flag == "-o" + assert cc.flags == [] + assert cc.suite == "gnu" + + fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J") + assert fc._compile_flag == "-c" + assert fc._output_flag == "-o" + assert fc.category == Category.FORTRAN_COMPILER + assert fc.suite == "gnu" + assert fc.flags == [] + + +def test_compiler_check_available(): + '''Check if check_available works as expected. The compiler class + uses internally get_version to test if a compiler works or not. + ''' + cc = CCompiler("gcc", "gcc", "gnu") + # The compiler uses get_version to check if it is available. 
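+    # In other words, check_available presumably boils down to something
+    # like this hypothetical sketch (not the real implementation):
+    #
+    #     def check_available(self):
+    #         try:
+    #             self.get_version()
+    #         except RuntimeError:
+    #             return False
+    #         return True
+    #
+    # so a plain mock of get_version (which never raises) makes the check
+    # succeed, while a RuntimeError side effect makes it fail.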
+ # First simulate a successful run: + with mock.patch.object(cc, "get_version", returncode=123): + assert cc.check_available() + + # Now test if get_version raises an error + with mock.patch.object(cc, "get_version", side_effect=RuntimeError("")): + assert not cc.check_available() + + +def test_compiler_hash(): + '''Test the hash functionality.''' + cc = CCompiler("gcc", "gcc", "gnu") + with mock.patch.object(cc, "_version", 567): + hash1 = cc.get_hash() + assert hash1 == 4646426180 + + # A change in the version number must change the hash: + with mock.patch.object(cc, "_version", 89): + hash2 = cc.get_hash() + assert hash2 != hash1 + + # A change in the name must change the hash, again: + cc._name = "new_name" + hash3 = cc.get_hash() + assert hash3 not in (hash1, hash2) + + +def test_compiler_with_env_fflags(): + '''Test that content of FFLAGS is added to the compiler flags.''' + with mock.patch.dict(os.environ, FFLAGS='--foo --bar'): + cc = CCompiler("gcc", "gcc", "gnu") + fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J") + assert cc.flags == ["--foo", "--bar"] + assert fc.flags == ["--foo", "--bar"] + + +def test_compiler_syntax_only(): + '''Tests handling of syntax only flags.''' + fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J") + assert not fc.has_syntax_only + fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J", + syntax_only_flag=None) + assert not fc.has_syntax_only + + fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J", + syntax_only_flag="-fsyntax-only") + fc.set_module_output_path("/tmp") + assert fc.has_syntax_only + assert fc._syntax_only_flag == "-fsyntax-only" + fc.run = mock.Mock() + fc.compile_file(Path("a.f90"), "a.o", syntax_only=True) + fc.run.assert_called_with(cwd=Path('.'), + additional_parameters=['-c', '-fsyntax-only', + "-J", '/tmp', 'a.f90', + '-o', 'a.o', ]) + + +def test_compiler_module_output(): + '''Tests handling of module output_flags.''' + fc = FortranCompiler("gfortran", "gfortran", suite="gnu", + module_folder_flag="-J") + fc.set_module_output_path("/module_out") + assert fc._module_output_path == "/module_out" + fc.run = mock.MagicMock() + fc.compile_file(Path("a.f90"), "a.o", syntax_only=True) + fc.run.assert_called_with(cwd=PosixPath('.'), + additional_parameters=['-c', '-J', '/module_out', + 'a.f90', '-o', 'a.o']) + + +def test_compiler_with_add_args(): + '''Tests that additional arguments are handled as expected.''' + fc = FortranCompiler("gfortran", "gfortran", "gnu", + module_folder_flag="-J") + fc.set_module_output_path("/module_out") + assert fc._module_output_path == "/module_out" + fc.run = mock.MagicMock() + with pytest.warns(UserWarning, match="Removing managed flag"): + fc.compile_file(Path("a.f90"), "a.o", add_flags=["-J/b", "-O3"], + syntax_only=True) + # Notice that "-J/b" has been removed + fc.run.assert_called_with(cwd=PosixPath('.'), + additional_parameters=['-c', "-O3", + '-J', '/module_out', + 'a.f90', '-o', 'a.o']) + + +class TestGetCompilerVersion: + '''Test `get_version`.''' + + def _check(self, full_version_string: str, expected: str): + '''Checks if the correct version is extracted from the + given full_version_string. 
+ ''' + c = Compiler("gfortran", "gfortran", "gnu", + Category.FORTRAN_COMPILER) + with mock.patch.object(c, "run", + mock.Mock(return_value=full_version_string)): + assert c.get_version() == expected + # Now let the run method raise an exception, to make sure + # we get a cached value back (and the run method isn't called again): + with mock.patch.object(c, "run", + mock.Mock(side_effect=RuntimeError(""))): + assert c.get_version() == expected + + def test_command_failure(self): + '''If the command fails, we must return an empty string, not None, + so it can still be hashed.''' + c = Compiler("gfortran", "gfortran", "gnu", + Category.FORTRAN_COMPILER) + with mock.patch.object(c, 'run', side_effect=RuntimeError()): + assert c.get_version() == '', 'expected empty string' + with mock.patch.object(c, 'run', side_effect=FileNotFoundError()): + with pytest.raises(RuntimeError) as err: + c.get_version() + assert "Compiler not found: gfortran" in str(err.value) + + def test_unknown_command_response(self): + '''If the full version output is in an unknown format, + we must return an empty string.''' + self._check(full_version_string='foo fortran 1.2.3', expected='') + + def test_unknown_version_format(self): + '''If the version is in an unknown format, we must return an + empty string.''' + full_version_string = dedent(""" + Foo Fortran (Foo) 5 123456 (Foo Hat 4.8.5-44) + Copyright (C) 2022 Foo Software Foundation, Inc. + """) + self._check(full_version_string=full_version_string, expected='') + + def test_2_part_version(self): + '''Test major.minor format. ''' + full_version_string = dedent(""" + Foo Fortran (Foo) 5.6 123456 (Foo Hat 4.8.5-44) + Copyright (C) 2022 Foo Software Foundation, Inc. + """) + self._check(full_version_string=full_version_string, expected='5.6') + + # Possibly overkill to cover so many gfortran versions but I had to go + # check them so might as well add them. + # Note: different sources, e.g conda, change the output slightly... + + def test_gfortran_4(self): + '''Test gfortran 4.8.5 version detection.''' + full_version_string = dedent(""" + GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44) + Copyright (C) 2015 Free Software Foundation, Inc. + + GNU Fortran comes with NO WARRANTY, to the extent permitted by law. + You may redistribute copies of GNU Fortran + under the terms of the GNU General Public License. + For more information about these matters, see the file named COPYING + + """) + + self._check(full_version_string=full_version_string, expected='4.8.5') + + def test_gfortran_6(self): + '''Test gfortran 6.1.0 version detection.''' + full_version_string = dedent(""" + GNU Fortran (GCC) 6.1.0 + Copyright (C) 2016 Free Software Foundation, Inc. + This is free software; see the source for copying conditions. There is NO + warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + """) + + self._check(full_version_string=full_version_string, expected='6.1.0') + + def test_gfortran_8(self): + '''Test gfortran 8.5.0 version detection.''' + full_version_string = dedent(""" + GNU Fortran (conda-forge gcc 8.5.0-16) 8.5.0 + Copyright (C) 2018 Free Software Foundation, Inc. + This is free software; see the source for copying conditions. There is NO + warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ + """) + + self._check(full_version_string=full_version_string, expected='8.5.0') + + def test_gfortran_10(self): + '''Test gfortran 10.4.0 version detection.''' + full_version_string = dedent(""" + GNU Fortran (conda-forge gcc 10.4.0-16) 10.4.0 + Copyright (C) 2020 Free Software Foundation, Inc. + This is free software; see the source for copying conditions. There is NO + warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + """) + + self._check(full_version_string=full_version_string, expected='10.4.0') + + def test_gfortran_12(self): + '''Test gfortran 12.1.0 version detection.''' + full_version_string = dedent(""" + GNU Fortran (conda-forge gcc 12.1.0-16) 12.1.0 + Copyright (C) 2022 Free Software Foundation, Inc. + This is free software; see the source for copying conditions. There is NO + warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + """) + + self._check(full_version_string=full_version_string, expected='12.1.0') + + def test_ifort_14(self): + '''Test ifort 14.0.3 version detection.''' + full_version_string = dedent(""" + ifort (IFORT) 14.0.3 20140422 + Copyright (C) 1985-2014 Intel Corporation. All rights reserved. + + """) + + self._check(full_version_string=full_version_string, expected='14.0.3') + + def test_ifort_15(self): + '''Test ifort 15.0.2 version detection.''' + full_version_string = dedent(""" + ifort (IFORT) 15.0.2 20150121 + Copyright (C) 1985-2015 Intel Corporation. All rights reserved. + + """) + + self._check(full_version_string=full_version_string, expected='15.0.2') + + def test_ifort_17(self): + '''Test ifort 17.0.7 version detection.''' + full_version_string = dedent(""" + ifort (IFORT) 17.0.7 20180403 + Copyright (C) 1985-2018 Intel Corporation. All rights reserved. + + """) + + self._check(full_version_string=full_version_string, expected='17.0.7') + + def test_ifort_19(self): + '''Test ifort 19.0.0.117 version detection.''' + full_version_string = dedent(""" + ifort (IFORT) 19.0.0.117 20180804 + Copyright (C) 1985-2018 Intel Corporation. All rights reserved. 
+
+            """)
+
+        self._check(full_version_string=full_version_string,
+                    expected='19.0.0.117')
+
+
+def test_gcc():
+    '''Tests the gcc class.'''
+    gcc = Gcc()
+    assert gcc.name == "gcc"
+    assert isinstance(gcc, CCompiler)
+    assert gcc.category == Category.C_COMPILER
+
+
+def test_gfortran():
+    '''Tests the gfortran class.'''
+    gfortran = Gfortran()
+    assert gfortran.name == "gfortran"
+    assert isinstance(gfortran, FortranCompiler)
+    assert gfortran.category == Category.FORTRAN_COMPILER
+
+
+def test_icc():
+    '''Tests the icc class.'''
+    icc = Icc()
+    assert icc.name == "icc"
+    assert isinstance(icc, CCompiler)
+    assert icc.category == Category.C_COMPILER
+
+
+def test_ifort():
+    '''Tests the ifort class.'''
+    ifort = Ifort()
+    assert ifort.name == "ifort"
+    assert isinstance(ifort, FortranCompiler)
+    assert ifort.category == Category.FORTRAN_COMPILER
+
+
+def test_compiler_wrapper():
+    '''Make sure we can easily create a compiler wrapper.'''
+    class MpiF90(Ifort):
+        '''A simple compiler wrapper'''
+        def __init__(self):
+            super().__init__(name="mpif90-intel",
+                             exec_name="mpif90")
+
+    mpif90 = MpiF90()
+    assert mpif90.suite == "intel-classic"
+    assert mpif90.category == Category.FORTRAN_COMPILER
+    assert mpif90.name == "mpif90-intel"
+    assert mpif90.exec_name == "mpif90"
diff --git a/tests/unit_tests/tools/test_flags.py b/tests/unit_tests/tools/test_flags.py
new file mode 100644
index 00000000..b51c691c
--- /dev/null
+++ b/tests/unit_tests/tools/test_flags.py
@@ -0,0 +1,59 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+'''Tests the Flags implementation.
+'''
+
+import pytest
+
+from fab.tools import Flags
+
+
+def test_flags_constructor():
+    '''Tests the constructor of Flags.'''
+    f1 = Flags()
+    assert isinstance(f1, list)
+    assert f1 == []
+    f2 = Flags(["a"])
+    assert isinstance(f2, list)
+    assert f2 == ["a"]
+
+
+def test_remove_flags():
+    '''Test remove_flags functionality.'''
+    flags = Flags()
+    flags.remove_flag("-c", False)
+    assert flags == []
+
+    all_flags = ['a.f90', '-c', '-o', 'a.o', '-fsyntax-only', "-J", "/tmp"]
+    flags = Flags(all_flags)
+    assert flags == all_flags
+    with pytest.warns(UserWarning, match="Removing managed flag"):
+        flags.remove_flag("-c")
+    del all_flags[1]
+    assert flags == all_flags
+    with pytest.warns(UserWarning, match="Removing managed flag"):
+        flags.remove_flag("-J", has_parameter=True)
+    del all_flags[-2:]
+    assert flags == all_flags
+
+    for flags_in, expected in [(["-J", "b"], []),
+                               (["-Jb"], []),
+                               (["a", "-J", "c"], ["a"]),
+                               (["a", "-Jc"], ["a"]),
+                               (["a", "-J"], ["a"]),
+                               ]:
+        flags = Flags(flags_in)
+        with pytest.warns(UserWarning, match="Removing managed flag"):
+            flags.remove_flag("-J", has_parameter=True)
+        assert flags == expected
+
+
+def test_flags_checksum():
+    '''Tests computation of the checksum.'''
+    # I think this is a poor testing pattern.
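+    # For context: the expected value below matches the one asserted by
+    # the deleted Test_flags_checksum in tests/unit_tests/test_tools.py
+    # above, i.e. the migration to the Flags class keeps the checksum
+    # output unchanged for the same four flags.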
+ flags = Flags(['one', 'two', 'three', 'four']) + assert flags.checksum() == 3011366051 diff --git a/tests/unit_tests/tools/test_linker.py b/tests/unit_tests/tools/test_linker.py new file mode 100644 index 00000000..927cd008 --- /dev/null +++ b/tests/unit_tests/tools/test_linker.py @@ -0,0 +1,127 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the linker implementation. +''' + +from pathlib import Path +from unittest import mock + +import pytest + +from fab.tools import (Category, Linker) + + +def test_linker(mock_c_compiler, mock_fortran_compiler): + '''Test the linker constructor.''' + + linker = Linker(name="my_linker", exec_name="my_linker.exe", + suite="suite") + assert linker.category == Category.LINKER + assert linker.name == "my_linker" + assert linker.exec_name == "my_linker.exe" + assert linker.suite == "suite" + assert linker.flags == [] + + linker = Linker(name="my_linker", compiler=mock_c_compiler) + assert linker.category == Category.LINKER + assert linker.name == "my_linker" + assert linker.exec_name == mock_c_compiler.exec_name + assert linker.suite == mock_c_compiler.suite + assert linker.flags == [] + + linker = Linker(compiler=mock_c_compiler) + assert linker.category == Category.LINKER + assert linker.name == mock_c_compiler.name + assert linker.exec_name == mock_c_compiler.exec_name + assert linker.suite == mock_c_compiler.suite + assert linker.flags == [] + + linker = Linker(compiler=mock_fortran_compiler) + assert linker.category == Category.LINKER + assert linker.name == mock_fortran_compiler.name + assert linker.exec_name == mock_fortran_compiler.exec_name + assert linker.flags == [] + + with pytest.raises(RuntimeError) as err: + linker = Linker(name="no-exec-given") + assert ("Either specify name, exec name, and suite or a compiler when " + "creating Linker." in str(err.value)) + + +def test_linker_check_available(mock_c_compiler): + '''Tests the is_available functionality.''' + + # First test if a compiler is given. The linker will call the + # corresponding function in the compiler: + linker = Linker(compiler=mock_c_compiler) + with mock.patch.object(mock_c_compiler, "check_available", + return_value=True) as comp_run: + assert linker.check_available() + # It should be called once without any parameter + comp_run.assert_called_once_with() + + # Second test, no compiler is given. 
Mock Tool.run to + # return a success: + linker = Linker("ld", "ld", suite="gnu") + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + linker.check_available() + tool_run.assert_called_once_with( + ["ld", "--version"], capture_output=True, env=None, + cwd=None, check=False) + + # Third test: assume the tool does not exist, run will raise + # runtime error: + with mock.patch("fab.tools.tool.Tool.run", + side_effect=RuntimeError("")) as tool_run: + linker.check_available() + + +def test_linker_c(mock_c_compiler): + '''Test the link command line.''' + linker = Linker(compiler=mock_c_compiler) + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + linker.link([Path("a.o")], Path("a.out")) + tool_run.assert_called_with( + ["mock_c_compiler.exe", 'a.o', '-o', 'a.out'], capture_output=True, + env=None, cwd=None, check=False) + + with mock.patch.object(linker, "run") as link_run: + linker.link([Path("a.o")], Path("a.out"), add_libs=["-L", "/tmp"]) + link_run.assert_called_with(['a.o', '-L', '/tmp', '-o', 'a.out']) + + +def test_linker_add_compiler_flag(mock_c_compiler): + '''Test that a flag added to the compiler will be automatically + added to the link line (even if the flags are modified after + creating the linker ... in case that the user specifies additional + flags after creating the linker).''' + + linker = Linker(compiler=mock_c_compiler) + mock_c_compiler.flags.append("-my-flag") + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + linker.link([Path("a.o")], Path("a.out")) + tool_run.assert_called_with( + ['mock_c_compiler.exe', '-my-flag', 'a.o', '-o', 'a.out'], + capture_output=True, env=None, cwd=None, check=False) + + # Make also sure the code works if a linker is created without + # a compiler: + linker = Linker("no-compiler", "no-compiler.exe", "suite") + linker.flags.append("-some-other-flag") + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + linker.link([Path("a.o")], Path("a.out")) + tool_run.assert_called_with( + ['no-compiler.exe', '-some-other-flag', 'a.o', '-o', 'a.out'], + capture_output=True, env=None, cwd=None, check=False) diff --git a/tests/unit_tests/tools/test_preprocessor.py b/tests/unit_tests/tools/test_preprocessor.py new file mode 100644 index 00000000..338ccb97 --- /dev/null +++ b/tests/unit_tests/tools/test_preprocessor.py @@ -0,0 +1,82 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the tool class. 
+''' + + +import logging +from pathlib import Path + +from unittest import mock + +from fab.tools import (Category, Cpp, CppFortran, Fpp, Preprocessor) + + +def test_preprocessor_constructor(): + '''Test the constructor.''' + tool = Preprocessor("cpp-fortran", "cpp", Category.FORTRAN_PREPROCESSOR) + assert str(tool) == "Preprocessor - cpp-fortran: cpp" + assert tool.exec_name == "cpp" + assert tool.name == "cpp-fortran" + assert tool.category == Category.FORTRAN_PREPROCESSOR + assert isinstance(tool.logger, logging.Logger) + + +def test_preprocessor_fpp_is_available(): + '''Test that is_available works as expected.''' + fpp = Fpp() + mock_run = mock.Mock(side_effect=RuntimeError("not found")) + with mock.patch("subprocess.run", mock_run): + assert not fpp.is_available + + # Reset the flag and pretend run returns a success: + fpp._is_available = None + mock_run = mock.Mock(returncode=0) + with mock.patch("fab.tools.tool.Tool.run", mock_run): + assert fpp.is_available + + +def test_preprocessor_cpp(): + '''Test cpp.''' + cpp = Cpp() + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + cpp.run("--version") + tool_run.assert_called_with(["cpp", "--version"], capture_output=True, + env=None, cwd=None, check=False) + + # Reset the flag and raise an error when executing: + cpp._is_available = None + mock_run = mock.Mock(side_effect=RuntimeError("not found")) + with mock.patch("fab.tools.tool.Tool.run", mock_run): + assert not cpp.is_available + + +def test_preprocessor_cppfortran(): + '''Test cpp for Fortran, which adds additional command line options in.''' + cppf = CppFortran() + assert cppf.is_available + # First create a mock object that is the result of subprocess.run. + # Tool will only check `returncode` of this object. + mock_result = mock.Mock(returncode=0) + # Then set this result as result of a mock run function + mock_run = mock.Mock(return_value=mock_result) + + with mock.patch("subprocess.run", mock_run): + # First test calling without additional flags: + cppf.preprocess(Path("a.in"), Path("a.out")) + mock_run.assert_called_with( + ["cpp", "-traditional-cpp", "-P", "a.in", "a.out"], + capture_output=True, env=None, cwd=None, check=False) + + with mock.patch("subprocess.run", mock_run): + # Then test with added flags: + cppf.preprocess(Path("a.in"), Path("a.out"), ["-DDO_SOMETHING"]) + mock_run.assert_called_with( + ["cpp", "-traditional-cpp", "-P", "-DDO_SOMETHING", "a.in", "a.out"], + capture_output=True, env=None, cwd=None, check=False) diff --git a/tests/unit_tests/tools/test_psyclone.py b/tests/unit_tests/tools/test_psyclone.py new file mode 100644 index 00000000..7d534fe2 --- /dev/null +++ b/tests/unit_tests/tools/test_psyclone.py @@ -0,0 +1,63 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the PSyclone implementation. 
+'''
+
+from unittest import mock
+
+from fab.tools import (Category, Psyclone)
+
+
+def test_psyclone_constructor():
+    '''Test the PSyclone constructor.'''
+    psyclone = Psyclone()
+    assert psyclone.category == Category.PSYCLONE
+    assert psyclone.name == "psyclone"
+    assert psyclone.exec_name == "psyclone"
+    assert psyclone.flags == []
+
+
+def test_psyclone_check_available():
+    '''Tests the is_available functionality.'''
+    psyclone = Psyclone()
+    mock_result = mock.Mock(returncode=0)
+    with mock.patch('fab.tools.tool.subprocess.run',
+                    return_value=mock_result) as tool_run:
+        assert psyclone.check_available()
+    tool_run.assert_called_once_with(
+        ["psyclone", "--version"], capture_output=True, env=None,
+        cwd=None, check=False)
+
+    # Test behaviour if a runtime error happens:
+    with mock.patch("fab.tools.tool.Tool.run",
+                    side_effect=RuntimeError("")) as tool_run:
+        assert not psyclone.check_available()
+
+
+def test_psyclone_process():
+    '''Test running PSyclone.'''
+    psyclone = Psyclone()
+    mock_result = mock.Mock(returncode=0)
+    # Create a mock function that returns a 'transformation script'
+    # called `script_called`:
+    transformation_function = mock.Mock(return_value="script_called")
+    config = mock.Mock()
+    with mock.patch('fab.tools.tool.subprocess.run',
+                    return_value=mock_result) as tool_run:
+        psyclone.process(config=config,
+                         api="dynamo0.3",
+                         x90_file="x90_file",
+                         psy_file="psy_file",
+                         alg_file="alg_file",
+                         transformation_script=transformation_function,
+                         kernel_roots=["root1", "root2"],
+                         additional_parameters=["-c", "psyclone.cfg"])
+    tool_run.assert_called_with(
+        ['psyclone', '-api', 'dynamo0.3', '-l', 'all', '-opsy', 'psy_file',
+         '-oalg', 'alg_file', '-s', 'script_called', '-c',
+         'psyclone.cfg', '-d', 'root1', '-d', 'root2', 'x90_file'],
+        capture_output=True, env=None, cwd=None, check=False)
diff --git a/tests/unit_tests/tools/test_rsync.py b/tests/unit_tests/tools/test_rsync.py
new file mode 100644
index 00000000..6b3640bb
--- /dev/null
+++ b/tests/unit_tests/tools/test_rsync.py
@@ -0,0 +1,59 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+'''Tests the rsync implementation.
+'''
+
+from unittest import mock
+
+from fab.tools import (Category, Rsync)
+
+
+def test_rsync_constructor():
+    '''Test the rsync constructor.'''
+    rsync = Rsync()
+    assert rsync.category == Category.RSYNC
+    assert rsync.name == "rsync"
+    assert rsync.exec_name == "rsync"
+    assert rsync.flags == []
+
+
+def test_rsync_check_available():
+    '''Tests the is_available functionality.'''
+    rsync = Rsync()
+    with mock.patch("fab.tools.tool.Tool.run") as tool_run:
+        assert rsync.check_available()
+    tool_run.assert_called_once_with("--version")
+
+    # Test behaviour if a runtime error happens:
+    with mock.patch("fab.tools.tool.Tool.run",
+                    side_effect=RuntimeError("")) as tool_run:
+        assert not rsync.check_available()
+
+
+def test_rsync_create():
+    '''Test executing an rsync, and also make sure that src always
+    ends with a '/'. 
+ ''' + rsync = Rsync() + + # Test 1: src with / + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + rsync.execute(src="/src/", dst="/dst") + tool_run.assert_called_with( + ['rsync', '--times', '--links', '--stats', '-ru', '/src/', '/dst'], + capture_output=True, env=None, cwd=None, check=False) + + # Test 2: src without / + mock_result = mock.Mock(returncode=0) + with mock.patch('fab.tools.tool.subprocess.run', + return_value=mock_result) as tool_run: + rsync.execute(src="/src", dst="/dst") + tool_run.assert_called_with( + ['rsync', '--times', '--links', '--stats', '-ru', '/src/', '/dst'], + capture_output=True, env=None, cwd=None, check=False) diff --git a/tests/unit_tests/tools/test_tool.py b/tests/unit_tests/tools/test_tool.py new file mode 100644 index 00000000..dd892831 --- /dev/null +++ b/tests/unit_tests/tools/test_tool.py @@ -0,0 +1,143 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the tool class. +''' + + +import logging +from pathlib import Path +from unittest import mock + +import pytest + +from fab.tools import Category, CompilerSuiteTool, Tool + + +def test_tool_constructor(): + '''Test the constructor.''' + tool = Tool("gnu", "gfortran", Category.FORTRAN_COMPILER) + assert str(tool) == "Tool - gnu: gfortran" + assert tool.exec_name == "gfortran" + assert tool.name == "gnu" + assert tool.category == Category.FORTRAN_COMPILER + assert isinstance(tool.logger, logging.Logger) + assert tool.is_compiler + + linker = Tool("gnu", "gfortran", Category.LINKER) + assert str(linker) == "Tool - gnu: gfortran" + assert linker.exec_name == "gfortran" + assert linker.name == "gnu" + assert linker.category == Category.LINKER + assert isinstance(linker.logger, logging.Logger) + assert not linker.is_compiler + + # Check that a path is accepted + mytool = Tool("MyTool", Path("/bin/mytool")) + assert mytool.name == "MyTool" + # A path should be converted to a string, since this + # is later passed to the subprocess command + assert mytool.exec_name == "/bin/mytool" + assert mytool.category == Category.MISC + + # Check that if we specify no category, we get the default: + misc = Tool("misc", "misc") + assert misc.exec_name == "misc" + assert misc.name == "misc" + assert misc.category == Category.MISC + + +def test_tool_is_available(): + '''Test that is_available works as expected.''' + tool = Tool("gfortran", "gfortran", Category.FORTRAN_COMPILER) + with mock.patch.object(tool, "check_available", return_value=True): + assert tool.is_available + # Test the getter + tool._is_available = False + assert not tool.is_available + assert tool.is_compiler + + # Test the exception when trying to use in a non-existent tool: + with pytest.raises(RuntimeError) as err: + tool.run("--ops") + assert ("Tool 'gfortran' is not available to run '['gfortran', '--ops']'" + in str(err.value)) + + +class TestToolRun: + '''Test the run method of Tool.''' + + def test_no_error_no_args(self,): + '''Test usage of `run` without any errors when no additional + command line argument is provided.''' + tool = Tool("gnu", "gfortran", Category.FORTRAN_COMPILER) + mock_result = mock.Mock(returncode=0, return_value=123) + mock_result.stdout.decode = 
mock.Mock(return_value="123")
+
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result):
+            assert tool.run(capture_output=True) == "123"
+            assert tool.run(capture_output=False) == ""
+
+    def test_no_error_with_single_args(self):
+        '''Test usage of `run` without any errors when a single
+        command line argument is provided as string.'''
+        tool = Tool("gnu", "gfortran", Category.FORTRAN_COMPILER)
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            tool.run("a")
+        tool_run.assert_called_once_with(
+            ["gfortran", "a"], capture_output=True, env=None,
+            cwd=None, check=False)
+
+    def test_no_error_with_multiple_args(self):
+        '''Test usage of `run` without any errors when more than
+        one command line argument is provided as a list.'''
+        tool = Tool("gnu", "gfortran", Category.FORTRAN_COMPILER)
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            tool.run(["a", "b"])
+        tool_run.assert_called_once_with(
+            ["gfortran", "a", "b"], capture_output=True, env=None,
+            cwd=None, check=False)
+
+    def test_error(self):
+        '''Tests the error handling of `run`. '''
+        tool = Tool("gnu", "gfortran", Category.FORTRAN_COMPILER)
+        result = mock.Mock(returncode=1)
+        mocked_error_message = 'mocked error message'
+        result.stderr.decode = mock.Mock(return_value=mocked_error_message)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=result):
+            with pytest.raises(RuntimeError) as err:
+                tool.run()
+            assert mocked_error_message in str(err.value)
+            assert "Command failed with return code 1" in str(err.value)
+
+    def test_error_file_not_found(self):
+        '''Tests the error handling of `run`. '''
+        tool = Tool("does_not_exist", "does_not_exist",
+                    Category.FORTRAN_COMPILER)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        side_effect=FileNotFoundError("not found")):
+            with pytest.raises(RuntimeError) as err:
+                tool.run()
+            assert ("Command '['does_not_exist']' could not be executed."
+                    in str(err.value))
+
+
+def test_suite_tool():
+    '''Test the constructor.'''
+    tool = CompilerSuiteTool("gnu", "gfortran", "gnu",
+                             Category.FORTRAN_COMPILER)
+    assert str(tool) == "CompilerSuiteTool - gnu: gfortran"
+    assert tool.exec_name == "gfortran"
+    assert tool.name == "gnu"
+    assert tool.suite == "gnu"
+    assert tool.category == Category.FORTRAN_COMPILER
+    assert isinstance(tool.logger, logging.Logger)
diff --git a/tests/unit_tests/tools/test_tool_box.py b/tests/unit_tests/tools/test_tool_box.py
new file mode 100644
index 00000000..5ac55ac4
--- /dev/null
+++ b/tests/unit_tests/tools/test_tool_box.py
@@ -0,0 +1,72 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+'''This module tests the ToolBox class. 
+''' +from unittest import mock +import warnings + +import pytest + +from fab.tools import Category, CCompiler, Gfortran, ToolBox, ToolRepository + + +def test_tool_box_constructor(): + '''Tests the ToolBox constructor.''' + tb = ToolBox() + assert isinstance(tb._all_tools, dict) + + +def test_tool_box_get_tool(): + '''Tests get_tool.''' + tb = ToolBox() + # No tool is defined, so the default Fortran compiler must be returned: + default_compiler = tb.get_tool(Category.FORTRAN_COMPILER) + tr = ToolRepository() + assert default_compiler is tr.get_default(Category.FORTRAN_COMPILER) + # Check that dictionary-like access works as expected: + assert tb[Category.FORTRAN_COMPILER] == default_compiler + + # Now add gfortran as Fortran compiler to the tool box + tr_gfortran = tr.get_tool(Category.FORTRAN_COMPILER, "gfortran") + tb.add_tool(tr_gfortran) + gfortran = tb.get_tool(Category.FORTRAN_COMPILER) + assert gfortran is tr_gfortran + + +def test_tool_box_add_tool_replacement(): + '''Test that replacing a tool raises a warning, and that this + warning can be disabled.''' + + tb = ToolBox() + mock_compiler1 = CCompiler("mock_c_compiler1", "mock_exec1", "suite") + mock_compiler1._is_available = True + mock_compiler2 = CCompiler("mock_c_compiler2", "mock_exec2", "suite") + mock_compiler2._is_available = True + + tb.add_tool(mock_compiler1) + + warn_message = (f"Replacing existing tool '{mock_compiler1}' with " + f"'{mock_compiler2}'.") + with pytest.warns(UserWarning, match=warn_message): + tb.add_tool(mock_compiler2) + + with warnings.catch_warnings(): + warnings.simplefilter("error") + tb.add_tool(mock_compiler1, silent_replace=True) + + +def test_tool_box_add_tool_not_avail(): + '''Test that tools that are not available cannot be added to + a tool box.''' + + tb = ToolBox() + gfortran = Gfortran() + # Mark this compiler to be not available: + with mock.patch.object(gfortran, "check_available", return_value=False): + with pytest.raises(RuntimeError) as err: + tb.add_tool(gfortran) + assert f"Tool '{gfortran}' is not available" in str(err.value) diff --git a/tests/unit_tests/tools/test_tool_repository.py b/tests/unit_tests/tools/test_tool_repository.py new file mode 100644 index 00000000..4a315150 --- /dev/null +++ b/tests/unit_tests/tools/test_tool_repository.py @@ -0,0 +1,96 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''This module tests the ToolRepository. 
+''' + +import pytest + + +from fab.tools import Category, Gcc, Gfortran, Ifort, Linker, ToolRepository + + +def test_tool_repository_get_singleton_new(): + '''Tests the singleton behaviour.''' + ToolRepository._singleton = None + tr1 = ToolRepository() + tr2 = ToolRepository() + assert tr1 == tr2 + ToolRepository._singleton = None + tr3 = ToolRepository() + assert tr1 is not tr3 + + +def test_tool_repository_constructor(): + '''Tests the ToolRepository constructor.''' + tr = ToolRepository() + assert Category.C_COMPILER in tr + assert Category.FORTRAN_COMPILER in tr + + +def test_tool_repository_get_tool(): + '''Tests get_tool.''' + tr = ToolRepository() + gfortran = tr.get_tool(Category.FORTRAN_COMPILER, "gfortran") + assert isinstance(gfortran, Gfortran) + + ifort = tr.get_tool(Category.FORTRAN_COMPILER, "ifort") + assert isinstance(ifort, Ifort) + + +def test_tool_repository_get_tool_error(): + '''Tests error handling during tet_tool.''' + tr = ToolRepository() + with pytest.raises(KeyError) as err: + tr.get_tool("unknown-category", "something") + assert "Unknown category 'unknown-category'" in str(err.value) + + with pytest.raises(KeyError) as err: + tr.get_tool(Category.C_COMPILER, "something") + assert ("Unknown tool 'something' in category 'C_COMPILER'" + in str(err.value)) + + +def test_tool_repository_get_default(): + '''Tests get_default.''' + tr = ToolRepository() + gfortran = tr.get_default(Category.FORTRAN_COMPILER) + assert isinstance(gfortran, Gfortran) + + gcc_linker = tr.get_default(Category.LINKER) + assert isinstance(gcc_linker, Linker) + assert gcc_linker.name == "linker-gcc" + + gcc = tr.get_default(Category.C_COMPILER) + assert isinstance(gcc, Gcc) + + +def test_tool_repository_get_default_error(): + '''Tests error handling in get_default.''' + tr = ToolRepository() + with pytest.raises(RuntimeError) as err: + tr.get_default("unknown-category") + assert "Invalid category type 'str'." in str(err.value) + + +def test_tool_repository_default_compiler_suite(): + '''Tests the setting of default suite for compiler and linker.''' + tr = ToolRepository() + tr.set_default_compiler_suite("gnu") + for cat in [Category.C_COMPILER, Category.FORTRAN_COMPILER, + Category.LINKER]: + def_tool = tr.get_default(cat) + assert def_tool.suite == "gnu" + + tr.set_default_compiler_suite("intel-classic") + for cat in [Category.C_COMPILER, Category.FORTRAN_COMPILER, + Category.LINKER]: + def_tool = tr.get_default(cat) + assert def_tool.suite == "intel-classic" + with pytest.raises(RuntimeError) as err: + tr.set_default_compiler_suite("does-not-exist") + assert ("Cannot find 'FORTRAN_COMPILER' in the suite 'does-not-exist'" + in str(err.value)) diff --git a/tests/unit_tests/tools/test_versioning.py b/tests/unit_tests/tools/test_versioning.py new file mode 100644 index 00000000..a3b21896 --- /dev/null +++ b/tests/unit_tests/tools/test_versioning.py @@ -0,0 +1,295 @@ +############################################################################## +# (c) Crown copyright Met Office. All rights reserved. +# For further details please refer to the file COPYRIGHT +# which you should have received as part of this distribution +############################################################################## + +'''Tests the compiler implementation. 
diff --git a/tests/unit_tests/tools/test_versioning.py b/tests/unit_tests/tools/test_versioning.py
new file mode 100644
index 00000000..a3b21896
--- /dev/null
+++ b/tests/unit_tests/tools/test_versioning.py
@@ -0,0 +1,295 @@
+##############################################################################
+# (c) Crown copyright Met Office. All rights reserved.
+# For further details please refer to the file COPYRIGHT
+# which you should have received as part of this distribution
+##############################################################################
+
+'''Tests the versioning tools (git, Subversion and FCM).
+'''
+
+from unittest import mock
+
+import pytest
+
+from fab.tools import Category, Fcm, Git, Subversion, Versioning
+
+
+class TestGit:
+    '''Contains all git related tests.'''
+
+    def test_versioning_constructor(self):
+        '''Test the versioning constructor.'''
+        versioning = Versioning("versioning", "versioning.exe", Category.GIT)
+        assert versioning.category == Category.GIT
+        assert versioning.name == "versioning"
+        assert versioning.flags == []
+        assert versioning.exec_name == "versioning.exe"
+
+    def test_git_constructor(self):
+        '''Test the git constructor.'''
+        git = Git()
+        assert git.category == Category.GIT
+        assert git.flags == []
+
+    def test_git_check_available(self):
+        '''Check if check_available works as expected.
+        '''
+        git = Git()
+        with mock.patch.object(git, "run", return_value=0):
+            assert git.check_available()
+
+        # Now test if run raises an error
+        with mock.patch.object(git, "run", side_effect=RuntimeError("")):
+            assert not git.check_available()
+
+    def test_git_current_commit(self):
+        '''Check current_commit functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Note that only the first line will be returned, and stdout of the
+        # subprocess run method must be encoded (i.e. decode is called later)
+        mock_result = mock.Mock(returncode=0, stdout="abc\ndef".encode())
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            assert "abc" == git.current_commit()
+
+        tool_run.assert_called_once_with(
+            ['git', 'log', '--oneline', '-n', '1'], capture_output=True,
+            env=None, cwd='.', check=False)
+
+        # Test if we specify a path
+        mock_result = mock.Mock(returncode=0, stdout="abc\ndef".encode())
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            assert "abc" == git.current_commit("/not-exist")
+
+        tool_run.assert_called_once_with(
+            ['git', 'log', '--oneline', '-n', '1'], capture_output=True,
+            env=None, cwd="/not-exist", check=False)
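Each of the remaining git tests repeats the pattern established in `test_git_current_commit`: patch `fab.tools.tool.subprocess.run` with a canned result, call the method under test, then assert on the exact command line. Were the pattern to grow further it could be factored into a helper along these lines (the helper is our illustration, not part of Fab):

```python
from unittest import mock


def mock_tool_run(returncode=0, stdout=b""):
    '''Return a patcher for fab.tools.tool.subprocess.run so that a tool
    invocation appears to finish with the given return code and output.'''
    result = mock.Mock(returncode=returncode, stdout=stdout)
    return mock.patch('fab.tools.tool.subprocess.run', return_value=result)


# Usage, mirroring test_git_current_commit above:
#     with mock_tool_run(stdout=b"abc\ndef") as tool_run:
#         assert git.current_commit() == "abc"
```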
+
+    def test_git_init(self):
+        '''Check init functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Mock a successful subprocess run:
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            git.init("/src")
+        tool_run.assert_called_once_with(
+            ['git', 'init', '.'], capture_output=True, env=None,
+            cwd='/src', check=False)
+
+    def test_git_clean(self):
+        '''Check clean functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Mock a successful subprocess run:
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            git.clean('/src')
+        tool_run.assert_called_once_with(
+            ['git', 'clean', '-f'], capture_output=True, env=None,
+            cwd='/src', check=False)
+
+    def test_git_fetch(self):
+        '''Check fetch functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Mock a successful subprocess run:
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            git.fetch("/src", "/dst", revision="revision")
+        tool_run.assert_called_once_with(
+            ['git', 'fetch', "/src", "revision"], capture_output=False,
+            env=None, cwd='/dst', check=False)
+
+        with mock.patch.object(git, "run",
+                               side_effect=RuntimeError("ERR")) as run:
+            with pytest.raises(RuntimeError) as err:
+                git.fetch("/src", "/dst", revision="revision")
+            assert "ERR" in str(err.value)
+        run.assert_called_once_with(['fetch', "/src", "revision"], cwd="/dst",
+                                    capture_output=False)
+
+    def test_git_checkout(self):
+        '''Check checkout functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Mock a successful subprocess run:
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            git.checkout("/src", "/dst", revision="revision")
+        tool_run.assert_any_call(['git', 'fetch', "/src", "revision"],
+                                 cwd='/dst', capture_output=False, env=None,
+                                 check=False)
+        tool_run.assert_called_with(['git', 'checkout', "FETCH_HEAD"],
+                                    cwd="/dst", capture_output=False,
+                                    env=None, check=False)
+
+        with mock.patch.object(git, "run",
+                               side_effect=RuntimeError("ERR")) as run:
+            with pytest.raises(RuntimeError) as err:
+                git.checkout("/src", "/dst", revision="revision")
+            assert "ERR" in str(err.value)
+        run.assert_called_with(['fetch', "/src", "revision"], cwd="/dst",
+                               capture_output=False)
+
+    def test_git_merge(self):
+        '''Check merge functionality. The tests here will actually
+        mock the git results, so they will work even if git is not installed.
+        The system_tests will test an actual check out etc. '''
+
+        git = Git()
+        # Mock a successful subprocess run:
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            git.merge("/dst", revision="revision")
+        tool_run.assert_called_once_with(
+            ['git', 'merge', 'FETCH_HEAD'], capture_output=False,
+            env=None, cwd='/dst', check=False)
+
+        # Test the behaviour if merge fails, but merge --abort works:
+        # Simple generator that raises an exception only the first time
+        # it is called.
+        def raise_1st_time():
+            yield RuntimeError
+            yield 0
+
+        with mock.patch.object(git, "run",
+                               side_effect=raise_1st_time()) as run:
+            with pytest.raises(RuntimeError) as err:
+                git.merge("/dst", revision="revision")
+            assert "Error merging revision. Merge aborted." in str(err.value)
+        run.assert_any_call(['merge', "FETCH_HEAD"], cwd="/dst",
+                            capture_output=False)
+        run.assert_any_call(['merge', "--abort"], cwd="/dst",
+                            capture_output=False)
+
+        # Test behaviour if both merge and merge --abort fail
+        with mock.patch.object(git, "run",
+                               side_effect=RuntimeError("ERR")) as run:
+            with pytest.raises(RuntimeError) as err:
+                git.merge("/dst", revision="revision")
+            assert "ERR" in str(err.value)
+        run.assert_called_with(['merge', "--abort"], cwd="/dst",
+                               capture_output=False)
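The `raise_1st_time` generator in `test_git_merge` leans on a documented `unittest.mock` feature: when `side_effect` is an iterable, any member that is an exception (class or instance) is raised and every other member is returned. That is what lets a single mock fail the `merge` call and then let `merge --abort` succeed. A standalone illustration (all names here are ours):

```python
from unittest import mock


def fail_once():
    '''First call raises, every later call returns 0.'''
    yield RuntimeError("first call fails")
    while True:
        yield 0


stub = mock.Mock(side_effect=fail_once())
try:
    stub()
except RuntimeError:
    pass            # the first call raised, as requested
assert stub() == 0  # subsequent calls succeed
```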
+
+
+# ============================================================================
+class TestSvn:
+    '''Contains all svn related tests.'''
+
+    def test_svn_constructor(self):
+        '''Test the svn constructor.'''
+        svn = Subversion()
+        assert svn.category == Category.SUBVERSION
+        assert svn.flags == []
+        assert svn.name == "subversion"
+        assert svn.exec_name == "svn"
+
+    def test_svn_export(self):
+        '''Check export svn functionality. The tests here will actually
+        mock the svn results, so they will work even if subversion is not
+        installed. The system_tests will test an actual check out etc. '''
+
+        svn = Subversion()
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.export("/src", "/dst", revision="123")
+
+        tool_run.assert_called_once_with(
+            ["svn", "export", "--force", "--revision", "123", "/src", "/dst"],
+            env=None, cwd=None, capture_output=True, check=False)
+
+        # Test if we don't specify a revision
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.export("/src", "/dst")
+        tool_run.assert_called_once_with(
+            ["svn", "export", "--force", "/src", "/dst"],
+            env=None, cwd=None, capture_output=True, check=False)
+
+    def test_svn_checkout(self):
+        '''Check checkout svn functionality. The tests here will actually
+        mock the svn results, so they will work even if subversion is not
+        installed. The system_tests will test an actual check out etc. '''
+
+        svn = Subversion()
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.checkout("/src", "/dst", revision="123")
+
+        tool_run.assert_called_once_with(
+            ["svn", "checkout", "--revision", "123", "/src", "/dst"],
+            env=None, cwd=None, capture_output=True, check=False)
+
+        # Test if we don't specify a revision
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.checkout("/src", "/dst")
+        tool_run.assert_called_once_with(
+            ["svn", "checkout", "/src", "/dst"],
+            env=None, cwd=None, capture_output=True, check=False)
+
+    def test_svn_update(self):
+        '''Check update svn functionality. The tests here will actually
+        mock the svn results, so they will work even if subversion is not
+        installed. The system_tests will test an actual check out etc. '''
+
+        svn = Subversion()
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.update("/dst", revision="123")
+
+        tool_run.assert_called_once_with(
+            ["svn", "update", "--revision", "123"],
+            env=None, cwd="/dst", capture_output=True, check=False)
+
+    def test_svn_merge(self):
+        '''Check merge svn functionality. The tests here will actually
+        mock the svn results, so they will work even if subversion is not
+        installed. The system_tests will test an actual check out etc. '''
+
+        svn = Subversion()
+        mock_result = mock.Mock(returncode=0)
+        with mock.patch('fab.tools.tool.subprocess.run',
+                        return_value=mock_result) as tool_run:
+            svn.merge("/src", "/dst", "123")
+
+        tool_run.assert_called_once_with(
+            ["svn", "merge", "--non-interactive", "/src@123"],
+            env=None, cwd="/dst", capture_output=True, check=False)
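The asserted command lines double as documentation of the calling convention: `export` and `checkout` take an optional `revision`, `update` operates in-place on the working copy passed as `cwd`, and `merge` folds the revision into svn's peg syntax (`/src@123`). A script grabbing a pinned copy of a source tree might therefore look like the following sketch (the URL and paths are hypothetical, and an `svn` client must be on the path):

```python
from fab.tools import Subversion

svn = Subversion()
# Omitting revision would fetch HEAD instead of the pinned revision 123:
svn.export("https://example.com/repo/trunk", "/tmp/my_source", revision="123")
```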
+
+
+# ============================================================================
+class TestFcm:
+    '''Contains all FCM related tests.'''
+
+    def test_fcm_constructor(self):
+        '''Test the fcm constructor.'''
+        fcm = Fcm()
+        assert fcm.category == Category.FCM
+        assert fcm.flags == []
+        assert fcm.name == "fcm"
+        assert fcm.exec_name == "fcm"

From 529522b4042ba4ee3338253245e25ee85f168c77 Mon Sep 17 00:00:00 2001
From: Matthew Hambley
Date: Mon, 24 Jun 2024 01:59:27 +0100
Subject: [PATCH 9/9] Re-worked README. (#315)

Co-authored-by: Joerg Henrichs
---
 README.md | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 03b76595..fb426047 100644
--- a/README.md
+++ b/README.md
@@ -2,14 +2,15 @@
 ![](https://github.com/Metomi/fab/workflows/Build/badge.svg)
 
-The "Fab" build system aims to provide a quick and easy build process tailored towards a
-specific subset of scientific software developers. Quick should be in both use and
-operation. Easy should mean the simple things are simple and the complicated things
-possible.
+The "Fab" project aims to provide the means to quickly and easily compile
+software in a way tailored for scientific software development. It aims to be
+quick both in terms of use and operation. Meanwhile, ease should mean that the
+simple things are simple and the complicated things are possible.
 
-The tool is not intended for general use outside its intended domain although you are,
-of course, welcome to use it there. Just don't expect high priority to be given to
-features which do not target scientific software development.
+Fab is not intended to replace existing tools for compiling general
+application software. It targets different problems from, for instance,
+CMake-derived build systems. If your usage falls outside the focus of
+development, you shouldn't expect high priority on your feature requests.
 
 ## Licence
 
@@ -17,10 +18,13 @@ The software is made available under a 3-clause BSD licence.
 
 ## Installation
 
-You may simply use `pip install sci-fab`.
+The tool is easily installed using `pip install sci-fab`.
 
 ## Usage
 
-Although Fab is in its initial development phases right now and much of the
-functionality is yet to be added, the command line interface to the tool is
-in place and can be run using the command `fab`
+Fab offers two modes of operation. In "zero configuration" mode it is used
+directly as a tool by running `fab`. This examines the current working
+directory and tries to build whatever it finds there.
+
+In "framework" mode it offers a library of building-blocks which a developer
+can use to create a build system customised to the needs of their software.
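The two README modes tie back to the code touched earlier in this series: "zero configuration" wraps the same building blocks that "framework" mode exposes directly. A minimal framework-mode script might look like the sketch below; the module paths and step names follow the Fab sources seen in these patches, but they should be treated as indicative rather than definitive:

```python
from fab.build_config import BuildConfig
from fab.steps.grab.folder import grab_folder
from fab.tools import Category, ToolBox, ToolRepository

# Choose the tools for this build; anything not added explicitly falls
# back to the ToolRepository defaults.
tool_box = ToolBox()
tool_box.add_tool(
    ToolRepository().get_tool(Category.FORTRAN_COMPILER, "gfortran"))

with BuildConfig(project_label="my_app", tool_box=tool_box) as state:
    # Copy the source tree into the project workspace (rsync under the hood):
    grab_folder(state, src="/path/to/my_app/source")
    # ...further steps would follow: find source files, preprocess,
    # analyse dependencies, compile and link.
```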