diff --git a/.github/ISSUE_TEMPLATE/issue_template.md b/.github/ISSUE_TEMPLATE/issue_template.md index 73026daad..5f7422953 100644 --- a/.github/ISSUE_TEMPLATE/issue_template.md +++ b/.github/ISSUE_TEMPLATE/issue_template.md @@ -38,8 +38,8 @@ If you foresee them in a particular order or priority, please use numbering --> ### Next Steps - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4372bd64c..465e701a1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,13 +1,13 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files - - repo: https://github.com/psf/black - rev: 23.9.1 + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.10.0 hooks: - id: black files: ^tedana/ @@ -16,3 +16,13 @@ repos: hooks: - id: isort files: ^tedana/ + - repo: https://github.com/ikamensh/flynt/ + rev: 1.0.1 + hooks: + - id: flynt + files: ^tedana/ + - repo: https://github.com/DanielNoord/pydocstringformatter + rev: v0.7.3 + hooks: + - id: pydocstringformatter + files: ^tedana/ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index bd9010bf7..0cbc71799 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -56,7 +56,7 @@ reported by contacting our and [Stefano Moia](https://github.com/smoia). Confidentiality will be respected in reporting. The Code of Conduct Enforcement Team any take any action they deem -necessary for the safety of the `tedana` community, including but not +necessary for the safety of the `tedana` community, including but not limited to: * facilitating a conversation between the two parties involved in the violation of the code of conduct diff --git a/LICENSE b/LICENSE index 35a0945f8..2a6790ccb 100644 --- a/LICENSE +++ b/LICENSE @@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. - + Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a @@ -111,7 +111,7 @@ modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. - + GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION @@ -158,7 +158,7 @@ Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - + 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 @@ -216,7 +216,7 @@ instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. - + Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. 
@@ -267,7 +267,7 @@ Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. - + 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work @@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. - + 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined @@ -370,7 +370,7 @@ subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. - + 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or @@ -422,7 +422,7 @@ conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. - + 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is @@ -456,7 +456,7 @@ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS - + How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest diff --git a/docs/_static/decision_tree_kundu.tex b/docs/_static/decision_tree_kundu.tex index e2192171c..72b2052bb 100644 --- a/docs/_static/decision_tree_kundu.tex +++ b/docs/_static/decision_tree_kundu.tex @@ -6,7 +6,7 @@ \pagecolor{white} %-------------------------defining colorblind friendly colors -% Using pale color scheme in Figure 6 +% Using pale color scheme in Figure 6 % by Paul Tol https://personal.sron.nl/~pault/ \definecolor{cbblue}{HTML}{BBCCEE} \definecolor{cbcyan}{HTML}{CCEEFF} @@ -15,7 +15,7 @@ \definecolor{cbred}{HTML}{FFCCCC} \definecolor{cbgrey}{HTML}{DDDDDD} -% -------------------------defining nodes +% -------------------------defining nodes \tikzstyle{input} = [trapezium, trapezium left angle =80, trapezium right angle = 100, minimum width= 3cm, minimum height=0.5cm, text centered, draw=black, fill=cbblue] \tikzstyle{process} = [rectangle, minimum width = 3cm, minimum height = 1cm, diff --git a/docs/_static/decision_tree_legend.tex b/docs/_static/decision_tree_legend.tex index 0fda0384f..e5b9a2edd 100644 --- a/docs/_static/decision_tree_legend.tex +++ b/docs/_static/decision_tree_legend.tex @@ -6,7 +6,7 @@ \pagecolor{white} %-------------------------defining colorblind friendly colors -% Using pale color scheme in Figure 6 +% Using pale color scheme in Figure 6 % by Paul Tol https://personal.sron.nl/~pault/ \definecolor{cbblue}{HTML}{BBCCEE} \definecolor{cbcyan}{HTML}{CCEEFF} @@ -15,7 +15,7 @@ \definecolor{cbred}{HTML}{FFCCCC} \definecolor{cbgrey}{HTML}{DDDDDD} -% -------------------------defining nodes +% -------------------------defining nodes \tikzstyle{input} = [trapezium, trapezium left angle =80, trapezium right angle = 100, minimum width= 3cm, minimum height=0.5cm, text centered, draw=black, fill=cbblue] \tikzstyle{process} = [rectangle, minimum width = 3cm, minimum height = 0cm, @@ -54,4 +54,4 @@ \end{tikzpicture} -\end{document} \ No newline at end of file +\end{document} diff --git a/docs/_static/decision_tree_minimal.tex b/docs/_static/decision_tree_minimal.tex index f8b922e68..623201165 100644 --- a/docs/_static/decision_tree_minimal.tex +++ b/docs/_static/decision_tree_minimal.tex @@ -6,7 +6,7 @@ \pagecolor{white} %-------------------------defining colorblind friendly colors -% Using pale color scheme in Figure 6 +% Using pale color scheme in Figure 6 % by Paul Tol https://personal.sron.nl/~pault/ \definecolor{cbblue}{HTML}{BBCCEE} \definecolor{cbcyan}{HTML}{CCEEFF} @@ -15,7 +15,7 @@ \definecolor{cbred}{HTML}{FFCCCC} \definecolor{cbgrey}{HTML}{DDDDDD} -% -------------------------defining nodes +% -------------------------defining nodes \tikzstyle{input} = [trapezium, trapezium left angle =80, trapezium right angle = 100, minimum width= 3cm, minimum height=0.5cm, text centered, draw=black, fill=cbblue] \tikzstyle{process} = [rectangle, minimum width = 3cm, minimum height = 1cm, diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst index f90b7c828..bcfe913aa 100644 --- a/docs/building_decision_trees.rst +++ b/docs/building_decision_trees.rst @@ -205,7 +205,7 @@ that is used to check whether results are plausible & can help avoid mistakes. A narrative description of the tree that could be used in report logging. This should include any citations, which must be included in the `references BibTeX file`_. - + .. 
_references BibTeX file: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/references.bib - necessary_metrics diff --git a/docs/included_decision_trees.rst b/docs/included_decision_trees.rst index 156404bcd..262f4fe31 100644 --- a/docs/included_decision_trees.rst +++ b/docs/included_decision_trees.rst @@ -28,7 +28,7 @@ a component was given a specific classification. Each step in the flow chart is labeled with a ``node`` number. If ``tedana`` is run using one of these trees, those node numbers will match the numbers in the ``ICA status table`` and the -``ICA decision tree`` that are described in +``ICA decision tree`` that are described in :ref:`output-filename-descriptions`. These node numbers can be used to see when in the process a component's classifiation changed. diff --git a/docs/multi-echo.rst b/docs/multi-echo.rst index 9e78ebadf..cd91f20cb 100644 --- a/docs/multi-echo.rst +++ b/docs/multi-echo.rst @@ -546,7 +546,7 @@ For more details, see the `fmriprep workflows page`_ and :ref:`collecting fMRIPr .. _fmriprep workflows page: https://fmriprep.readthedocs.io/en/stable/workflows.html -`fmrwhy`_ runs BIDS-compatible fMRI analysis with SPM12 and supports multi-echo data, +`fmrwhy`_ runs BIDS-compatible fMRI analysis with SPM12 and supports multi-echo data, but it is no longer being actively maintained. .. _fmrwhy: https://fmrwhy.readthedocs.io diff --git a/docs/outputs.rst b/docs/outputs.rst index 53716f79d..517a428ed 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -23,7 +23,7 @@ future processing. ``tedana`` allows for multiple file naming conventions. The k and naming options for each convention that can be set using the ``--convention`` option are in `outputs.json`_. The output of ``tedana`` also includes a file called ``registry.json`` or ``desc-tedana_registry.json`` that includes the keys and the matching -file names for the output. The table below lists both these keys and the default +file names for the output. The table below lists both these keys and the default "BIDS Derivatives" file names. .. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json @@ -399,7 +399,7 @@ component (selected in the summary view, see below). It includes three different (selected in the summary view). The x-axis represents time (in units of TR and seconds), and the y-axis represents signal levels (in arbitrary units). Finally, the color of the trace informs us about the component classification status. - Plausibly BOLD-weighted components might have responses that follow the task design, + Plausibly BOLD-weighted components might have responses that follow the task design, while components that are less likely to be BOLD-weighted might have large signal spikes or slow drifts. 
If a high variance component time series initially has a few very high magnitude volumes, that is a sign non-steady state volumes were not removed diff --git a/docs/zenodo.js b/docs/zenodo.js index 9ef762c5d..ea368acae 100644 --- a/docs/zenodo.js +++ b/docs/zenodo.js @@ -23,7 +23,7 @@ String.prototype.endsWith = function(suffix) { }; function getZenodoIDFromTag(conceptRecID, tagName, callback) { - getContent('https://zenodo.org/api/records/?q=conceptrecid:' + conceptRecID + '%20AND%20related.identifier:*github*' + tagName + '&all_versions&sort=-version', + getContent('https://zenodo.org/api/records/?q=conceptrecid:' + conceptRecID + '%20AND%20related.identifier:*github*' + tagName + '&all_versions&sort=-version', 'application/json', function(err, data) { if (err !== null) { @@ -42,7 +42,7 @@ function getZenodoIDFromTag(conceptRecID, tagName, callback) { } function getLatestIDFromconceptID(conceptRecID, callback) { - getContent('https://zenodo.org/api/records/' + conceptRecID, + getContent('https://zenodo.org/api/records/' + conceptRecID, 'application/json', function(err, data) { if (err !== null) { diff --git a/pyproject.toml b/pyproject.toml index b53abf3be..92798da19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,9 +51,14 @@ tests = [ "codecov", "coverage", "flake8>=3.7", + "flake8-absolute-import", "flake8-black", + "flake8-docstrings", "flake8-isort", "flake8-pyproject", + "flake8-unused-arguments", + "flake8-use-fstring", + "pep8-naming", "pytest", "pytest-cov>=4.0.0", "requests", @@ -130,9 +135,15 @@ exclude = [ ignore = ["E203", "E402", "W503"] per-file-ignores = [ "*/__init__.py: F401", + "tedana/tests/*: D", ] docstring-convention = "numpy" +[tool.pydocstringformatter] +write = true +strip-whitespaces = true +max-summary-lines = 1 + [tool.coverage.run] branch = true omit = [ diff --git a/tedana/__init__.py b/tedana/__init__.py index cec68b396..b28484ada 100644 --- a/tedana/__init__.py +++ b/tedana/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""tedana: A Python package for TE-dependent analysis of multi-echo data.""" +"""Tedana: A Python package for TE-dependent analysis of multi-echo data.""" import warnings from tedana.__about__ import __copyright__, __credits__, __packagename__, __version__ diff --git a/tedana/bibtex.py b/tedana/bibtex.py index 4734bbfc7..b884bde4f 100644 --- a/tedana/bibtex.py +++ b/tedana/bibtex.py @@ -1,6 +1,4 @@ -""" -Utilities for tedana package -""" +"""Utilities for managing the tedana bibliography.""" import logging import os.path as op import re @@ -27,7 +25,7 @@ def find_braces(string): Returns ------- - :obj:`list` of :obj:`tuple` of :obj:`int` + : obj:`list` of :obj:`tuple` of :obj:`int` A list of two-element tuples of indices of matched braces. """ toret = {} diff --git a/tedana/combine.py b/tedana/combine.py index 449491e56..dff6741ab 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -1,6 +1,4 @@ -""" -Functions to optimally combine data across echoes. -""" +"""Functions to optimally combine data across echoes.""" import logging import numpy as np @@ -10,9 +8,9 @@ def _combine_t2s(data, tes, ft2s, report=True): - """ - Combine data across echoes using weighted averaging according to voxel- - (and sometimes volume-) wise estimates of T2*. + """Combine data across echoes using weighted averaging according to estimates of T2*. + + The T2* estimates may be voxel- or voxel- and volume-wise. 
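For context on the combination this docstring describes: each echo is weighted by TE_n * exp(-TE_n / T2*), normalized across echoes, before averaging (the formula documented in make_optcom's notes later in this file's diff). Below is a minimal NumPy sketch of that weighting; the function name, array shapes, and echo times are illustrative, not tedana's API.

import numpy as np

def combine_t2s_sketch(data, tes, t2s):
    # data: (voxels, echoes, time); tes: echo times; t2s: (voxels,) T2* map
    tes = np.asarray(tes, dtype=float)[np.newaxis, :]      # (1, E)
    alpha = tes * np.exp(-tes / t2s[:, np.newaxis])        # (V, E) unnormalized weights
    alpha /= alpha.sum(axis=1, keepdims=True)              # normalize weights across echoes
    return np.einsum("ve,vet->vt", alpha, data)            # weighted average over echoes

rng = np.random.default_rng(0)
data = rng.random((2, 3, 10))                              # 2 voxels, 3 echoes, 10 volumes
combined = combine_t2s_sketch(data, tes=[14.5, 38.5, 62.5], t2s=np.array([30.0, 45.0]))
print(combined.shape)                                      # (2, 10)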
This method was proposed in :footcite:t:`posse1999enhancement`. @@ -61,12 +59,10 @@ def _combine_t2s(data, tes, ft2s, report=True): def _combine_paid(data, tes, report=True): - """ - Combine data across echoes using SNR/signal and TE via the - parallel-acquired inhomogeneity desensitized (PAID) ME-fMRI combination - method. + """Combine data across echoes using the PAID combination method. - This method was first proposed in :footcite:t:`poser2006bold`. + This method uses SNR/signal and TE via the parallel-acquired inhomogeneity desensitized (PAID) + ME-fMRI combination method :footcite:t:`poser2006bold`. Parameters ---------- @@ -101,8 +97,9 @@ def _combine_paid(data, tes, report=True): return combined -def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True): - """ +def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s"): + r"""Optimally combine BOLD data across TEs. + Optimally combine BOLD data across TEs, using only those echos with reliable signal across at least three echos. If the number of echos providing reliable signal is greater than three but less than the total number of collected echos, we assume that later @@ -125,8 +122,6 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True combmode : {'t2s', 'paid'}, optional How to combine data. Either 'paid' or 't2s'. If 'paid', argument 't2s' is not required. Default is 't2s'. - verbose : :obj:`bool`, optional - Whether to print status updates. Default is True. Returns ------- @@ -142,8 +137,8 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True 1. Estimate voxel- and TE-specific weights based on estimated :math:`T_2^*`: .. math:: - w(T_2^*)_n = \\frac{TE_n * exp(\\frac{-TE}\ - {T_{2(est)}^*})}{\\sum TE_n * exp(\\frac{-TE}{T_{2(est)}^*})} + w(T_2^*)_n = \frac{TE_n * exp(\frac{-TE}\ + {T_{2(est)}^*})}{\sum TE_n * exp(\frac{-TE}{T_{2(est)}^*})} 2. Perform weighted average per voxel and TR across TEs based on weights estimated in the previous step. @@ -162,8 +157,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True if len(tes) != data.shape[1]: raise ValueError( "Number of echos provided does not match second " - "dimension of input data: {0} != " - "{1}".format(len(tes), data.shape[1]) + f"dimension of input data: {len(tes)} != {data.shape[1]}" ) if adaptive_mask.ndim != 1: @@ -171,7 +165,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True elif adaptive_mask.shape[0] != data.shape[0]: raise ValueError( "Mask and data do not have same number of " - "voxels/samples: {0} != {1}".format(adaptive_mask.shape[0], data.shape[0]) + f"voxels/samples: {adaptive_mask.shape[0]} != {data.shape[0]}" ) if combmode not in ["t2s", "paid"]: diff --git a/tedana/decay.py b/tedana/decay.py index 92677ed8d..18b9b766e 100644 --- a/tedana/decay.py +++ b/tedana/decay.py @@ -1,6 +1,4 @@ -""" -Functions to estimate S0 and T2* from multi-echo data. -""" +"""Functions to estimate S0 and T2* from multi-echo data.""" import logging import numpy as np @@ -14,9 +12,7 @@ def _apply_t2s_floor(t2s, echo_times): - """ - Apply a floor to T2* values to prevent zero division errors during - optimal combination. + """Apply a floor to T2* values to prevent zero division errors during optimal combination. 
Parameters ---------- @@ -47,17 +43,15 @@ def _apply_t2s_floor(t2s, echo_times): n_voxels = temp_arr.size floor_percent = 100 * n_bad_voxels / n_voxels LGR.debug( - "T2* values for {0}/{1} voxels ({2:.2f}%) have been " - "identified as close to zero and have been " - "adjusted".format(n_bad_voxels, n_voxels, floor_percent) + f"T2* values for {n_bad_voxels}/{n_voxels} voxels ({floor_percent:.2f}%) have been " + "identified as close to zero and have been adjusted" ) t2s_corrected[bad_voxel_idx] = np.min(-echo_times) / np.log(eps) return t2s_corrected def monoexponential(tes, s0, t2star): - """ - Specifies a monoexponential model for use with scipy curve fitting + """Specify a monoexponential model for use with scipy curve fitting. Parameters ---------- @@ -70,15 +64,14 @@ def monoexponential(tes, s0, t2star): Returns ------- - :obj:`float` + : obj:`float` Predicted signal """ return s0 * np.exp(-tes / t2star) def fit_monoexponential(data_cat, echo_times, adaptive_mask, report=True): - """ - Fit monoexponential decay model with nonlinear curve-fitting. + """Fit monoexponential decay model with nonlinear curve-fitting. Parameters ---------- @@ -99,14 +92,14 @@ def fit_monoexponential(data_cat, echo_times, adaptive_mask, report=True): t2s_limited, s0_limited, t2s_full, s0_full : (S,) :obj:`numpy.ndarray` T2* and S0 estimate maps. + See Also + -------- + : func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` + parameter. + Notes ----- This method is slower, but more accurate, than the log-linear approach. - - See Also - -------- - :func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` - parameter. """ if report: RepLGR.info( @@ -175,9 +168,9 @@ def fit_monoexponential(data_cat, echo_times, adaptive_mask, report=True): if fail_count: fail_percent = 100 * fail_count / len(voxel_idx) LGR.debug( - "With {0} echoes, monoexponential fit failed on {1}/{2} " - "({3:.2f}%) voxel(s), used log linear estimate " - "instead".format(echo_num, fail_count, len(voxel_idx), fail_percent) + f"With {echo_num} echoes, monoexponential fit failed on " + f"{fail_count}/{len(voxel_idx)} ({fail_percent:.2f}%) voxel(s), " + "used log linear estimate instead" ) t2s_asc_maps[:, i_echo] = t2s_full @@ -221,7 +214,7 @@ def fit_loglinear(data_cat, echo_times, adaptive_mask, report=True): Returns ------- - t2s_limited, s0_limited, t2s_full, s0_full: (S,) :obj:`numpy.ndarray` + t2s_limited, s0_limited, t2s_full, s0_full : (S,) :obj:`numpy.ndarray` T2* and S0 estimate maps. Notes @@ -275,10 +268,10 @@ def fit_loglinear(data_cat, echo_times, adaptive_mask, report=True): # make IV matrix: intercept/TEs x (time series * echos) x = np.column_stack([np.ones(echo_num), [-te for te in echo_times[:echo_num]]]) - X = np.repeat(x, n_vols, axis=0) + iv_arr = np.repeat(x, n_vols, axis=0) # Log-linear fit - betas = np.linalg.lstsq(X, log_data, rcond=None)[0] + betas = np.linalg.lstsq(iv_arr, log_data, rcond=None)[0] t2s = 1.0 / betas[1, :].T s0 = np.exp(betas[0, :]).T @@ -298,8 +291,7 @@ def fit_loglinear(data_cat, echo_times, adaptive_mask, report=True): def fit_decay(data, tes, mask, adaptive_mask, fittype, report=True): - """ - Fit voxel-wise monoexponential decay models to `data` + """Fit voxel-wise monoexponential decay models to ``data``. Parameters ---------- @@ -338,6 +330,11 @@ def fit_decay(data, tes, mask, adaptive_mask, fittype, report=True): only one echo, the full map uses the S0 estimate from the first two echoes. 
+ See Also + -------- + : func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` + parameter. + Notes ----- This function replaces infinite values in the :math:`T_2^*` map with 500 and @@ -345,22 +342,16 @@ def fit_decay(data, tes, mask, adaptive_mask, fittype, report=True): Additionally, very small :math:`T_2^*` values above zero are replaced with a floor value to prevent zero-division errors later on in the workflow. It also replaces NaN values in the :math:`S_0` map with 0. - - See Also - -------- - :func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` - parameter. """ if data.shape[1] != len(tes): raise ValueError( - "Second dimension of data ({0}) does not match number " - "of echoes provided (tes; {1})".format(data.shape[1], len(tes)) + f"Second dimension of data ({data.shape[1]}) does not match number " + f"of echoes provided (tes; {len(tes)})" ) elif not (data.shape[0] == mask.shape[0] == adaptive_mask.shape[0]): raise ValueError( - "First dimensions (number of samples) of data ({0}), " - "mask ({1}), and adaptive_mask ({2}) do not " - "match".format(data.shape[0], mask.shape[0], adaptive_mask.shape[0]) + f"First dimensions (number of samples) of data ({data.shape[0]}), " + f"mask ({mask.shape[0]}), and adaptive_mask ({adaptive_mask.shape[0]}) do not match" ) data = data.copy() @@ -380,7 +371,7 @@ def fit_decay(data, tes, mask, adaptive_mask, fittype, report=True): data_masked, tes, adaptive_mask_masked, report=report ) else: - raise ValueError("Unknown fittype option: {}".format(fittype)) + raise ValueError(f"Unknown fittype option: {fittype}") t2s_limited[np.isinf(t2s_limited)] = 500.0 # why 500? # let's get rid of negative values, but keep zeros where limited != full @@ -400,15 +391,14 @@ def fit_decay(data, tes, mask, adaptive_mask, fittype, report=True): # set a hard cap for the T2* map # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile cap_t2s = stats.scoreatpercentile(t2s_limited.flatten(), 99.5, interpolation_method="lower") - LGR.debug("Setting cap on T2* map at {:.5f}".format(cap_t2s * 10)) + LGR.debug(f"Setting cap on T2* map at {cap_t2s * 10:.5f}") t2s_limited[t2s_limited > cap_t2s * 10] = cap_t2s return t2s_limited, s0_limited, t2s_full, s0_full def fit_decay_ts(data, tes, mask, adaptive_mask, fittype): - """ - Fit voxel- and timepoint-wise monoexponential decay models to `data` + """Fit voxel- and timepoint-wise monoexponential decay models to ``data``. Parameters ---------- @@ -447,8 +437,8 @@ def fit_decay_ts(data, tes, mask, adaptive_mask, fittype): See Also -------- - :func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` - parameter. + : func:`tedana.utils.make_adaptive_mask` : The function used to create the ``adaptive_mask`` + parameter. 
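To make the decay model concrete: the log-linear variant shown above in fit_loglinear regresses log(signal) on an intercept and -TE, so the fitted intercept is log(S0) and the slope is 1/T2*. A minimal single-voxel sketch with made-up echo times and parameters (not tedana's code):

import numpy as np

tes = np.array([14.5, 38.5, 62.5])                  # echo times in ms (illustrative)
s0_true, t2s_true = 1000.0, 35.0
signal = s0_true * np.exp(-tes / t2s_true)          # monoexponential decay model

design = np.column_stack([np.ones(tes.size), -tes])             # intercept and -TE regressors
betas = np.linalg.lstsq(design, np.log(signal), rcond=None)[0]
s0_hat, t2s_hat = np.exp(betas[0]), 1.0 / betas[1]
print(round(s0_hat, 1), round(t2s_hat, 1))                       # ~1000.0, ~35.0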
""" n_samples, _, n_vols = data.shape tes = np.array(tes) diff --git a/tedana/decomposition/__init__.py b/tedana/decomposition/__init__.py index dbe480f97..a469d21e1 100644 --- a/tedana/decomposition/__init__.py +++ b/tedana/decomposition/__init__.py @@ -1,7 +1,7 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: - -from .ica import tedica -from .pca import tedpca +"""Functions for decomposing BOLD signals.""" +from tedana.decomposition.ica import tedica +from tedana.decomposition.pca import tedpca __all__ = ["tedpca", "tedica"] diff --git a/tedana/decomposition/_utils.py b/tedana/decomposition/_utils.py deleted file mode 100644 index a6c1adf8d..000000000 --- a/tedana/decomposition/_utils.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Utility functions for tedana decomposition -""" -import logging - -import numpy as np -from scipy import stats - -LGR = logging.getLogger("GENERAL") -RepLGR = logging.getLogger("REPORT") - - -def eimask(dd, ees=None): - """ - Returns mask for data between [0.001, 5] * 98th percentile of dd - - Parameters - ---------- - dd : (S x E x T) array_like - Input data, where `S` is samples, `E` is echos, and `T` is time - ees : (N,) :obj:`list` - Indices of echos to assess from `dd` in calculating output - - Returns - ------- - imask : (S x N) :obj:`numpy.ndarray` - Boolean array denoting - """ - - if ees is None: - ees = range(dd.shape[1]) - imask = np.zeros((dd.shape[0], len(ees)), dtype=bool) - for ee in ees: - if len(ees) == 1: - LGR.debug("Creating eimask for optimal combination") - else: - LGR.debug("Creating eimask for echo {}".format(ee)) - perc98 = stats.scoreatpercentile(dd[:, ee, :].flatten(), 98, interpolation_method="lower") - lthr, hthr = 0.001 * perc98, 5 * perc98 - LGR.debug("Eimask threshold boundaries: {:.03f} {:.03f}".format(lthr, hthr)) - m = dd[:, ee, :].mean(axis=1) - imask[np.logical_and(m > lthr, m < hthr), ee] = True - - return imask diff --git a/tedana/decomposition/ica.py b/tedana/decomposition/ica.py index ff69626a7..0365f81b8 100644 --- a/tedana/decomposition/ica.py +++ b/tedana/decomposition/ica.py @@ -1,6 +1,4 @@ -""" -ICA and related signal decomposition methods for tedana -""" +"""ICA and related signal decomposition methods for tedana.""" import logging import warnings @@ -13,8 +11,7 @@ def tedica(data, n_components, fixed_seed, maxit=500, maxrestart=10): - """ - Perform ICA on `data` and returns mixing matrix + """Perform ICA on ``data`` and return mixing matrix. 
Parameters ---------- @@ -72,16 +69,15 @@ def tedica(data, n_components, fixed_seed, maxit=500, maxrestart=10): w = list(filter(lambda i: issubclass(i.category, UserWarning), w)) if len(w): LGR.warning( - "ICA with random seed {0} failed to converge after {1} " - "iterations".format(fixed_seed, ica.n_iter_) + f"ICA with random seed {fixed_seed} failed to converge after {ica.n_iter_} " + "iterations" ) if i_attempt < maxrestart - 1: fixed_seed += 1 - LGR.warning("Random seed updated to {0}".format(fixed_seed)) + LGR.warning(f"Random seed updated to {fixed_seed}") else: LGR.info( - "ICA with random seed {0} converged in {1} " - "iterations".format(fixed_seed, ica.n_iter_) + f"ICA with random seed {fixed_seed} converged in {ica.n_iter_} iterations" ) break diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index c95afaa52..2847a7424 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -1,6 +1,4 @@ -""" -PCA and related signal decomposition methods for tedana -""" +"""PCA and related signal decomposition methods for tedana.""" import logging from numbers import Number @@ -20,8 +18,7 @@ def low_mem_pca(data): - """ - Run Singular Value Decomposition (SVD) on input data. + """Run Singular Value Decomposition (SVD) on input data. Parameters ---------- @@ -53,21 +50,16 @@ def low_mem_pca(data): def tedpca( data_cat, data_oc, - combmode, mask, adaptive_mask, - t2sG, io_generator, tes, algorithm="aic", kdaw=10.0, rdaw=1.0, - verbose=False, low_mem=False, ): - """ - Use principal components analysis (PCA) to identify and remove thermal - noise from multi-echo data. + r"""Use principal components analysis (PCA) to identify and remove thermal noise from data. Parameters ---------- @@ -75,10 +67,6 @@ def tedpca( Input functional data data_oc : (S x T) array_like Optimally combined time series data - combmode : {'t2s', 'paid'} str - How optimal combination of echos should be made, where 't2s' indicates - using the method of Posse 1999 and 'paid' indicates using the method of - Poser 2006 mask : (S,) array_like Boolean mask array adaptive_mask : (S,) array_like @@ -86,8 +74,6 @@ def tedpca( for that voxel. This mask may be thresholded; for example, with values less than 3 set to 0. For more information on thresholding, see `make_adaptive_mask`. - t2sG : (S,) array_like - Map of voxel-wise T2* estimates. io_generator : :obj:`tedana.io.OutputGenerator` The output generation object for this workflow tes : :obj:`list` @@ -108,8 +94,6 @@ def tedpca( rdaw : :obj:`float`, optional Dimensionality augmentation weight for Rho calculations when `algorithm` is 'kundu'. Must be a non-negative float, or -1 (a special value). Default is 1. - verbose : :obj:`bool`, optional - Whether to output files from fitmodels_direct or not. Default: False low_mem : :obj:`bool`, optional Whether to use incremental PCA (for low-memory systems) or not. This is only compatible with the "kundu" or "kundu-stabilize" algorithms. @@ -127,13 +111,13 @@ def tedpca( ====================== ================================================= Notation Meaning ====================== ================================================= - :math:`\\kappa` Component pseudo-F statistic for TE-dependent + :math:`\kappa` Component pseudo-F statistic for TE-dependent (BOLD) model. - :math:`\\rho` Component pseudo-F statistic for TE-independent + :math:`\rho` Component pseudo-F statistic for TE-independent (artifact) model. 
:math:`v` Voxel :math:`V` Total number of voxels in mask - :math:`\\zeta` Something + :math:`\zeta` Something :math:`c` Component :math:`p` Something else ====================== ================================================= @@ -143,22 +127,21 @@ def tedpca( 1. Variance normalize either multi-echo or optimally combined data, depending on settings. 2. Decompose normalized data using PCA or SVD. - 3. Compute :math:`{\\kappa}` and :math:`{\\rho}`: + 3. Compute :math:`{\kappa}` and :math:`{\rho}`: .. math:: - {\\kappa}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \ - F_{c,v,R_2^*}}{\\sum {\\zeta}_{c,v}^p} + {\kappa}_c = \frac{\sum_{v}^V {\zeta}_{c,v}^p * \ + F_{c,v,R_2^*}}{\sum {\zeta}_{c,v}^p} - {\\rho}_c = \\frac{\\sum_{v}^V {\\zeta}_{c,v}^p * \ - F_{c,v,S_0}}{\\sum {\\zeta}_{c,v}^p} + {\rho}_c = \frac{\sum_{v}^V {\zeta}_{c,v}^p * \ + F_{c,v,S_0}}{\sum {\zeta}_{c,v}^p} 4. Some other stuff. Something about elbows. 5. Classify components as thermal noise if they meet both of the following criteria: - - Nonsignificant :math:`{\\kappa}` and :math:`{\\rho}`. + - Nonsignificant :math:`{\kappa}` and :math:`{\rho}`. - Nonsignificant variance explained. - Generated Files --------------- diff --git a/tedana/docs.py b/tedana/docs.py index 8154d716a..515f1c4f7 100644 --- a/tedana/docs.py +++ b/tedana/docs.py @@ -113,7 +113,6 @@ def _indentcount_lines(lines): 1 >>> _indentcount_lines([' ']) 0 - """ indentno = sys.maxsize for line in lines: @@ -137,7 +136,6 @@ def fill_doc(f): ------- f : callable The function, potentially with an updated ``__doc__``. - """ docstring = f.__doc__ if not docstring: diff --git a/tedana/gscontrol.py b/tedana/gscontrol.py index 61e8cedc5..d365e2bd7 100644 --- a/tedana/gscontrol.py +++ b/tedana/gscontrol.py @@ -1,6 +1,4 @@ -""" -Global signal control methods -""" +"""Global signal control methods.""" import logging import numpy as np @@ -15,8 +13,7 @@ def gscontrol_raw(catd, optcom, n_echos, io_generator, dtrank=4): - """ - Removes global signal from individual echo `catd` and `optcom` time series + """Remove global signal from individual echo ``catd`` and ``optcom`` time series. This function uses the spatial global signal estimation approach to to removal global signal out of individual echo time series datasets. 
The @@ -52,37 +49,35 @@ def gscontrol_raw(catd, optcom, n_echos, io_generator, dtrank=4): ) if catd.shape[0] != optcom.shape[0]: raise ValueError( - "First dimensions of catd ({0}) and optcom ({1}) do not " - "match".format(catd.shape[0], optcom.shape[0]) + f"First dimensions of catd ({catd.shape[0]}) and optcom ({optcom.shape[0]}) do not " + "match" ) elif catd.shape[1] != n_echos: raise ValueError( - "Second dimension of catd ({0}) does not match " - "n_echos ({1})".format(catd.shape[1], n_echos) + f"Second dimension of catd ({catd.shape[1]}) does not match n_echos ({n_echos})" ) elif catd.shape[2] != optcom.shape[1]: raise ValueError( - "Third dimension of catd ({0}) does not match " - "second dimension of optcom " - "({1})".format(catd.shape[2], optcom.shape[1]) + f"Third dimension of catd ({catd.shape[2]}) does not match second dimension of optcom " + f"({optcom.shape[1]})" ) # Legendre polynomial basis for denoising bounds = np.linspace(-1, 1, optcom.shape[-1]) - Lmix = np.column_stack([lpmv(0, vv, bounds) for vv in range(dtrank)]) + legendre_arr = np.column_stack([lpmv(0, vv, bounds) for vv in range(dtrank)]) # compute mean, std, mask local to this function # inefficient, but makes this function a bit more modular - Gmu = optcom.mean(axis=-1) # temporal mean - Gmask = Gmu != 0 + temporal_mean = optcom.mean(axis=-1) # temporal mean + temporal_mean_mask = temporal_mean != 0 # find spatial global signal - dat = optcom[Gmask] - Gmu[Gmask][:, np.newaxis] - sol = np.linalg.lstsq(Lmix, dat.T, rcond=None)[0] # Legendre basis for detrending - detr = dat - np.dot(sol.T, Lmix.T)[0] + dat = optcom[temporal_mean_mask] - temporal_mean[temporal_mean_mask][:, np.newaxis] + sol = np.linalg.lstsq(legendre_arr, dat.T, rcond=None)[0] # Legendre basis for detrending + detr = dat - np.dot(sol.T, legendre_arr.T)[0] sphis = (detr).min(axis=1) sphis -= sphis.mean() - io_generator.save_file(utils.unmask(sphis, Gmask), "gs img") + io_generator.save_file(utils.unmask(sphis, temporal_mean_mask), "gs img") # find time course ofc the spatial global signal # make basis with the Legendre basis @@ -91,35 +86,33 @@ def gscontrol_raw(catd, optcom, n_echos, io_generator, dtrank=4): glsig_df = pd.DataFrame(data=glsig.T, columns=["global_signal"]) io_generator.save_file(glsig_df, "global signal time series tsv") - glbase = np.hstack([Lmix, glsig.T]) + glbase = np.hstack([legendre_arr, glsig.T]) # Project global signal out of optimally combined data sol = np.linalg.lstsq(np.atleast_2d(glbase), dat.T, rcond=None)[0] tsoc_nogs = ( dat - np.dot(np.atleast_2d(sol[dtrank]).T, np.atleast_2d(glbase.T[dtrank])) - + Gmu[Gmask][:, np.newaxis] + + temporal_mean[temporal_mean_mask][:, np.newaxis] ) io_generator.save_file(optcom, "has gs combined img") - dm_optcom = utils.unmask(tsoc_nogs, Gmask) + dm_optcom = utils.unmask(tsoc_nogs, temporal_mean_mask) io_generator.save_file(dm_optcom, "removed gs combined img") # Project glbase out of each echo dm_catd = catd.copy() # don't overwrite catd for echo in range(n_echos): - dat = dm_catd[:, echo, :][Gmask] + dat = dm_catd[:, echo, :][temporal_mean_mask] sol = np.linalg.lstsq(np.atleast_2d(glbase), dat.T, rcond=None)[0] e_nogs = dat - np.dot(np.atleast_2d(sol[dtrank]).T, np.atleast_2d(glbase.T[dtrank])) - dm_catd[:, echo, :] = utils.unmask(e_nogs, Gmask) + dm_catd[:, echo, :] = utils.unmask(e_nogs, temporal_mean_mask) return dm_catd, dm_optcom def minimum_image_regression(optcom_ts, mmix, mask, comptable, io_generator): - """ - Perform minimum image regression (MIR) to remove T1-like 
effects from - BOLD-like components. + """Perform minimum image regression (MIR) to remove T1-like effects from BOLD-like components. While this method has not yet been described in detail in any publications, we recommend that users cite :footcite:t:`kundu2013integrated`. @@ -201,28 +194,28 @@ def minimum_image_regression(optcom_ts, mmix, mask, comptable, io_generator): glob_sig = np.linalg.lstsq(t1_map, optcom_z, rcond=None)[0] # Remove T1-like global signal from MEHK time series - mehk_noT1gs = mehk_ts - np.dot( + mehk_no_t1_gs = mehk_ts - np.dot( np.linalg.lstsq(glob_sig.T, mehk_ts.T, rcond=None)[0].T, glob_sig ) - hik_ts = mehk_noT1gs * optcom_std # rescale + hik_ts = mehk_no_t1_gs * optcom_std # rescale io_generator.save_file(utils.unmask(hik_ts, mask), "ICA accepted mir denoised img") # Make denoised version of T1-corrected time series - medn_ts = optcom_mean + ((mehk_noT1gs + resid) * optcom_std) + medn_ts = optcom_mean + ((mehk_no_t1_gs + resid) * optcom_std) io_generator.save_file(utils.unmask(medn_ts, mask), "mir denoised img") # Orthogonalize mixing matrix w.r.t. T1-GS - mmix_noT1gs = mmix.T - np.dot(np.linalg.lstsq(glob_sig.T, mmix, rcond=None)[0].T, glob_sig) - mmix_noT1gs_z = stats.zscore(mmix_noT1gs, axis=-1) - mmix_noT1gs_z = np.vstack( - (np.atleast_2d(np.ones(max(glob_sig.shape))), glob_sig, mmix_noT1gs_z) + mmix_no_t1_gs = mmix.T - np.dot(np.linalg.lstsq(glob_sig.T, mmix, rcond=None)[0].T, glob_sig) + mmix_no_t1_gs_z = stats.zscore(mmix_no_t1_gs, axis=-1) + mmix_no_t1_gs_z = np.vstack( + (np.atleast_2d(np.ones(max(glob_sig.shape))), glob_sig, mmix_no_t1_gs_z) ) # Write T1-corrected components and mixing matrix - comp_pes_norm = np.linalg.lstsq(mmix_noT1gs_z.T, optcom_z.T, rcond=None)[0].T + comp_pes_norm = np.linalg.lstsq(mmix_no_t1_gs_z.T, optcom_z.T, rcond=None)[0].T io_generator.save_file( utils.unmask(comp_pes_norm[:, 2:], mask), "ICA accepted mir component weights img", ) - mixing_df = pd.DataFrame(data=mmix_noT1gs.T, columns=comptable["Component"].values) + mixing_df = pd.DataFrame(data=mmix_no_t1_gs.T, columns=comptable["Component"].values) io_generator.save_file(mixing_df, "ICA MIR mixing tsv") diff --git a/tedana/io.py b/tedana/io.py index 0d99fc9b7..d4342fe04 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -1,4 +1,4 @@ -"""The io module handles most file input and output in the `tedana` workflow. +"""Handle most file input and output in the `tedana` workflow. Other functions in the module help write outputs which require multiple data sources, assist in writing per-echo verbose outputs, or act as helper @@ -39,6 +39,7 @@ class CustomEncoder(json.JSONEncoder): """ def default(self, obj): + """Return the default encoder for CustomEncoder.""" # int64 non-serializable but is a numpy output if isinstance(obj, np.int32) or isinstance(obj, np.int64): return int(obj) @@ -345,6 +346,13 @@ def save_tsv(self, data, name): deblanked.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) def save_self(self): + """Save the registry to a json file. + + Returns + ------- + fname + Full file path for output file. + """ fname = self.save_file(self.registry, "registry json") return fname @@ -364,6 +372,13 @@ def __init__(self, path): self._registry = load_json(self._full_path) def get_file_path(self, description): + """Get file path. + + Parameters + ---------- + description : str + Description of the file to get the path for. 
+ """ if description in self._registry.keys(): return op.join(self._base_dir, self._registry[description]) else: @@ -371,6 +386,7 @@ def get_file_path(self, description): def get_file_contents(self, description): """Get file contents. + Notes ----- Since we restrict to just these three types, this function should always return. @@ -390,6 +406,18 @@ def registry(self): def versiontuple(v): + """Convert a version string into a tuple of ints. + + Parameters + ---------- + v : str + Version string to convert. + + Returns + ------- + tuple + Tuple of ints corresponding to the version string. + """ return tuple(map(int, (v.split(".")))) @@ -413,7 +441,7 @@ def load_json(path: str) -> dict: Parameters ---------- - path: str + path : str The path to the json file to load Returns @@ -455,8 +483,8 @@ def add_decomp_prefix(comp_num, prefix, max_value): Component name in the form _ """ n_digits = int(np.log10(max_value)) + 1 - comp_name = "{0:08d}".format(int(comp_num)) - comp_name = "{0}_{1}".format(prefix, comp_name[8 - n_digits :]) + comp_name = f"{int(comp_num):08d}" + comp_name = f"{prefix}_{comp_name[8 - n_digits :]}" return comp_name @@ -495,7 +523,7 @@ def denoise_ts(data, mmix, mask, comptable): # get variance explained by retained components betas = get_coeffs(dmdata.T, mmix, mask=None) varexpl = (1 - ((dmdata.T - betas.dot(mmix.T)) ** 2.0).sum() / (dmdata**2.0).sum()) * 100 - LGR.info("Variance explained by decomposition: {:.02f}%".format(varexpl)) + LGR.info(f"Variance explained by decomposition: {varexpl:.02f}%") # create component-based data hikts = utils.unmask(betas[:, acc].dot(mmix.T[acc, :]), mask) @@ -521,7 +549,7 @@ def write_split_ts(data, mmix, mask, comptable, io_generator, echo=0): Reference image to dictate how outputs are saved to disk out_dir : :obj:`str`, optional Output directory. - echo: :obj: `int`, optional + echo : :obj: `int`, optional Echo number to generate filenames, used by some verbose functions. Default 0. @@ -551,24 +579,24 @@ def write_split_ts(data, mmix, mask, comptable, io_generator, echo=0): fout = io_generator.save_file(hikts, "high kappa ts split img", echo=echo) else: fout = io_generator.save_file(hikts, "high kappa ts img") - LGR.info("Writing high-Kappa time series: {}".format(fout)) + LGR.info(f"Writing high-Kappa time series: {fout}") if len(rej) != 0: if echo != 0: fout = io_generator.save_file(lowkts, "low kappa ts split img", echo=echo) else: fout = io_generator.save_file(lowkts, "low kappa ts img") - LGR.info("Writing low-Kappa time series: {}".format(fout)) + LGR.info(f"Writing low-Kappa time series: {fout}") if echo != 0: fout = io_generator.save_file(dnts, "denoised ts split img", echo=echo) else: fout = io_generator.save_file(dnts, "denoised ts img") - LGR.info("Writing denoised time series: {}".format(fout)) + LGR.info(f"Writing denoised time series: {fout}") -def writeresults(ts, mask, comptable, mmix, n_vols, io_generator): +def writeresults(ts, mask, comptable, mmix, io_generator): """Denoise `ts` and save all resulting files to disk. 
Parameters @@ -584,11 +612,13 @@ def writeresults(ts, mask, comptable, mmix, n_vols, io_generator): mmix : (C x T) array_like Mixing matrix for converting input data to component space, where `C` is components and `T` is the same as in `data` - n_vols : :obj:`int` - Number of volumes in original time series ref_img : :obj:`str` or img_like Reference image to dictate how outputs are saved to disk + See Also + -------- + tedana.io.write_split_ts : Writes out time series files + Generated Files --------------- @@ -605,27 +635,23 @@ def writeresults(ts, mask, comptable, mmix, n_vols, io_generator): desc-ICAAccepted_stat-z_components.nii.gz Z-normalized spatial component maps for accepted components. ========================================= ===================================== - - See Also - -------- - tedana.io.write_split_ts: Writes out time series files """ acc = comptable[comptable.classification == "accepted"].index.values write_split_ts(ts, mmix, mask, comptable, io_generator) - ts_B = get_coeffs(ts, mmix, mask) - fout = io_generator.save_file(ts_B, "ICA components img") - LGR.info("Writing full ICA coefficient feature set: {}".format(fout)) + ts_pes = get_coeffs(ts, mmix, mask) + fout = io_generator.save_file(ts_pes, "ICA components img") + LGR.info(f"Writing full ICA coefficient feature set: {fout}") if len(acc) != 0: - fout = io_generator.save_file(ts_B[:, acc], "ICA accepted components img") - LGR.info("Writing denoised ICA coefficient feature set: {}".format(fout)) + fout = io_generator.save_file(ts_pes[:, acc], "ICA accepted components img") + LGR.info(f"Writing denoised ICA coefficient feature set: {fout}") # write feature versions of components feats = computefeats2(split_ts(ts, mmix, mask, comptable)[0], mmix[:, acc], mask) feats = utils.unmask(feats, mask) fname = io_generator.save_file(feats, "z-scored ICA accepted components img") - LGR.info("Writing Z-normalized spatial component maps: {}".format(fname)) + LGR.info(f"Writing Z-normalized spatial component maps: {fname}") def writeresults_echoes(catd, mmix, mask, comptable, io_generator): @@ -646,6 +672,10 @@ def writeresults_echoes(catd, mmix, mask, comptable, io_generator): ref_img : :obj:`str` or img_like Reference image to dictate how outputs are saved to disk + See Also + -------- + tedana.io.write_split_ts : Writes out the files. + Generated Files --------------- @@ -659,13 +689,9 @@ def writeresults_echoes(catd, mmix, mask, comptable, io_generator): echo-[echo]_desc-Denoised_bold.nii.gz Denoised timeseries for echo number ``echo``. ===================================== =================================== - - See Also - -------- - tedana.io.write_split_ts: Writes out the files. """ for i_echo in range(catd.shape[1]): - LGR.info("Writing Kappa-filtered echo #{:01d} timeseries".format(i_echo + 1)) + LGR.info(f"Writing Kappa-filtered echo #{i_echo + 1:01d} timeseries") write_split_ts(catd[:, i_echo, :], mmix, mask, comptable, io_generator, echo=(i_echo + 1)) @@ -804,15 +830,15 @@ def prep_data_for_json(d) -> dict: d : dict A dictionary that will be converted into something JSON serializable + Returns + ------- + An attempt at JSON serializable data + Raises ------ ValueError if it cannot force the dictionary to be serializable TypeError if you do not supply a dict - Returns - ------- - An attempt at JSON serializable data - Notes ----- Use this to make something serializable when writing JSON to disk. 
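For context on the serialization helpers in this file (CustomEncoder.default above and prep_data_for_json here): the stdlib json module rejects NumPy integer types, so they must be coerced to plain ints before writing. A small sketch of that pattern; the encoder name below is hypothetical, not tedana's class.

import json

import numpy as np

class NumpyIntEncoder(json.JSONEncoder):            # hypothetical name, mirroring CustomEncoder.default
    def default(self, obj):
        # np.int32/np.int64 are common NumPy outputs but are not JSON serializable as-is
        if isinstance(obj, (np.int32, np.int64)):
            return int(obj)
        return super().default(obj)

d = {"n_components": np.int64(12)}
# json.dumps(d) alone raises TypeError; the custom encoder converts the value first
print(json.dumps(d, cls=NumpyIntEncoder))           # {"n_components": 12}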
@@ -848,7 +874,7 @@ def str_to_component_list(s: str) -> List[int]: Parameters ---------- - s: str + s : str The string to convert into a list of component indices. Returns @@ -895,7 +921,7 @@ def fname_to_component_list(fname: str) -> List[int]: Parameters ---------- - fname: str + fname : str The name of the file to read the list of component indices from. Returns diff --git a/tedana/metrics/__init__.py b/tedana/metrics/__init__.py index 5e9dadd7b..a2873b93c 100644 --- a/tedana/metrics/__init__.py +++ b/tedana/metrics/__init__.py @@ -2,6 +2,6 @@ # ex: set sts=4 ts=4 sw=4 et: """TE-dependence and TE-independence metrics.""" -from .collect import generate_metrics +from tedana.metrics.collect import generate_metrics __all__ = ["generate_metrics"] diff --git a/tedana/metrics/_utils.py b/tedana/metrics/_utils.py index 299da94d0..699368491 100644 --- a/tedana/metrics/_utils.py +++ b/tedana/metrics/_utils.py @@ -27,13 +27,13 @@ def dependency_resolver(dict_, requested_metrics, base_inputs): Returns ------- - required_metrics :obj:`list` + required_metrics : obj:`list` A comprehensive list of all metrics and inputs required to generate all of the requested inputs. """ not_found = [k for k in requested_metrics if k not in dict_.keys()] if not_found: - raise ValueError("Unknown metric(s): {}".format(", ".join(not_found))) + raise ValueError(f"Unknown metric(s): {', '.join(not_found)}") required_metrics = requested_metrics escape_counter = 0 @@ -43,7 +43,7 @@ def dependency_resolver(dict_, requested_metrics, base_inputs): if k in dict_.keys(): new_metrics = dict_[k] elif k not in base_inputs: - print("Warning: {} not found".format(k)) + print(f"Warning: {k} not found") required_metrics_new += new_metrics if set(required_metrics) == set(required_metrics_new): # There are no more parent metrics to calculate @@ -138,6 +138,5 @@ def check_mask(data, mask): n_bad_voxels = len(zero_idx[0]) if n_bad_voxels > 0: raise ValueError( - "{0} voxels in masked data have zero variance. " - "Mask is too liberal.".format(n_bad_voxels) + f"{n_bad_voxels} voxels in masked data have zero variance. " "Mask is too liberal." 
) diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index f4c731876..8452d6592 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -61,25 +61,24 @@ def generate_metrics( if metrics is None: metrics = ["map weight"] - RepLGR.info("The following metrics were calculated: {}.".format(", ".join(metrics))) + RepLGR.info(f"The following metrics were calculated: {', '.join(metrics)}.") if not (data_cat.shape[0] == data_optcom.shape[0] == adaptive_mask.shape[0]): raise ValueError( - "First dimensions (number of samples) of data_cat ({0}), " - "data_optcom ({1}), and adaptive_mask ({2}) do not " - "match".format(data_cat.shape[0], data_optcom.shape[0], adaptive_mask.shape[0]) + f"First dimensions (number of samples) of data_cat ({data_cat.shape[0]}), " + f"data_optcom ({data_optcom.shape[0]}), and adaptive_mask " + f"({adaptive_mask.shape[0]}) do not match" ) elif data_cat.shape[1] != len(tes): raise ValueError( - "Second dimension of data_cat ({0}) does not match " - "number of echoes provided (tes; " - "{1})".format(data_cat.shape[1], len(tes)) + f"Second dimension of data_cat ({data_cat.shape[1]}) does not match " + f"number of echoes provided (tes; {len(tes)})" ) elif not (data_cat.shape[2] == data_optcom.shape[1] == mixing.shape[0]): raise ValueError( - "Number of volumes in data_cat ({0}), " - "data_optcom ({1}), and mixing ({2}) do not " - "match.".format(data_cat.shape[2], data_optcom.shape[1], mixing.shape[0]) + f"Number of volumes in data_cat ({data_cat.shape[2]}), " + f"data_optcom ({data_optcom.shape[1]}), and mixing ({mixing.shape[0]}) do not " + "match." ) # Derive mask from thresholded adaptive mask @@ -151,13 +150,13 @@ def generate_metrics( if ("map FT2" in required_metrics) or ("map FS0" in required_metrics): LGR.info("Calculating F-statistic maps") - m_T2, m_S0, p_m_T2, p_m_S0 = dependence.calculate_f_maps( + m_t2, m_s0, p_m_t2, p_m_s0 = dependence.calculate_f_maps( data_cat, metric_maps["map Z"], mixing, adaptive_mask, tes ) - metric_maps["map FT2"] = m_T2 - metric_maps["map FS0"] = m_S0 - metric_maps["map predicted T2"] = p_m_T2 - metric_maps["map predicted S0"] = p_m_S0 + metric_maps["map FT2"] = m_t2 + metric_maps["map FS0"] = m_s0 + metric_maps["map predicted T2"] = p_m_t2 + metric_maps["map predicted S0"] = p_m_s0 if io_generator.verbose: io_generator.save_file( @@ -220,9 +219,9 @@ def generate_metrics( if ("kappa" in required_metrics) or ("rho" in required_metrics): LGR.info("Calculating kappa and rho") comptable["kappa"], comptable["rho"] = dependence.calculate_dependence_metrics( - F_T2_maps=metric_maps["map FT2"], - F_S0_maps=metric_maps["map FS0"], - Z_maps=metric_maps["map Z"], + f_t2_maps=metric_maps["map FT2"], + f_s0_maps=metric_maps["map FS0"], + z_maps=metric_maps["map Z"], ) # Generic metrics @@ -275,9 +274,9 @@ def generate_metrics( comptable["signal-noise_t"], comptable["signal-noise_p"], ) = dependence.compute_signal_minus_noise_t( - Z_maps=metric_maps["map Z"], - Z_clmaps=metric_maps["map Z clusterized"], - F_T2_maps=metric_maps["map FT2"], + z_maps=metric_maps["map Z"], + z_clmaps=metric_maps["map Z clusterized"], + f_t2_maps=metric_maps["map FT2"], ) if "signal-noise_z" in required_metrics: @@ -323,12 +322,12 @@ def generate_metrics( # Write verbose metrics if needed if io_generator.verbose: write_betas = "map echo betas" in metric_maps - write_T2S0 = "map predicted T2" in metric_maps + write_t2s0 = "map predicted T2" in metric_maps if write_betas: betas = metric_maps["map echo betas"] - if write_T2S0: - pred_T2_maps = 
metric_maps["map predicted T2"] - pred_S0_maps = metric_maps["map predicted S0"] + if write_t2s0: + pred_t2_maps = metric_maps["map predicted T2"] + pred_s0_maps = metric_maps["map predicted S0"] for i_echo in range(len(tes)): if write_betas: @@ -339,17 +338,17 @@ def generate_metrics( echo=(i_echo + 1), ) - if write_T2S0: - echo_pred_T2_maps = pred_T2_maps[:, i_echo, :] + if write_t2s0: + echo_pred_t2_maps = pred_t2_maps[:, i_echo, :] io_generator.save_file( - utils.unmask(echo_pred_T2_maps, mask), + utils.unmask(echo_pred_t2_maps, mask), "echo T2 " + label + " split img", echo=(i_echo + 1), ) - echo_pred_S0_maps = pred_S0_maps[:, i_echo, :] + echo_pred_s0_maps = pred_s0_maps[:, i_echo, :] io_generator.save_file( - utils.unmask(echo_pred_S0_maps, mask), + utils.unmask(echo_pred_s0_maps, mask), "echo S0 " + label + " split img", echo=(i_echo + 1), ) @@ -385,11 +384,11 @@ def generate_metrics( def get_metadata(comptable): - """Fills in metric metadata for a given comptable + """Fill in metric metadata for a given comptable. Parameters ---------- - comptable: pandas.DataFrame + comptable : pandas.DataFrame The component table for this workflow Returns @@ -398,7 +397,6 @@ def get_metadata(comptable): which we have a metadata description, plus the "Component" metadata description (always). """ - metric_metadata = {} if "kappa" in comptable: metric_metadata["kappa"] = { diff --git a/tedana/metrics/dependence.py b/tedana/metrics/dependence.py index 1cdfe7852..85d1ee297 100644 --- a/tedana/metrics/dependence.py +++ b/tedana/metrics/dependence.py @@ -101,23 +101,23 @@ def calculate_z_maps(weights, z_max=8): Returns ------- - Z_maps : (M x C) array_like + z_maps : (M x C) array_like Z-statistic maps for components, reflecting voxel-wise component loadings. """ - Z_maps = stats.zscore(weights, axis=0) - extreme_idx = np.abs(Z_maps) > z_max - Z_maps[extreme_idx] = z_max * np.sign(Z_maps[extreme_idx]) - return Z_maps + z_maps = stats.zscore(weights, axis=0) + extreme_idx = np.abs(z_maps) > z_max + z_maps[extreme_idx] = z_max * np.sign(z_maps[extreme_idx]) + return z_maps -def calculate_f_maps(data_cat, Z_maps, mixing, adaptive_mask, tes, f_max=500): +def calculate_f_maps(data_cat, z_maps, mixing, adaptive_mask, tes, f_max=500): """Calculate pseudo-F-statistic maps for TE-dependence and -independence models. Parameters ---------- data_cat : (M x E x T) array_like Multi-echo data, already masked. - Z_maps : (M x C) array_like + z_maps : (M x C) array_like Z-statistic maps for components, reflecting voxel-wise component loadings. mixing : (T x C) array_like Mixing matrix @@ -132,14 +132,14 @@ def calculate_f_maps(data_cat, Z_maps, mixing, adaptive_mask, tes, f_max=500): Returns ------- - F_T2_maps, F_S0_maps, pred_T2_maps, pred_S0_maps : (M x C) array_like + f_t2_maps, f_s0_maps, pred_t2_maps, pred_s0_maps : (M x C) array_like Pseudo-F-statistic maps for TE-dependence and -independence models, respectively. 
""" - assert data_cat.shape[0] == Z_maps.shape[0] == adaptive_mask.shape[0] + assert data_cat.shape[0] == z_maps.shape[0] == adaptive_mask.shape[0] assert data_cat.shape[1] == tes.shape[0] assert data_cat.shape[2] == mixing.shape[0] - assert Z_maps.shape[1] == mixing.shape[1] + assert z_maps.shape[1] == mixing.shape[1] # TODO: Remove mask arg from get_coeffs me_betas = get_coeffs(data_cat, mixing, mask=np.ones(data_cat.shape[:2], bool), add_const=True) @@ -148,13 +148,13 @@ def calculate_f_maps(data_cat, Z_maps, mixing, adaptive_mask, tes, f_max=500): tes = np.reshape(tes, (n_echos, 1)) # set up Xmats - X1 = mu.T # Model 1 - X2 = np.tile(tes, (1, n_voxels)) * mu.T # Model 2 + x1 = mu.T # Model 1 + x2 = np.tile(tes, (1, n_voxels)) * mu.T # Model 2 - F_T2_maps = np.zeros([n_voxels, n_components]) - F_S0_maps = np.zeros([n_voxels, n_components]) - pred_T2_maps = np.zeros([n_voxels, len(tes), n_components]) - pred_S0_maps = np.zeros([n_voxels, len(tes), n_components]) + f_t2_maps = np.zeros([n_voxels, n_components]) + f_s0_maps = np.zeros([n_voxels, n_components]) + pred_t2_maps = np.zeros([n_voxels, len(tes), n_components]) + pred_s0_maps = np.zeros([n_voxels, len(tes), n_components]) for i_comp in range(n_components): # size of comp_betas is (n_echoes, n_samples) @@ -168,31 +168,31 @@ def calculate_f_maps(data_cat, Z_maps, mixing, adaptive_mask, tes, f_max=500): # S0 Model # (S,) model coefficient map - coeffs_S0 = (comp_betas[:j_echo] * X1[:j_echo, :]).sum(axis=0) / ( - X1[:j_echo, :] ** 2 + coeffs_s0 = (comp_betas[:j_echo] * x1[:j_echo, :]).sum(axis=0) / ( + x1[:j_echo, :] ** 2 ).sum(axis=0) - pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) - SSE_S0 = (comp_betas[:j_echo] - pred_S0) ** 2 - SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map - F_S0 = (alpha - SSE_S0) * (j_echo - 1) / (SSE_S0) - F_S0[F_S0 > f_max] = f_max - F_S0_maps[mask_idx, i_comp] = F_S0[mask_idx] + pred_s0 = x1[:j_echo, :] * np.tile(coeffs_s0, (j_echo, 1)) + sse_s0 = (comp_betas[:j_echo] - pred_s0) ** 2 + sse_s0 = sse_s0.sum(axis=0) # (S,) prediction error map + f_s0 = (alpha - sse_s0) * (j_echo - 1) / (sse_s0) + f_s0[f_s0 > f_max] = f_max + f_s0_maps[mask_idx, i_comp] = f_s0[mask_idx] # T2 Model - coeffs_T2 = (comp_betas[:j_echo] * X2[:j_echo, :]).sum(axis=0) / ( - X2[:j_echo, :] ** 2 + coeffs_t2 = (comp_betas[:j_echo] * x2[:j_echo, :]).sum(axis=0) / ( + x2[:j_echo, :] ** 2 ).sum(axis=0) - pred_T2 = X2[:j_echo] * np.tile(coeffs_T2, (j_echo, 1)) - SSE_T2 = (comp_betas[:j_echo] - pred_T2) ** 2 - SSE_T2 = SSE_T2.sum(axis=0) - F_T2 = (alpha - SSE_T2) * (j_echo - 1) / (SSE_T2) - F_T2[F_T2 > f_max] = f_max - F_T2_maps[mask_idx, i_comp] = F_T2[mask_idx] + pred_t2 = x2[:j_echo] * np.tile(coeffs_t2, (j_echo, 1)) + sse_t2 = (comp_betas[:j_echo] - pred_t2) ** 2 + sse_t2 = sse_t2.sum(axis=0) + f_t2 = (alpha - sse_t2) * (j_echo - 1) / (sse_t2) + f_t2[f_t2 > f_max] = f_max + f_t2_maps[mask_idx, i_comp] = f_t2[mask_idx] - pred_S0_maps[mask_idx, :j_echo, i_comp] = pred_S0.T[mask_idx, :] - pred_T2_maps[mask_idx, :j_echo, i_comp] = pred_T2.T[mask_idx, :] + pred_s0_maps[mask_idx, :j_echo, i_comp] = pred_s0.T[mask_idx, :] + pred_t2_maps[mask_idx, :j_echo, i_comp] = pred_t2.T[mask_idx, :] - return F_T2_maps, F_S0_maps, pred_T2_maps, pred_S0_maps + return f_t2_maps, f_s0_maps, pred_t2_maps, pred_s0_maps def threshold_map(maps, mask, ref_img, threshold, csize=None): @@ -305,17 +305,17 @@ def threshold_to_match(maps, n_sig_voxels, mask, ref_img, csize=None): return clmaps -def calculate_dependence_metrics(F_T2_maps, F_S0_maps, 
Z_maps): +def calculate_dependence_metrics(f_t2_maps, f_s0_maps, z_maps): """Calculate Kappa and Rho metrics from F-statistic maps. Just a weighted average over voxels. Parameters ---------- - F_T2_maps, F_S0_maps : (S x C) array_like + f_t2_maps, f_s0_maps : (S x C) array_like Pseudo-F-statistic maps for TE-dependence and -independence models, respectively. - Z_maps : (S x C) array_like + z_maps : (S x C) array_like Z-statistic maps for components, reflecting voxel-wise component loadings. Returns @@ -324,19 +324,19 @@ def calculate_dependence_metrics(F_T2_maps, F_S0_maps, Z_maps): Averaged pseudo-F-statistics for TE-dependence and -independence models, respectively. """ - assert F_T2_maps.shape == F_S0_maps.shape == Z_maps.shape + assert f_t2_maps.shape == f_s0_maps.shape == z_maps.shape RepLGR.info( "Kappa (kappa) and Rho (rho) were calculated as measures of " "TE-dependence and TE-independence, respectively." ) - weight_maps = Z_maps**2.0 - n_components = Z_maps.shape[1] + weight_maps = z_maps**2.0 + n_components = z_maps.shape[1] kappas, rhos = np.zeros(n_components), np.zeros(n_components) for i_comp in range(n_components): - kappas[i_comp] = np.average(F_T2_maps[:, i_comp], weights=weight_maps[:, i_comp]) - rhos[i_comp] = np.average(F_S0_maps[:, i_comp], weights=weight_maps[:, i_comp]) + kappas[i_comp] = np.average(f_t2_maps[:, i_comp], weights=weight_maps[:, i_comp]) + rhos[i_comp] = np.average(f_s0_maps[:, i_comp], weights=weight_maps[:, i_comp]) return kappas, rhos @@ -401,7 +401,7 @@ def compute_dice(clmaps1, clmaps2, axis=0): return dice_values -def compute_signal_minus_noise_z(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): +def compute_signal_minus_noise_z(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95): """Compare signal and noise z-statistic distributions with a two-sample t-test. Divide voxel-level thresholded F-statistic maps into distributions of @@ -412,11 +412,11 @@ def compute_signal_minus_noise_z(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): Parameters ---------- - Z_maps : (S x C) array_like + z_maps : (S x C) array_like Z-statistic maps for components, reflecting voxel-wise component loadings. - Z_clmaps : (S x C) array_like + z_clmaps : (S x C) array_like Cluster-extent thresholded Z-statistic maps for components. - F_T2_maps : (S x C) array_like + f_t2_maps : (S x C) array_like Pseudo-F-statistic maps for components from TE-dependence models. Each voxel reflects the model fit for the component weights to the TE-dependence model across echoes. @@ -430,33 +430,31 @@ def compute_signal_minus_noise_z(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): signal_minus_noise_p : (C) array_like P-values from component-wise signal > noise paired t-tests. 
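The kappa/rho computation in `calculate_dependence_metrics` above is easy to sanity-check outside of tedana. A minimal sketch with toy arrays (shapes and values are illustrative, not real data): each component's kappa and rho are just the Z²-weighted averages of its F_T2 and F_S0 maps.

```python
import numpy as np

# Toy S x C arrays standing in for the pseudo-F and Z maps (illustrative only).
rng = np.random.default_rng(0)
n_voxels, n_components = 1000, 3
f_t2_maps = rng.chisquare(df=2, size=(n_voxels, n_components))
f_s0_maps = rng.chisquare(df=2, size=(n_voxels, n_components))
z_maps = rng.standard_normal((n_voxels, n_components))

# Kappa and rho are Z^2-weighted averages of the TE-dependence and
# TE-independence pseudo-F maps, one value per component.
weight_maps = z_maps**2.0
kappas = np.array(
    [np.average(f_t2_maps[:, c], weights=weight_maps[:, c]) for c in range(n_components)]
)
rhos = np.array(
    [np.average(f_s0_maps[:, c], weights=weight_maps[:, c]) for c in range(n_components)]
)
print(kappas, rhos)
```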
""" - assert Z_maps.shape == Z_clmaps.shape == F_T2_maps.shape + assert z_maps.shape == z_clmaps.shape == f_t2_maps.shape - n_components = Z_maps.shape[1] + n_components = z_maps.shape[1] signal_minus_noise_z = np.zeros(n_components) signal_minus_noise_p = np.zeros(n_components) - noise_idx = (np.abs(Z_maps) > z_thresh) & (Z_clmaps == 0) + noise_idx = (np.abs(z_maps) > z_thresh) & (z_clmaps == 0) countnoise = noise_idx.sum(axis=0) - countsignal = Z_clmaps.sum(axis=0) + countsignal = z_clmaps.sum(axis=0) for i_comp in range(n_components): - noise_FT2_Z = 0.5 * np.log(F_T2_maps[noise_idx[:, i_comp], i_comp]) - signal_FT2_Z = 0.5 * np.log(F_T2_maps[Z_clmaps[:, i_comp] == 1, i_comp]) - n_noise_dupls = noise_FT2_Z.size - np.unique(noise_FT2_Z).size + noise_ft2_z = 0.5 * np.log(f_t2_maps[noise_idx[:, i_comp], i_comp]) + signal_ft2_z = 0.5 * np.log(f_t2_maps[z_clmaps[:, i_comp] == 1, i_comp]) + n_noise_dupls = noise_ft2_z.size - np.unique(noise_ft2_z).size if n_noise_dupls: LGR.debug( - "For component {}, {} duplicate noise F-values " - "detected.".format(i_comp, n_noise_dupls) + f"For component {i_comp}, {n_noise_dupls} duplicate noise F-values detected." ) - n_signal_dupls = signal_FT2_Z.size - np.unique(signal_FT2_Z).size + n_signal_dupls = signal_ft2_z.size - np.unique(signal_ft2_z).size if n_signal_dupls: LGR.debug( - "For component {}, {} duplicate signal F-values " - "detected.".format(i_comp, n_signal_dupls) + f"For component {i_comp}, {n_signal_dupls} duplicate signal F-values detected." ) dof = countnoise[i_comp] + countsignal[i_comp] - 2 t_value, signal_minus_noise_p[i_comp] = stats.ttest_ind( - signal_FT2_Z, noise_FT2_Z, equal_var=False + signal_ft2_z, noise_ft2_z, equal_var=False ) signal_minus_noise_z[i_comp] = t_to_z(t_value, dof) @@ -465,7 +463,7 @@ def compute_signal_minus_noise_z(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): return signal_minus_noise_z, signal_minus_noise_p -def compute_signal_minus_noise_t(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): +def compute_signal_minus_noise_t(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95): """Compare signal and noise t-statistic distributions with a two-sample t-test. Divide voxel-level thresholded F-statistic maps into distributions of @@ -475,11 +473,11 @@ def compute_signal_minus_noise_t(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): Parameters ---------- - Z_maps : (S x C) array_like + z_maps : (S x C) array_like Z-statistic maps for components, reflecting voxel-wise component loadings. - Z_clmaps : (S x C) array_like + z_clmaps : (S x C) array_like Cluster-extent thresholded Z-statistic maps for components. - F_T2_maps : (S x C) array_like + f_t2_maps : (S x C) array_like Pseudo-F-statistic maps for components from TE-dependence models. Each voxel reflects the model fit for the component weights to the TE-dependence model across echoes. @@ -493,18 +491,18 @@ def compute_signal_minus_noise_t(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95): signal_minus_noise_p : (C) array_like P-values from component-wise signal > noise paired t-tests. """ - assert Z_maps.shape == Z_clmaps.shape == F_T2_maps.shape + assert z_maps.shape == z_clmaps.shape == f_t2_maps.shape - n_components = Z_maps.shape[1] + n_components = z_maps.shape[1] signal_minus_noise_t = np.zeros(n_components) signal_minus_noise_p = np.zeros(n_components) - noise_idx = (np.abs(Z_maps) > z_thresh) & (Z_clmaps == 0) + noise_idx = (np.abs(z_maps) > z_thresh) & (z_clmaps == 0) for i_comp in range(n_components): # NOTE: Why only compare distributions of *unique* F-statistics? 
- noise_FT2_Z = np.log10(np.unique(F_T2_maps[noise_idx[:, i_comp], i_comp])) - signal_FT2_Z = np.log10(np.unique(F_T2_maps[Z_clmaps[:, i_comp] == 1, i_comp])) + noise_ft2_z = np.log10(np.unique(f_t2_maps[noise_idx[:, i_comp], i_comp])) + signal_ft2_z = np.log10(np.unique(f_t2_maps[z_clmaps[:, i_comp] == 1, i_comp])) (signal_minus_noise_t[i_comp], signal_minus_noise_p[i_comp]) = stats.ttest_ind( - signal_FT2_Z, noise_FT2_Z, equal_var=False + signal_ft2_z, noise_ft2_z, equal_var=False ) signal_minus_noise_t = np.nan_to_num(signal_minus_noise_t, 0) @@ -557,7 +555,7 @@ def compute_countnoise(stat_maps, stat_cl_maps, stat_thresh=1.95): return countnoise -def generate_decision_table_score(kappa, dice_FT2, signal_minus_noise_t, countnoise, countsigFT2): +def generate_decision_table_score(kappa, dice_ft2, signal_minus_noise_t, countnoise, countsig_ft2): """Generate a five-metric decision table. Metrics are ranked in either descending or ascending order if they measure TE-dependence or @@ -567,7 +565,7 @@ def generate_decision_table_score(kappa, dice_FT2, signal_minus_noise_t, countno ---------- kappa : (C) array_like Pseudo-F-statistics for TE-dependence model. - dice_FT2 : (C) array_like + dice_ft2 : (C) array_like Dice similarity index for cluster-extent thresholded beta maps and cluster-extent thresholded TE-dependence F-statistic maps. signal_minus_noise_t : (C) array_like @@ -575,7 +573,7 @@ def generate_decision_table_score(kappa, dice_FT2, signal_minus_noise_t, countno countnoise : (C) array_like Numbers of significant non-cluster voxels from the thresholded beta maps. - countsigFT2 : (C) array_like + countsig_ft2 : (C) array_like Numbers of significant voxels from clusters from the thresholded TE-dependence F-statistic maps. @@ -586,19 +584,19 @@ def generate_decision_table_score(kappa, dice_FT2, signal_minus_noise_t, countno """ assert ( kappa.shape - == dice_FT2.shape + == dice_ft2.shape == signal_minus_noise_t.shape == countnoise.shape - == countsigFT2.shape + == countsig_ft2.shape ) d_table_rank = np.vstack( [ len(kappa) - stats.rankdata(kappa), - len(kappa) - stats.rankdata(dice_FT2), + len(kappa) - stats.rankdata(dice_ft2), len(kappa) - stats.rankdata(signal_minus_noise_t), stats.rankdata(countnoise), - len(kappa) - stats.rankdata(countsigFT2), + len(kappa) - stats.rankdata(countsig_ft2), ] ).T d_table_score = d_table_rank.mean(axis=1) diff --git a/tedana/reporting/__init__.py b/tedana/reporting/__init__.py index 825cc6eb6..bb52bca1f 100644 --- a/tedana/reporting/__init__.py +++ b/tedana/reporting/__init__.py @@ -1,8 +1,6 @@ -""" -Reporting code for tedana -""" +"""Reporting code for tedana.""" -from .html_report import generate_report -from .static_figures import comp_figures, pca_results +from tedana.reporting.html_report import generate_report +from tedana.reporting.static_figures import comp_figures, pca_results __all__ = ["generate_report", "comp_figures", "pca_results"] diff --git a/tedana/reporting/data/README.md b/tedana/reporting/data/README.md index 8d5d5ba52..1410117cf 100644 --- a/tedana/reporting/data/README.md +++ b/tedana/reporting/data/README.md @@ -1,3 +1,3 @@ This directory contains data required for tedana reporting. 
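The decision-table score built by `generate_decision_table_score` above is a plain rank average. A small sketch with made-up metric values (not real components) shows the mechanics: TE-dependence metrics are ranked so that higher values get lower ranks, while countnoise is ranked ascending.

```python
import numpy as np
from scipy import stats

# Made-up component-wise metrics for four components (illustrative only).
kappa = np.array([80.0, 45.0, 20.0, 95.0])
dice_ft2 = np.array([0.6, 0.2, 0.1, 0.7])
signal_minus_noise_t = np.array([4.0, 1.0, -0.5, 5.0])
countnoise = np.array([10, 40, 80, 5])
countsig_ft2 = np.array([300, 120, 40, 450])

d_table_rank = np.vstack(
    [
        len(kappa) - stats.rankdata(kappa),
        len(kappa) - stats.rankdata(dice_ft2),
        len(kappa) - stats.rankdata(signal_minus_noise_t),
        stats.rankdata(countnoise),
        len(kappa) - stats.rankdata(countsig_ft2),
    ]
).T
d_table_score = d_table_rank.mean(axis=1)
print(d_table_score)
```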
-html/ : HTML templates \ No newline at end of file +html/ : HTML templates diff --git a/tedana/reporting/data/html/__init__.py b/tedana/reporting/data/html/__init__.py index e69de29bb..5000bb868 100644 --- a/tedana/reporting/data/html/__init__.py +++ b/tedana/reporting/data/html/__init__.py @@ -0,0 +1 @@ +"""HTML templates for tedana reporting.""" diff --git a/tedana/reporting/data/html/report_carpet_buttons_template.html b/tedana/reporting/data/html/report_carpet_buttons_template.html index 41422b86f..61c39e30a 100644 --- a/tedana/reporting/data/html/report_carpet_buttons_template.html +++ b/tedana/reporting/data/html/report_carpet_buttons_template.html @@ -18,4 +18,4 @@ \ No newline at end of file + diff --git a/tedana/reporting/dynamic_figures.py b/tedana/reporting/dynamic_figures.py index 2eef07dc3..a72748f53 100644 --- a/tedana/reporting/dynamic_figures.py +++ b/tedana/reporting/dynamic_figures.py @@ -1,3 +1,4 @@ +"""Dynamic figures for tedana report.""" from math import pi import numpy as np @@ -53,16 +54,16 @@ def _create_data_struct(comptable_path, color_mapping=color_mapping): """ - Create Bokeh ColumnDataSource with all info dynamic plots need + Create Bokeh ColumnDataSource with all info dynamic plots need. Parameters ---------- - comptable: str + comptable : str file path to component table, JSON format Returns ------- - cds: bokeh.models.ColumnDataSource + cds : bokeh.models.ColumnDataSource Data structure with all the fields to plot or hover over """ unused_cols = [ @@ -134,20 +135,20 @@ def _create_data_struct(comptable_path, color_mapping=color_mapping): def _create_kr_plt(comptable_cds, kappa_elbow=None, rho_elbow=None): """ - Create Dymamic Kappa/Rho Scatter Plot + Create Dymamic Kappa/Rho Scatter Plot. Parameters ---------- - comptable_cds: bokeh.models.ColumnDataSource + comptable_cds : bokeh.models.ColumnDataSource Data structure containing a limited set of columns from the comp_table - kappa_elbow, rho_elbow: :obj:`float` :obj:`int` + kappa_elbow, rho_elbow : :obj:`float` :obj:`int` The elbow thresholds for kappa and rho to display on the plots Defaults=None Returns ------- - fig: bokeh.plotting.figure.Figure + fig : bokeh.plotting.figure.Figure Bokeh scatter plot of kappa vs. rho """ # Create Panel for the Kappa - Rho Scatter @@ -234,35 +235,35 @@ def _create_sorted_plt( comptable_cds, n_comps, x_var, y_var, title=None, x_label=None, y_label=None, elbow=None ): """ - Create dynamic sorted plots + Create dynamic sorted plots. 
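As a rough illustration of what `_create_data_struct` and `_create_kr_plt` build, here is a stripped-down Bokeh sketch: a `ColumnDataSource` wrapping a toy component table and a kappa-vs-rho scatter. The column names, figure options, and data are illustrative; the real functions add hover tools, color mapping, and elbow lines, and `plot_width`/`plot_height` assume the Bokeh 2.x API used elsewhere in this module.

```python
import pandas as pd
from bokeh import models, plotting

# Toy component table; the real source is the tedana component-table file.
comptable = pd.DataFrame(
    {"Component": ["ICA_00", "ICA_01"], "kappa": [80.0, 25.0], "rho": [20.0, 60.0]}
)
comptable_cds = models.ColumnDataSource(comptable)

fig = plotting.figure(plot_width=400, plot_height=400, title="Kappa / Rho Plot")
fig.circle(x="kappa", y="rho", source=comptable_cds, size=5)
plotting.show(fig)
```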
Parameters ---------- - comptable_ds: bokeh.models.ColumnDataSource + comptable_ds : bokeh.models.ColumnDataSource Data structure containing a limited set of columns from the comp_table - x_var: str + x_var : str Name of variable for the x-axis - y_var: str + y_var : str Name of variable for the y-axis - title: str + title : str Plot title - x_label: str + x_label : str X-axis label - y_label: str + y_label : str Y-axis label - elbow: :obj:`float` :obj:`int` + elbow : :obj:`float` :obj:`int` The elbow threshold for kappa or rho to display on the plot Default=None Returns ------- - fig: bokeh.plotting.figure.Figure + fig : bokeh.plotting.figure.Figure Bokeh plot of components ranked by a given feature """ hovertool = models.HoverTool( @@ -316,7 +317,7 @@ def _create_sorted_plt( return fig -def _create_varexp_pie_plt(comptable_cds, n_comps): +def _create_varexp_pie_plt(comptable_cds): fig = plotting.figure( plot_width=400, plot_height=400, @@ -353,21 +354,21 @@ def _create_varexp_pie_plt(comptable_cds, n_comps): def _tap_callback(comptable_cds, div_content, io_generator): """ - Javacript function to animate tap events and show component info on the right + Javacript function to animate tap events and show component info on the right. Parameters ---------- - CDS: bokeh.models.ColumnDataSource + CDS : bokeh.models.ColumnDataSource Data structure containing a limited set of columns from the comp_table - div: bokeh.models.Div + div : bokeh.models.Div Target Div element where component images will be loaded - io_generator: tedana.io.OutputGenerator + io_generator : tedana.io.OutputGenerator Output generating object for this workflow Returns ------- - CustomJS: bokeh.models.CustomJS + CustomJS : bokeh.models.CustomJS Javascript function that adds the tapping functionality """ return models.CustomJS( @@ -397,7 +398,7 @@ def _link_figures(fig, comptable_ds, div_content, io_generator): div_content : bokeh.models.Div Div element for additional HTML content. - io_generator: `tedana.io.OutputGenerator` + io_generator : `tedana.io.OutputGenerator` Output generating object for this workflow Returns @@ -405,7 +406,6 @@ def _link_figures(fig, comptable_ds, div_content, io_generator): fig : bokeh.plotting.figure Same as input figure, but with a linked method to its Tap event. - """ fig.js_on_event(events.Tap, _tap_callback(comptable_ds, div_content, io_generator)) return fig diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index 2820f16bc..bc2f31309 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -1,3 +1,4 @@ +"""Build HTML reports for tedana.""" import logging import os from os.path import join as opj @@ -74,6 +75,7 @@ def _update_template_bokeh(bokeh_id, info_table, about, prefix, references, boke Javascript created by bokeh.embed.components buttons : str HTML div created by _generate_buttons() + Returns ------- HTMLReport : an instance of a populated HTML report @@ -120,9 +122,7 @@ def _save_as_html(body): def _generate_info_table(info_dict): - """Generate a table with relevant information about the - system and tedana. - """ + """Generate a table with relevant information about the system and tedana.""" resource_path = Path(__file__).resolve().parent.joinpath("data", "html") info_template_name = "report_info_table_template.html" @@ -149,16 +149,13 @@ def _generate_info_table(info_dict): return info_html -def generate_report(io_generator, tr): +def generate_report(io_generator): """Generate an HTML report. 
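The tap-linking described above (`_tap_callback` plus `_link_figures`) amounts to attaching a `CustomJS` callback to each figure's Tap event. A hedged sketch of that pattern, with a trivial JavaScript body standing in for tedana's image-loading code:

```python
from bokeh import events, models, plotting

source = models.ColumnDataSource(dict(kappa=[80.0, 25.0], rho=[20.0, 60.0]))
div = models.Div(text="Click a point to show its index here.")

fig = plotting.figure(plot_width=400, plot_height=400, tools="tap")
fig.circle(x="kappa", y="rho", source=source, size=5)

# On tap, run a small JavaScript snippet against the current selection.
callback = models.CustomJS(
    args=dict(source=source, div=div),
    code="div.text = 'Selected component index: ' + source.selected.indices;",
)
fig.js_on_event(events.Tap, callback)
```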
Parameters ---------- io_generator : tedana.io.OutputGenerator io_generator object for this workflow's output - tr : float - The repetition time (TR) for the collected multi-echo - sequence Returns ------- @@ -180,13 +177,19 @@ def generate_report(io_generator, tr): def get_elbow_val(elbow_prefix): """ - Find cross component metrics that begin with elbow_prefix and output the value - Current prefixes are kappa_elbow_kundu and rho_elbow_kundu. This flexability - means anything that begins [kappa/rho]_elbow will be found and used regardless - of the suffix. If more than one metric has the prefix then the alphabetically - first one will be used and a warning will be logged - """ + Find cross component metrics that begin with elbow_prefix and output the value. + Current prefixes are kappa_elbow_kundu and rho_elbow_kundu. + + This flexibility means anything that begins [kappa/rho]_elbow will be found and + used regardless of the suffix. If more than one metric has the prefix then the + alphabetically first one will be used and a warning will be logged. + + Parameters + ---------- + elbow_prefix : str + The prefix to look for in the cross component metrics + """ elbow_keys = [k for k in cross_comp_metrics_dict.keys() if elbow_prefix in k] elbow_keys.sort() if len(elbow_keys) == 0: @@ -234,7 +237,7 @@ def get_elbow_val(elbow_prefix): y_label="Rho", elbow=rho_elbow, ) - varexp_pie_plot = df._create_varexp_pie_plt(comptable_cds, n_comps) + varexp_pie_plot = df._create_varexp_pie_plt(comptable_cds) # link all dynamic figures figs = [kappa_rho_plot, kappa_sorted_plot, rho_sorted_plot, varexp_pie_plot] diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index 2a190e7bb..85d613e78 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -1,6 +1,4 @@ -""" -Functions to creating figures to inspect tedana output -""" +"""Functions to creating figures to inspect tedana output.""" import logging import os @@ -21,21 +19,20 @@ def _trim_edge_zeros(arr): """ - Trims away the zero-filled slices that surround many 3/4D arrays + Trims away the zero-filled slices that surround many 3/4D arrays. Parameters ---------- - ndarray: (S x T) array_like + ndarray : (S x T) array_like an array with signal, surrounded by slices that contain only zeros that should be removed. Returns - --------- - ndarray: (S x T) array_like + ------- + ndarray : (S x T) array_like an array with reduced dimensions, such that the array contains only non_zero values from edge to edge. """ - mask = arr != 0 bounding_box = tuple(slice(np.min(indexes), np.max(indexes) + 1) for indexes in np.where(mask)) return arr[bounding_box] @@ -184,9 +181,10 @@ def carpet_plot( def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): """ - Creates static figures that highlight certain aspects of tedana processing + Create static figures that highlight certain aspects of tedana processing. + This includes a figure for each component showing the component time course, - the spatial weight map and a fast Fourier transform of the time course + the spatial weight map and a fast Fourier transform of the time course. Parameters ---------- @@ -194,7 +192,7 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): Time series from which to derive ICA betas mask : (S,) array_like Boolean mask array - comptable : (C x X) :obj:`pandas.DataFrame` + comptable : (C x M) :obj:`pandas.DataFrame` Component metric table. One row for each component, with a column for each metric. 
The index should be the component number. mmix : (C x T) array_like @@ -210,19 +208,19 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): mmix = mmix * comptable["optimal sign"].values # regenerate the beta images - ts_B = stats.get_coeffs(ts, mmix, mask) - ts_B = ts_B.reshape(io_generator.reference_img.shape[:3] + ts_B.shape[1:]) - # trim edges from ts_B array - ts_B = _trim_edge_zeros(ts_B) + ts_b = stats.get_coeffs(ts, mmix, mask) + ts_b = ts_b.reshape(io_generator.reference_img.shape[:3] + ts_b.shape[1:]) + # trim edges from ts_b array + ts_b = _trim_edge_zeros(ts_b) # Mask out remaining zeros - ts_B = np.ma.masked_where(ts_B == 0, ts_B) + ts_b = np.ma.masked_where(ts_b == 0, ts_b) # Get repetition time from reference image tr = io_generator.reference_img.header.get_zooms()[-1] # Create indices for 6 cuts, based on dimensions - cuts = [ts_B.shape[dim] // 6 for dim in range(3)] + cuts = [ts_b.shape[dim] // 6 for dim in range(3)] expl_text = "" # Remove trailing ';' from rationale column @@ -256,32 +254,34 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): ax_ts.xaxis.set_major_locator(xloc) ax_ts2 = ax_ts.twiny() - ax1Xs = ax_ts.get_xticks() + ax1_xs = ax_ts.get_xticks() - ax2Xs = [] - for X in ax1Xs: + ax2_xs = [] + for x in ax1_xs: # Limit to 2 decimal places - seconds_val = round(X * tr, 2) - ax2Xs.append(seconds_val) - ax_ts2.set_xticks(ax1Xs) + seconds_val = round(x * tr, 2) + ax2_xs.append(seconds_val) + ax_ts2.set_xticks(ax1_xs) ax_ts2.set_xlim(ax_ts.get_xbound()) - ax_ts2.set_xticklabels(ax2Xs) + ax_ts2.set_xticklabels(ax2_xs) ax_ts2.set_xlabel("seconds") ax_ts.plot(mmix[:, compnum], color=line_color) # Title will include variance from comptable - comp_var = "{0:.2f}".format(comptable.loc[compnum, "variance explained"]) - comp_kappa = "{0:.2f}".format(comptable.loc[compnum, "kappa"]) - comp_rho = "{0:.2f}".format(comptable.loc[compnum, "rho"]) - plt_title = "Comp. {}: variance: {}%, kappa: {}, rho: {}, {}".format( - compnum, comp_var, comp_kappa, comp_rho, expl_text + comp_var = f"{comptable.loc[compnum, 'variance explained']:.2f}" + comp_kappa = f"{comptable.loc[compnum, 'kappa']:.2f}" + comp_rho = f"{comptable.loc[compnum, 'rho']:.2f}" + plt_title = ( + f"Comp. {compnum}: variance: {comp_var}%, kappa: {comp_kappa}, " + f"rho: {comp_rho}, {expl_text}" ) + title = ax_ts.set_title(plt_title) title.set_y(1.5) # Set range to ~1/10th of max positive or negative beta - imgmax = 0.1 * np.abs(ts_B[:, :, :, compnum]).max() + imgmax = 0.1 * np.abs(ts_b[:, :, :, compnum]).max() imgmin = imgmax * -1 for idx, _ in enumerate(cuts): @@ -290,11 +290,11 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): ax.axis("off") if idx == 0: - to_plot = np.rot90(ts_B[imgslice * cuts[idx], :, :, compnum]) + to_plot = np.rot90(ts_b[imgslice * cuts[idx], :, :, compnum]) if idx == 1: - to_plot = np.rot90(ts_B[:, imgslice * cuts[idx], :, compnum]) + to_plot = np.rot90(ts_b[:, imgslice * cuts[idx], :, compnum]) if idx == 2: - to_plot = ts_B[:, :, imgslice * cuts[idx], compnum] + to_plot = ts_b[:, :, imgslice * cuts[idx], compnum] ax_im = ax.imshow(to_plot, vmin=imgmin, vmax=imgmax, aspect="equal", cmap=png_cmap) @@ -337,7 +337,6 @@ def pca_results(criteria, n_components, all_varex, io_generator): io_generator : object An object containing all the information needed to generate the output. 
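The edge-trimming step used above (`_trim_edge_zeros`) is a generic bounding-box crop and can be tried on its own. A minimal re-implementation for illustration only; the real helper lives in this module:

```python
import numpy as np

def trim_edge_zeros(arr):
    """Crop an array to the bounding box of its non-zero entries."""
    mask = arr != 0
    bounding_box = tuple(
        slice(np.min(indexes), np.max(indexes) + 1) for indexes in np.where(mask)
    )
    return arr[bounding_box]

img = np.zeros((6, 6))
img[2:4, 1:5] = 1.0
print(trim_edge_zeros(img).shape)  # (2, 4)
```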
""" - # Plot the PCA optimization curve for each criteria plt.figure(figsize=(10, 9)) plt.title("PCA Criteria") diff --git a/tedana/resources/config/metrics.json b/tedana/resources/config/metrics.json index dc9ac5cb2..038578e2e 100644 --- a/tedana/resources/config/metrics.json +++ b/tedana/resources/config/metrics.json @@ -117,4 +117,4 @@ "tes" ] } -} \ No newline at end of file +} diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index 622ab3bbe..a77c28807 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -219,4 +219,4 @@ "orig": "registry", "bidsv1.5.0": "desc-tedana_registry" } -} \ No newline at end of file +} diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 0ef1e3fc9..716fdc311 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -460,4 +460,4 @@ "_comment": "No code in the premodularized tedana" } ] -} \ No newline at end of file +} diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index ae2fe2b48..eb06e541c 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -227,4 +227,4 @@ } } ] -} \ No newline at end of file +} diff --git a/tedana/selection/__init__.py b/tedana/selection/__init__.py index 8a2f3dc5f..c692c320c 100644 --- a/tedana/selection/__init__.py +++ b/tedana/selection/__init__.py @@ -1,7 +1,6 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- -# ex: set sts=4 ts=4 sw=4 et: +"""TEDANA selection methods.""" -from .tedica import automatic_selection -from .tedpca import kundu_tedpca +from tedana.selection.tedica import automatic_selection +from tedana.selection.tedpca import kundu_tedpca __all__ = ["kundu_tedpca", "automatic_selection"] diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index f17e9ac9a..37d0b3a15 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -1,7 +1,4 @@ -""" -Functions that include workflows to identify and label -TE-dependent and TE-independent components. -""" +"""Identify and label TE-dependent and TE-independent components.""" import inspect import logging import os.path as op @@ -29,9 +26,7 @@ class TreeError(Exception): - """ - Passes errors that are raised when `validate_tree` fails - """ + """Passes errors that are raised when `validate_tree` fails.""" pass @@ -49,7 +44,6 @@ def load_config(tree): tree : :obj:`dict` A validated decision tree for the component selection process. """ - if tree in DEFAULT_TREES: fname = op.join(get_resource_path(), "decision_trees", tree + ".json") else: @@ -88,7 +82,6 @@ def validate_tree(tree): ------ TreeError """ - # Set the fields that should always be present err_msg = "" tree_expected_keys = [ @@ -270,7 +263,6 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab An example initialization with these options would look like ``selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols)`` """ - self.tree_name = tree self.__dict__.update(cross_component_metrics) @@ -324,7 +316,7 @@ def select(self): each component as accepted or rejected. 
Notes - ------- + ----- The selection process uses previously calculated parameters stored in `component_table` for each ICA component such as Kappa (a T2* weighting metric), Rho (an S0 weighting metric), and variance explained. If a necessary metric @@ -349,7 +341,6 @@ def select(self): everything that changed in each node - current_node_idx: The total number of nodes run in ``ComponentSelector`` """ - if "classification_tags" not in self.component_table.columns: self.component_table["classification_tags"] = "" @@ -433,15 +424,15 @@ def add_manual(self, indices, classification): def check_null(self, params, fcn): """ - Checks that all required parameters for selection node functions are - attributes in the class. Error if any are undefined + Check that required parameters for selection node functions are attributes in the class. + + Error if any are undefined. Returns ------- - params: :obj:`dict` + params : :obj:`dict` The keys and values for the inputted parameters """ - for key, val in params.items(): if val is None: try: @@ -460,9 +451,11 @@ def check_null(self, params, fcn): def are_only_necessary_metrics_used(self): """ - Check if all metrics that are declared as necessary are actually - used and if any used_metrics weren't explicitly declared necessary. - If either of these happen, a warning is added to the logger + Check if all metrics that are declared as necessary are actually used. + + Also check if any used_metrics weren't explicitly declared necessary. + + If either of these happen, a warning is added to the logger. """ necessary_metrics = self.necessary_metrics not_declared = self.tree["used_metrics"] - necessary_metrics @@ -480,9 +473,11 @@ def are_only_necessary_metrics_used(self): def are_all_components_accepted_or_rejected(self): """ - After the tree has finished executing, check if all component - classifications are either "accepted" or "rejected". - If any other component classifications remain, log a warning + Check if all component classifications are either "accepted" or "rejected". + + This is done after the tree has finished executing, + + If any other component classifications remain, log a warning. """ component_classifications = set(self.component_table["classification"].to_list()) nonfinal_classifications = component_classifications.difference({"accepted", "rejected"}) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index b7dbe8698..6b5a1e6cb 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -180,26 +180,26 @@ def dec_left_op_right( %(tag_if_true)s %(tag_if_false)s %(decide_comps)s - op: :obj:`str` + op : :obj:`str` Must be one of: ">", ">=", "==", "<=", "<" Applied the user defined operator to left op right - left, right: :obj:`str` or :obj:`float` + left, right : :obj:`str` or :obj:`float` The labels for the two metrics to be used for comparision. For example: left='kappa', right='rho' and op='>' means this function will test kappa>rho. One of the two can also be a number. In that case, a metric would be compared against a fixed threshold. For example left='T2fitdiff_invsout_ICAmap_Tstat', right=0, and op='>' means this function will test T2fitdiff_invsout_ICAmap_Tstat>0 - left_scale, right_scale: :obj:`float` or :obj:`str` + left_scale, right_scale : :obj:`float` or :obj:`str` Multiply the left or right metrics value by a constant. For example if left='kappa', right='rho', right_scale=2, and op='>' this tests kappa>(2*rho). 
These can also be a string that is a value in cross_component_metrics, since those will resolve to a single value. This cannot be a label for a component_table column since that would output a different value for each component. Default=1 - op2: :obj:`str`, Default=None - left2, right2, left3, right3: :obj:`str` or :obj:`float`, Default=None - left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float` or :obj:`str`, Default=1 + op2 : :obj:`str`, Default=None + left2, right2, left3, right3 : :obj:`str` or :obj:`float`, Default=None + left2_scale, right2_scale, left3_scale, right3_scale : :obj:`float` or :obj:`str`, Default=1 This function can also be used to calculate the intersection of two or three boolean statements. If op2, left2, and right2 are defined then this function returns @@ -247,9 +247,11 @@ def dec_left_op_right( def identify_used_metric(val, isnum=False): """ - Parse the left or right values or scalers to see if they are an - existing used_metric or cross_component_metric - If the value already a number, no parse would be needed + Parse the left or right values or scalers. + + This is done to see if they are an existing used_metric or cross_component_metric. + + If the value already a number, no parse would be needed. This is also used on left_scale and right_scale to convert a value in cross_component_metrics to a number. Set the isnum @@ -288,10 +290,11 @@ def identify_used_metric(val, isnum=False): def confirm_valid_conditional(left_scale, left_val, right_scale, right_val, op_val): """ + Confirm that the conditional statement is valid. + Makes sure the left_scale, left_val, right_scale, right_val, and - operator variables combine into a valid conditional statement + operator variables combine into a valid conditional statement. """ - left_val = identify_used_metric(left_val) right_val = identify_used_metric(right_val) left_scale = identify_used_metric(left_scale, isnum=True) @@ -303,13 +306,15 @@ def confirm_valid_conditional(left_scale, left_val, right_scale, right_val, op_v def operator_scale_descript(val_scale, val): """ - Return a string with one element from the mathematical expression - If val_scale is not 1, include scaling factor (rounded to 2 decimals) + Return a string with one element from the mathematical expression. + + If val_scale is not 1, include scaling factor (rounded to 2 decimals). + If val is a column in the component_table output the column label If val is a number (either an inputted number or from cross_component_metrics include the number (rounded to 2 decimals) This output is used to great a descriptor for visualizing the decision tree - Unrounded values are saved and rounding here will not affect results + Unrounded values are saved and rounding here will not affect results. """ if not isinstance(val, str): val = str(round(val, 2)) @@ -398,7 +403,7 @@ def operator_scale_descript(val_scale, val): ) def parse_vals(val): - """Get the metric values for the selected components or relevant constant""" + """Get the metric values for the selected components or relevant constant.""" if isinstance(val, str): return selector.component_table.loc[comps2use, val].copy() else: @@ -494,14 +499,14 @@ def dec_variance_lessthan_thresholds( %(tag_if_true)s %(tag_if_false)s %(decide_comps)s - var_metric: :obj:`str` + var_metric : :obj:`str` The name of the metric in component_table for variance. 
Default="variance explained" This is an option so that it is possible to use "normalized variance explained" or another metric - single_comp_threshold: :obj:`float` + single_comp_threshold : :obj:`float` The threshold for which all components need to have lower variance. Default=0.1 - all_comp_threshold: :obj: `float` + all_comp_threshold : :obj: `float` The number of the variance for all components less than single_comp_threshold needs to be under this threshold. Default=1.0 %(log_extra_info)s @@ -610,10 +615,10 @@ def calc_median( ---------- %(selector)s %(decide_comps)s - metric_name: :obj:`str` + metric_name : :obj:`str` The name of a column in selector.component_table. The median of the values in this column will be calculated - median_label: :obj:`str` + median_label : :obj:`str` The median will be saved in "median_(median_label)" %(log_extra_info)s %(log_extra_report)s @@ -820,12 +825,12 @@ def calc_rho_elbow( ---------- %(selector)s %(decide_comps)s - subset_decide_comps: :obj:`str` + subset_decide_comps : :obj:`str` This is a string with a single component classification label. For the elbow calculation used by Kundu in MEICA v.27 thresholds are based on all components and on unclassified components. Default='unclassified'. - rho_elbow_type: :obj:`str` + rho_elbow_type : :obj:`str` The algorithm used to calculate the rho elbow. Current options are: 'kundu' and 'liberal'. Default='kundu'. %(log_extra_info)s @@ -952,28 +957,30 @@ def dec_classification_doesnt_exist( tag=None, ): """ - If there are no components with a classification specified in class_comp_exists, - change the classification of all components in decide_comps. + Change the classification of all components in decide_comps. + + This is done if there are no components with a classification specified in + class_comp_exists. Parameters ---------- %(selector)s - new_classification: :obj:`str` + new_classification : :obj:`str` Assign all components identified in decide_comps the classification in new_classification. %(decide_comps)s - class_comp_exists: :obj:`str` or :obj:`list[str]` or :obj:`int` or :obj:`list[int]` + class_comp_exists : :obj:`str` or :obj:`list[str]` or :obj:`int` or :obj:`list[int]` This has the same structure options as decide_comps. This function tests whether any components in decide_comps have the classifications defined in this variable. - at_least_num_exist: :obj:`int` + at_least_num_exist : :obj:`int` Instead of just testing whether a classification exists, test whether at least this number of components have that classification. Default=1 %(log_extra_info)s %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s - tag: :obj:`str` + tag : :obj:`str` A classification tag to assign to all components being reclassified. This should be one of the tags defined by classification_tags in the decision tree specification. Default="". @@ -1082,12 +1089,12 @@ def dec_reclassify_high_var_comps( tag=None, ): """ - Identifies and reclassifies a couple components with the largest gaps in variance + Identify and reclassify a couple components with the largest gaps in variance. Parameters ---------- %(selector)s - new_classification: :obj:`str` + new_classification : :obj:`str` Assign all components identified in decide_comps the classification in new_classification. 
%(decide_comps)s @@ -1095,7 +1102,7 @@ def dec_reclassify_high_var_comps( %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s - tag: :obj:`str` + tag : :obj:`str` A classification tag to assign to all components being reclassified. This should be one of the tags defined by classification_tags in the decision tree specification. Default="". @@ -1229,15 +1236,15 @@ def calc_varex_thresh( ---------- %(selector)s %(decide_comps)s - thresh_label: :obj:`str` + thresh_label : :obj:`str` The threshold will be saved in "varex_(thresh_label)_thresh" In the original kundu decision tree this was either "upper" or "lower" If passed an empty string, will be saved as "varex_thresh" - percentile_thresh: :obj:`int` + percentile_thresh : :obj:`int` A percentile threshold to apply to components to set the variance threshold. In the original kundu decision tree this was 90 for varex_upper_thresh and 25 for varex_lower_thresh - num_highest_var_comps: :obj:`str` :obj:`int` + num_highest_var_comps : :obj:`str` :obj:`int` percentile can be calculated on the num_highest_var_comps components with the lowest variance. Either input an integer directly or input a string that is a parameter stored in selector.cross_component_metrics ("num_acc_guess" in @@ -1400,7 +1407,7 @@ def calc_extend_factor( %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s - extend_factor: :obj:`float` + extend_factor : :obj:`float` If a number, then use rather than calculating anything. If None than calculate. default=None @@ -1470,7 +1477,7 @@ def calc_max_good_meanmetricrank( ---------- %(selector)s %(decide_comps)s - metric_suffix: :obj:`str` + metric_suffix : :obj:`str` By default, this will output a value called "max_good_meanmetricrank" If this variable is not None or "" then it will output: "max_good_meanmetricrank_[metric_suffix]". Default=None @@ -1693,7 +1700,7 @@ def calc_revised_meanmetricrank_guesses( ---------- %(selector)s %(decide_comps)s - restrict_factor: :obj:`int` or :obj:`float` + restrict_factor : :obj:`int` or :obj:`float` A scaling factor to scale between num_acc_guess and conservative_guess. Default=2. 
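The `dec_left_op_right` node described earlier in this file reduces to evaluating `left_scale*left op right_scale*right` per component, where `left`/`right` may be column names or fixed numbers. A stand-alone sketch with illustrative names and values (not tedana's implementation, just the same comparison logic):

```python
import operator

import numpy as np
import pandas as pd

# Illustrative component table; the node operates on selector.component_table.
component_table = pd.DataFrame({"kappa": [80.0, 30.0, 10.0], "rho": [20.0, 40.0, 5.0]})

ops = {">": operator.gt, ">=": operator.ge, "==": operator.eq, "<=": operator.le, "<": operator.lt}

def left_op_right(table, left, op, right, left_scale=1, right_scale=1):
    """Evaluate (left_scale * left) op (right_scale * right) for each component."""
    def parse_vals(val):
        # A column name resolves to per-component values; anything else is a constant.
        return table[val].to_numpy() if isinstance(val, str) else val

    return ops[op](left_scale * parse_vals(left), right_scale * parse_vals(right))

# kappa > 2 * rho, as in the docstring's example
print(left_op_right(component_table, "kappa", ">", "rho", right_scale=2))
```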
@@ -1844,12 +1851,12 @@ def calc_revised_meanmetricrank_guesses( outputs["conservative_guess"] = outputs["num_acc_guess"] / outputs["restrict_factor"] tmp_kappa = selector.component_table.loc[comps2use, "kappa"].to_numpy() - tmp_dice_FT2 = selector.component_table.loc[comps2use, "dice_FT2"].to_numpy() + tmp_dice_ft2 = selector.component_table.loc[comps2use, "dice_FT2"].to_numpy() tmp_signal_m_noise_t = selector.component_table.loc[comps2use, "signal-noise_t"].to_numpy() tmp_countnoise = selector.component_table.loc[comps2use, "countnoise"].to_numpy() - tmp_countsigFT2 = selector.component_table.loc[comps2use, "countsigFT2"].to_numpy() + tmp_countsig_ft2 = selector.component_table.loc[comps2use, "countsigFT2"].to_numpy() tmp_d_table_score = generate_decision_table_score( - tmp_kappa, tmp_dice_FT2, tmp_signal_m_noise_t, tmp_countnoise, tmp_countsigFT2 + tmp_kappa, tmp_dice_ft2, tmp_signal_m_noise_t, tmp_countnoise, tmp_countsig_ft2 ) selector.component_table[f"d_table_score_node{selector.current_node_idx}"] = np.NaN selector.component_table.loc[ diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index e5f727311..882496746 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -1,6 +1,4 @@ -""" -Utility functions for tedana.selection -""" +"""Utility functions for tedana.selection.""" import logging @@ -36,7 +34,6 @@ def selectcomps2use(selector, decide_comps): comps2use : :obj:`list[int]` A list of component indices with classifications included in decide_comps """ - if "classification" not in selector.component_table: raise ValueError( "selector.component_table needs a 'classification' column to run selectcomp2suse" @@ -97,8 +94,10 @@ def change_comptable_classifications( dont_warn_reclassify=False, ): """ - Given information on whether a decision critereon is true or false for each - component, change or don't change the component classification + Change or don't change the component classification. + + This happens based on the information on whether a decision critereon is true or + false for each component. Parameters ---------- @@ -286,8 +285,9 @@ def comptable_classification_changer( def clean_dataframe(component_table): """ - Reorder columns in component table so that "classification" - and "classification_tags" are last. + Reorder columns in component table. + + The reordering is done so that "classification" and "classification_tags" are last. Parameters ---------- @@ -317,8 +317,7 @@ def clean_dataframe(component_table): def confirm_metrics_exist(component_table, necessary_metrics, function_name=None): """ - Confirm that all metrics declared in necessary_metrics are - already included in comptable. + Confirm that all metrics declared in necessary_metrics are included in comptable. Parameters ---------- @@ -341,7 +340,7 @@ def confirm_metrics_exist(component_table, necessary_metrics, function_name=None If metrics_exist is False then raise an error and end the program Note - ----- + ---- This doesn't check if there are data in each metric's column, just that the columns exist. Also, the string in `necessary_metrics` and the column labels in component_table will only be matched if they're identical. @@ -373,7 +372,7 @@ def log_decision_tree_step( if_false=None, calc_outputs=None, ): - """Logging text to add after every decision tree calculation + """Log text to add after every decision tree calculation. 
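The logging helpers in this utilities module (`log_decision_tree_step` above and `log_classification_counts` defined a little further down) summarize what each node did. A minimal sketch of the classification tally that `log_classification_counts` describes; the counts and step number are made up:

```python
import numpy as np
import pandas as pd

component_table = pd.DataFrame(
    {"classification": ["accepted", "accepted", "rejected", "provisionalreject"]}
)

# Tally classifications and build a log line like the one shown in the docstring.
labels, counts = np.unique(component_table["classification"].values, return_counts=True)
summary = ", ".join(f"{count} {label}" for label, count in zip(labels, counts))
print(f"Step 4: Total component classifications: {summary}")
```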
Parameters ---------- @@ -453,8 +452,8 @@ def log_classification_counts(decision_node_idx, component_table): Returns ------- - The LGR.info logger will add a line like: \ - 'Step 4: Total component classifications: 10 accepted, 5 provisionalreject, 8 rejected' + The LGR.info logger will add a line like : \ + 'Step 4 : Total component classifications: 10 accepted, 5 provisionalreject, 8 rejected' """ classification_labels, label_counts = np.unique( component_table["classification"].values, return_counts=True @@ -471,7 +470,7 @@ def log_classification_counts(decision_node_idx, component_table): # Calculations that are used in decision tree functions ####################################################### def getelbow_cons(arr, return_val=False): - """Elbow using mean/variance method - conservative + """Elbow using mean/variance method - conservative. Parameters ---------- @@ -482,7 +481,7 @@ def getelbow_cons(arr, return_val=False): Returns ------- - :obj:`int` or :obj:`float` + : obj:`int` or :obj:`float` Either the elbow index (if return_val is True) or the values at the elbow index (if return_val is False) """ @@ -533,7 +532,7 @@ def getelbow(arr, return_val=False): Returns ------- - :obj:`int` or :obj:`float` + : obj:`int` or :obj:`float` Either the elbow index (if return_val is True) or the values at the elbow index (if return_val is False) """ @@ -568,8 +567,9 @@ def getelbow(arr, return_val=False): def kappa_elbow_kundu(component_table, n_echos, comps2use=None): """ - Calculate an elbow for kappa using the approach originally in - Prantik Kundu's MEICA v2.5 code + Calculate an elbow for kappa. + + Uses the approach originally in Prantik Kundu's MEICA v2.5 code. Parameters ---------- @@ -612,7 +612,6 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): on kappa values and is used in rho_elbow_kundu_liberal. For several reasons it made more sense to calculate here. """ - # If comps2use is None then set to a list of all component numbers if not comps2use: comps2use = list(range(component_table.shape[0])) @@ -659,9 +658,10 @@ def rho_elbow_kundu_liberal( component_table, n_echos, rho_elbow_type="kundu", comps2use=None, subset_comps2use=-1 ): """ - Calculate an elbow for rho using the approach originally in - Prantik Kundu's MEICA v2.5 code and with a slightly more - liberal threshold + Calculate an elbow for rho. + + Uses the approach originally in Prantik Kundu's MEICA v2.5 code + and with a slightly more liberal threshold. Parameters ---------- @@ -772,7 +772,9 @@ def rho_elbow_kundu_liberal( def get_extend_factor(n_vols=None, extend_factor=None): """ - extend_factor is a scaler used to set a threshold for the d_table_score in + Get the extend_factor for the kundu decision tree. + + Extend_factor is a scaler used to set a threshold for the d_table_score in the kundu decision tree. It is either defined by the number of volumes in the time series or directly diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 682bb5c61..eb8c6d24e 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -1,6 +1,4 @@ -""" -Functions to identify TE-dependent and TE-independent components. 
-""" +"""Functions to identify TE-dependent and TE-independent components.""" import logging from tedana.metrics import collect @@ -10,7 +8,7 @@ RepLGR = logging.getLogger("REPORT") -def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose=False): +def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): """Classify components based on component table and decision tree type. Parameters diff --git a/tedana/selection/tedpca.py b/tedana/selection/tedpca.py index 5a99fea71..6cd334969 100644 --- a/tedana/selection/tedpca.py +++ b/tedana/selection/tedpca.py @@ -1,6 +1,4 @@ -""" -Functions to identify TE-dependent and TE-independent components. -""" +"""Functions to identify TE-dependent and TE-independent components.""" import logging import numpy as np @@ -122,8 +120,8 @@ def kundu_tedpca(comptable, n_echos, kdaw=10.0, rdaw=1.0, stabilize=False): n_components = comptable.loc[comptable["classification"] == "accepted"].shape[0] LGR.info( - "Selected {0} components with Kappa threshold: {1:.02f}, Rho " - "threshold: {2:.02f}".format(n_components, kappa_thr, rho_thr) + f"Selected {n_components} components with Kappa threshold: {kappa_thr:.02f}, Rho " + f"threshold: {rho_thr:.02f}" ) # Move decision columns to end diff --git a/tedana/stats.py b/tedana/stats.py index 2be30864e..04e8a48dd 100644 --- a/tedana/stats.py +++ b/tedana/stats.py @@ -1,6 +1,4 @@ -""" -Statistical functions -""" +"""Statistical functions.""" import logging import numpy as np @@ -14,7 +12,7 @@ def getfbounds(n_echos): """ - Gets F-statistic boundaries based on number of echos + Get F-statistic boundaries based on number of echos. Parameters ---------- @@ -35,7 +33,7 @@ def getfbounds(n_echos): def computefeats2(data, mmix, mask=None, normalize=True): """ - Converts `data` to component space using `mmix` + Convert `data` to component space using `mmix`. Parameters ---------- @@ -51,69 +49,69 @@ def computefeats2(data, mmix, mask=None, normalize=True): Returns ------- - data_Z : (S x C) :obj:`numpy.ndarray` + data_z : (S x C) :obj:`numpy.ndarray` Data in component space """ if data.ndim != 2: - raise ValueError("Parameter data should be 2d, not {0}d".format(data.ndim)) + raise ValueError(f"Parameter data should be 2d, not {data.ndim}d") elif mmix.ndim not in [2]: - raise ValueError("Parameter mmix should be 2d, not {0}d".format(mmix.ndim)) + raise ValueError(f"Parameter mmix should be 2d, not {mmix.ndim}d") elif (mask is not None) and (mask.ndim != 1): - raise ValueError("Parameter mask should be 1d, not {0}d".format(mask.ndim)) + raise ValueError(f"Parameter mask should be 1d, not {mask.ndim}d") elif (mask is not None) and (data.shape[0] != mask.shape[0]): raise ValueError( - "First dimensions (number of samples) of data ({0}) " - "and mask ({1}) do not match.".format(data.shape[0], mask.shape[0]) + f"First dimensions (number of samples) of data ({data.shape[0]}) " + f"and mask ({mask.shape[0]}) do not match." ) elif data.shape[1] != mmix.shape[0]: raise ValueError( - "Second dimensions (number of volumes) of data ({0}) " - "and mmix ({1}) do not match.".format(data.shape[0], mmix.shape[0]) + f"Second dimensions (number of volumes) of data ({data.shape[0]}) " + f"and mmix ({mmix.shape[0]}) do not match." ) # demean masked data if mask is not None: data = data[mask, ...] 
# normalize data (subtract mean and divide by standard deviation) in the last dimension - # so that least-squares estimates represent "approximate" correlation values (data_R) + # so that least-squares estimates represent "approximate" correlation values (data_r) # assuming mixing matrix (mmix) values are also normalized data_vn = stats.zscore(data, axis=-1) # get betas of `data`~`mmix` and limit to range [-0.999, 0.999] - data_R = get_coeffs(data_vn, mmix, mask=None) - # Avoid abs(data_R) => 1, otherwise Fisher's transform will return Inf or -Inf - data_R[data_R < -0.999] = -0.999 - data_R[data_R > 0.999] = 0.999 + data_r = get_coeffs(data_vn, mmix, mask=None) + # Avoid abs(data_r) => 1, otherwise Fisher's transform will return Inf or -Inf + data_r[data_r < -0.999] = -0.999 + data_r[data_r > 0.999] = 0.999 # R-to-Z transform - data_Z = np.arctanh(data_R) - if data_Z.ndim == 1: - data_Z = np.atleast_2d(data_Z).T + data_z = np.arctanh(data_r) + if data_z.ndim == 1: + data_z = np.atleast_2d(data_z).T # normalize data (only division by std) if normalize: # subtract mean and dividing by standard deviation - data_Zm = stats.zscore(data_Z, axis=0) + data_zm = stats.zscore(data_z, axis=0) # adding back the mean - data_Z = data_Zm + (data_Z.mean(axis=0, keepdims=True) / data_Z.std(axis=0, keepdims=True)) + data_z = data_zm + (data_z.mean(axis=0, keepdims=True) / data_z.std(axis=0, keepdims=True)) - return data_Z + return data_z -def get_coeffs(data, X, mask=None, add_const=False): +def get_coeffs(data, x, mask=None, add_const=False): """ - Performs least-squares fit of `X` against `data` + Perform least-squares fit of `x` against `data`. Parameters ---------- data : (S [x E] x T) array_like Array where `S` is samples, `E` is echoes, and `T` is time - X : (T [x C]) array_like + x : (T [x C]) array_like Array where `T` is time and `C` is predictor variables mask : (S [x E]) array_like Boolean mask array add_const : bool, optional - Add intercept column to `X` before fitting. Default: False + Add intercept column to `x` before fitting. 
Default: False Returns ------- @@ -121,39 +119,38 @@ def get_coeffs(data, X, mask=None, add_const=False): Array of `S` sample betas for `C` predictors """ if data.ndim not in [2, 3]: - raise ValueError("Parameter data should be 2d or 3d, not {0}d".format(data.ndim)) - elif X.ndim not in [2]: - raise ValueError("Parameter X should be 2d, not {0}d".format(X.ndim)) - elif data.shape[-1] != X.shape[0]: + raise ValueError(f"Parameter data should be 2d or 3d, not {data.ndim}d") + elif x.ndim not in [2]: + raise ValueError(f"Parameter x should be 2d, not {x.ndim}d") + elif data.shape[-1] != x.shape[0]: raise ValueError( - "Last dimension (dimension {0}) of data ({1}) does not " - "match first dimension of " - "X ({2})".format(data.ndim, data.shape[-1], X.shape[0]) + f"Last dimension (dimension {data.ndim}) of data ({data.shape[-1]}) does not " + f"match first dimension of x ({x.shape[0]})" ) # mask data and flip (time x samples) if mask is not None: if mask.ndim not in [1, 2]: - raise ValueError("Parameter data should be 1d or 2d, not {0}d".format(mask.ndim)) + raise ValueError(f"Parameter data should be 1d or 2d, not {mask.ndim}d") elif data.shape[0] != mask.shape[0]: raise ValueError( - "First dimensions of data ({0}) and mask ({1}) do not " - "match".format(data.shape[0], mask.shape[0]) + f"First dimensions of data ({data.shape[0]}) and " + f"mask ({mask.shape[0]}) do not match" ) mdata = data[mask, :].T else: mdata = data.T - # coerce X to >=2d - X = np.atleast_2d(X) + # coerce x to >=2d + x = np.atleast_2d(x) - if len(X) == 1: - X = X.T + if len(x) == 1: + x = x.T if add_const: # add intercept, if specified - X = np.column_stack([X, np.ones((len(X), 1))]) + x = np.column_stack([x, np.ones((len(x), 1))]) - betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T + betas = np.linalg.lstsq(x, mdata, rcond=None)[0].T if add_const: # drop beta for intercept, if specified betas = betas[:, :-1] diff --git a/tedana/tests/test_bibtex.py b/tedana/tests/test_bibtex.py index 885593ba7..f8418f6b1 100644 --- a/tedana/tests/test_bibtex.py +++ b/tedana/tests/test_bibtex.py @@ -1,4 +1,4 @@ -"""Tests for bibtex""" +"""Tests for bibtex.""" from tedana import bibtex diff --git a/tedana/tests/test_combine.py b/tedana/tests/test_combine.py index 41b21ff07..65e757a9a 100644 --- a/tedana/tests/test_combine.py +++ b/tedana/tests/test_combine.py @@ -1,6 +1,4 @@ -""" -Tests for tedana.combine -""" +"""Tests for tedana.combine.""" import numpy as np @@ -8,9 +6,7 @@ def test__combine_t2s(): - """ - Test tedana.combine._combine_t2s - """ + """Test tedana.combine._combine_t2s.""" np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 data = np.random.random((n_voxels, n_echos, n_trs)) @@ -28,9 +24,7 @@ def test__combine_t2s(): def test__combine_paid(): - """ - Test tedana.combine._combine_paid - """ + """Test tedana.combine._combine_paid.""" np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 data = np.random.random((n_voxels, n_echos, n_trs)) @@ -40,9 +34,7 @@ def test__combine_paid(): def test_make_optcom(): - """ - Test tedana.combine.make_optcom - """ + """Test tedana.combine.make_optcom.""" np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 n_mask = 5 diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index 6e86f3114..32277ceb6 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -1,4 +1,4 @@ -"""Tests for the decision tree modularization""" +"""Tests for the decision tree modularization.""" import glob import json import os 
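The least-squares fit in `get_coeffs` above can be checked with synthetic data. A minimal sketch that mirrors the intercept handling; all arrays are synthetic and `mmix` is just a random mixing matrix:

```python
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_vols, n_comps = 50, 40, 3

mmix = rng.standard_normal((n_vols, n_comps))            # T x C predictors
betas_true = rng.standard_normal((n_samples, n_comps))   # S x C
data = betas_true @ mmix.T + 0.01 * rng.standard_normal((n_samples, n_vols))

# Mirror get_coeffs(..., add_const=True): append an intercept column, solve the
# least-squares problem against the time-by-samples data, then drop the intercept beta.
x = np.column_stack([mmix, np.ones((len(mmix), 1))])
betas = np.linalg.lstsq(x, data.T, rcond=None)[0].T
betas = betas[:, :-1]
print(np.abs(betas - betas_true).max())  # small, since the added noise is small
```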
@@ -17,7 +17,7 @@ def sample_comptable(): - """Retrieves a sample component table""" + """Retrieves a sample component table.""" sample_fname = op.join(THIS_DIR, "data", "sample_comptable.tsv") return pd.read_csv(sample_fname, delimiter="\t") @@ -25,11 +25,11 @@ def sample_comptable(): def dicts_to_test(treechoice): """ - Outputs decision tree dictionaries to use to test tree validation + Outputs decision tree dictionaries to use to test tree validation. Parameters ---------- - treechoice: :obj:`str` One of several labels to select which dict to output + treechoice : :obj:`str` One of several labels to select which dict to output Options are: "valid": A tree that would trigger all warnings, but pass validation "extra_req_param": A tree with an undefined required parameter for a decision node function @@ -40,7 +40,7 @@ def dicts_to_test(treechoice): Returns ------- - tree: :ojb:`dict` A dict that can be input into component_selector.validate_tree + tree : :ojb:`dict` A dict that can be input into component_selector.validate_tree """ # valid_dict is a simple valid dictionary to test @@ -144,7 +144,7 @@ def dicts_to_test(treechoice): # load_config # ----------- def test_load_config_fails(): - """Tests for load_config failure modes""" + """Tests for load_config failure modes.""" # We recast to ValueError in the file not found and directory cases with pytest.raises(ValueError): @@ -159,7 +159,7 @@ def test_load_config_fails(): def test_load_config_succeeds(): - """Tests to make sure load_config succeeds""" + """Tests to make sure load_config succeeds.""" # The minimal tree should have an id of "minimal_decision_tree_test1" tree = component_selector.load_config("minimal") @@ -167,7 +167,7 @@ def test_load_config_succeeds(): def test_minimal(): - """Smoke test for constructor for ComponentSelector using minimal tree""" + """Smoke test for constructor for ComponentSelector using minimal tree.""" xcomp = { "n_echos": 3, } @@ -195,11 +195,12 @@ def test_minimal(): def test_validate_tree_succeeds(): """ Tests to make sure validate_tree suceeds for all default - decision trees in decision trees + decision trees in decision trees. + Tested on all default trees in ./tedana/resources/decision_trees Note: If there is a tree in the default trees directory that is being developed and not yet valid, it's file name should - include 'invalid' as a prefix + include 'invalid' as a prefix. """ default_tree_names = glob.glob( @@ -223,7 +224,7 @@ def test_validate_tree_succeeds(): def test_validate_tree_warnings(): """ Tests to make sure validate_tree triggers all warning conditions - but still succeeds + but still succeeds. """ # A tree that raises all possible warnings in the validator should still be valid @@ -233,8 +234,9 @@ def test_validate_tree_warnings(): def test_validate_tree_fails(): """ Tests to make sure validate_tree fails for invalid trees - Tests ../resources/decision_trees/invalid*.json and - ./data/ComponentSelection/invalid*.json trees + Tests ../resources/decision_trees/invalid*.json and. + + ./data/ComponentSelection/invalid*.json trees. 
""" # An empty dict should not be valid @@ -261,7 +263,7 @@ def test_validate_tree_fails(): def test_check_null_fails(): - """Tests to trigger check_null missing parameter error""" + """Tests to trigger check_null missing parameter error.""" selector = component_selector.ComponentSelector("minimal", sample_comptable()) selector.tree = dicts_to_test("null_value") @@ -273,7 +275,7 @@ def test_check_null_fails(): def test_check_null_succeeds(): - """Tests check_null finds empty parameter in self""" + """Tests check_null finds empty parameter in self.""" # "left" is missing from the function definition in node # but is found as an initialized cross component metric @@ -293,7 +295,7 @@ def test_check_null_succeeds(): def test_are_only_necessary_metrics_used_warning(): - """Tests a warning that wasn't triggered in other test workflows""" + """Tests a warning that wasn't triggered in other test workflows.""" selector = component_selector.ComponentSelector("minimal", sample_comptable()) @@ -304,7 +306,7 @@ def test_are_only_necessary_metrics_used_warning(): def test_are_all_components_accepted_or_rejected(): - """Tests warnings are triggered in are_all_components_accepted_or_rejected""" + """Tests warnings are triggered in are_all_components_accepted_or_rejected.""" selector = component_selector.ComponentSelector("minimal", sample_comptable()) selector.component_table.loc[7, "classification"] = "intermediate1" @@ -313,7 +315,7 @@ def test_are_all_components_accepted_or_rejected(): def test_selector_properties_smoke(): - """Tests to confirm properties match expected results""" + """Tests to confirm properties match expected results.""" selector = component_selector.ComponentSelector("minimal", sample_comptable()) diff --git a/tedana/tests/test_decay.py b/tedana/tests/test_decay.py index 221fc51d9..4d11d8e90 100644 --- a/tedana/tests/test_decay.py +++ b/tedana/tests/test_decay.py @@ -1,6 +1,4 @@ -""" -Tests for tedana.decay -""" +"""Tests for tedana.decay.""" import os.path as op @@ -16,7 +14,7 @@ @pytest.fixture(scope="module") def testdata1(): tes = np.array([14.5, 38.5, 62.5]) - in_files = [op.join(get_test_data_path(), "echo{0}.nii.gz".format(i + 1)) for i in range(3)] + in_files = [op.join(get_test_data_path(), f"echo{i + 1}.nii.gz") for i in range(3)] data, _ = io.load_data(in_files, n_echos=len(tes)) mask, adaptive_mask = utils.make_adaptive_mask(data, getsum=True) fittype = "loglin" @@ -31,10 +29,8 @@ def testdata1(): def test_fit_decay(testdata1): - """ - fit_decay should return data in (samples,) shape. - """ - t2sv, s0v, t2svG, s0vG = me.fit_decay( + """Fit_decay should return data in (samples,) shape.""" + t2sv, s0v, t2svg, s0vg = me.fit_decay( testdata1["data"], testdata1["tes"], testdata1["mask"], @@ -43,15 +39,13 @@ def test_fit_decay(testdata1): ) assert t2sv.ndim == 1 assert s0v.ndim == 1 - assert t2svG.ndim == 1 - assert s0vG.ndim == 1 + assert t2svg.ndim == 1 + assert s0vg.ndim == 1 def test_fit_decay_ts(testdata1): - """ - fit_decay_ts should return data in samples x time shape. 
- """ - t2sv, s0v, t2svG, s0vG = me.fit_decay_ts( + """Fit_decay_ts should return data in samples x time shape.""" + t2sv, s0v, t2svg, s0vg = me.fit_decay_ts( testdata1["data"], testdata1["tes"], testdata1["mask"], @@ -60,13 +54,14 @@ def test_fit_decay_ts(testdata1): ) assert t2sv.ndim == 2 assert s0v.ndim == 2 - assert t2svG.ndim == 2 - assert s0vG.ndim == 2 + assert t2svg.ndim == 2 + assert s0vg.ndim == 2 def test__apply_t2s_floor(): """ - _apply_t2s_floor applies a floor to T2* values to prevent a ZeroDivisionError during + _apply_t2s_floor applies a floor to T2* values to prevent a ZeroDivisionError during. + optimal combination. """ n_voxels, n_echos, n_trs = 100, 5, 25 @@ -93,8 +88,10 @@ def test__apply_t2s_floor(): def test_smoke_fit_decay(): """ - test_smoke_fit_decay tests that the function fit_decay returns reasonable + Test_smoke_fit_decay tests that the function fit_decay returns reasonable. + objects with semi-random inputs in the correct format. + A mask with at least some "good" voxels and an adaptive mask where all good voxels have at least two good echoes are generated to ensure that the decay-fitting function has valid voxels on which to run. @@ -119,9 +116,11 @@ def test_smoke_fit_decay(): def test_smoke_fit_decay_curvefit(): """ - test_smoke_fit_decay tests that the function fit_decay returns reasonable - objects with random inputs in the correct format when using the direct - monoexponetial approach + Test_smoke_fit_decay tests that the function fit_decay returns reasonable. + + objects with random inputs in the correct format when using the direct. + + monoexponetial approach. """ n_samples = 100 n_echos = 5 @@ -143,8 +142,9 @@ def test_smoke_fit_decay_curvefit(): def test_smoke_fit_decay_ts(): """ - test_smoke_fit_decay_ts tests that the function fit_decay_ts returns reasonable - objects with random inputs in the correct format + Test_smoke_fit_decay_ts tests that the function fit_decay_ts returns reasonable. + + objects with random inputs in the correct format. """ n_samples = 100 n_echos = 5 @@ -166,9 +166,11 @@ def test_smoke_fit_decay_ts(): def test_smoke_fit_decay_curvefit_ts(): """ - test_smoke_fit_decay_ts tests that the function fit_decay_ts returns reasonable - objects with random inputs in the correct format when using the direct - monoexponetial approach + Test_smoke_fit_decay_ts tests that the function fit_decay_ts returns reasonable. + + objects with random inputs in the correct format when using the direct. + + monoexponetial approach. """ n_samples = 100 n_echos = 5 diff --git a/tedana/tests/test_gscontrol.py b/tedana/tests/test_gscontrol.py index d5be27552..f05f82236 100644 --- a/tedana/tests/test_gscontrol.py +++ b/tedana/tests/test_gscontrol.py @@ -1,6 +1,4 @@ -""" -Tests for tedana.model.fit -""" +"""Tests for tedana.model.fit.""" import os @@ -17,7 +15,8 @@ def test_break_gscontrol_raw(): """ - Ensure that gscontrol_raw fails when input data do not have the right + Ensure that gscontrol_raw fails when input data do not have the right. + shapes. 
""" n_samples, n_echos, n_vols = 10000, 4, 100 @@ -31,8 +30,7 @@ def test_break_gscontrol_raw(): catd=catd, optcom=optcom, n_echos=n_echos, io_generator=io_generator, dtrank=4 ) assert str(e_info.value) == ( - "First dimensions of catd ({0}) and optcom ({1}) do not " - "match".format(catd.shape[0], optcom.shape[0]) + f"First dimensions of catd ({catd.shape[0]}) and optcom ({optcom.shape[0]}) do not match" ) catd = np.empty((n_samples, n_echos + 1, n_vols)) @@ -41,8 +39,7 @@ def test_break_gscontrol_raw(): catd=catd, optcom=optcom, n_echos=n_echos, io_generator=io_generator, dtrank=4 ) assert str(e_info.value) == ( - "Second dimension of catd ({0}) does not match " - "n_echos ({1})".format(catd.shape[1], n_echos) + f"Second dimension of catd ({catd.shape[1]}) does not match n_echos ({n_echos})" ) catd = np.empty((n_samples, n_echos, n_vols)) @@ -52,7 +49,6 @@ def test_break_gscontrol_raw(): catd=catd, optcom=optcom, n_echos=n_echos, io_generator=io_generator, dtrank=4 ) assert str(e_info.value) == ( - "Third dimension of catd ({0}) does not match " - "second dimension of optcom " - "({1})".format(catd.shape[2], optcom.shape[1]) + f"Third dimension of catd ({catd.shape[2]}) does not match " + f"second dimension of optcom ({optcom.shape[1]})" ) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 09c0bee65..426901436 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -1,6 +1,4 @@ -""" -Integration tests for "real" data -""" +"""Integration tests for "real" data.""" import glob import json @@ -33,7 +31,7 @@ def check_integration_outputs(fname, outpath, n_logs=1): """ - Checks outputs of integration tests + Checks outputs of integration tests. Parameters ---------- @@ -80,7 +78,7 @@ def check_integration_outputs(fname, outpath, n_logs=1): def data_for_testing_info(test_dataset=str): """ - Get the path and download link for each dataset used for testing + Get the path and download link for each dataset used for testing. Also creates the base directories into which the data and output directories are written @@ -136,8 +134,9 @@ def data_for_testing_info(test_dataset=str): def download_test_data(osf_id, test_data_path): """ If current data is not already available, downloads tar.gz data - stored at `https://osf.io/osf_id/download` - and unpacks into `out_path` + stored at `https://osf.io/osf_id/download`. + + and unpacks into `out_path`. 
Parameters ---------- @@ -279,7 +278,7 @@ def test_integration_five_echo(skip_integration): def test_integration_four_echo(skip_integration): - """Integration test of the full tedana workflow using four-echo test data""" + """Integration test of the full tedana workflow using four-echo test data.""" if skip_integration: pytest.skip("Skipping four-echo integration test") @@ -327,7 +326,7 @@ def test_integration_four_echo(skip_integration): def test_integration_three_echo(skip_integration): - """Integration test of the full tedana workflow using three-echo test data""" + """Integration test of the full tedana workflow using three-echo test data.""" if skip_integration: pytest.skip("Skipping three-echo integration test") @@ -648,7 +647,7 @@ def test_integration_reclassify_index_failures(skip_integration): def test_integration_t2smap(skip_integration): - """Integration test of the full t2smap workflow using five-echo test data""" + """Integration test of the full t2smap workflow using five-echo test data.""" if skip_integration: pytest.skip("Skipping t2smap integration test") test_data_path, osf_id = data_for_testing_info("five-echo") diff --git a/tedana/tests/test_io.py b/tedana/tests/test_io.py index 19062babd..9bdbd27e9 100644 --- a/tedana/tests/test_io.py +++ b/tedana/tests/test_io.py @@ -1,6 +1,4 @@ -""" -Tests for tedana.io -""" +"""Tests for tedana.io.""" import json import os @@ -98,8 +96,9 @@ def test_load_data(): def test_smoke_split_ts(): """ - Ensures that split_ts returns output when fed in with random inputs - Note: classification is ["accepted", "rejected", "ignored"] + Ensures that split_ts returns output when given random inputs. + + Note: classification is ["accepted", "rejected", "ignored"]. """ np.random.seed(0) # seeded because comptable MUST have accepted components n_samples = 100 @@ -124,8 +123,9 @@ def test_smoke_split_ts(): def test_smoke_write_split_ts(): - """ - Ensures that write_split_ts writes out the expected files with random input and tear them down + """Ensures that write_split_ts writes out the expected files with random input. + + The files are then torn down. """ np.random.seed(0) # at least one accepted and one rejected, thus all files are generated n_samples, n_times, n_components = 64350, 10, 6 @@ -156,8 +156,9 @@ def test_smoke_write_split_ts(): def test_smoke_filewrite(): """ - Ensures that filewrite fails for no known image type, write a known key - in both bids and orig formats + Ensures that filewrite fails for an unknown image type and writes a known key. + + The key is written in both bids and orig formats.
""" n_samples, _, _ = 64350, 10, 6 data_1d = np.random.random((n_samples)) @@ -178,9 +179,7 @@ def test_smoke_filewrite(): def test_smoke_load_data(): - """ - Ensures that data is loaded when given a random neuroimage - """ + """Ensures that data is loaded when given a random neuroimage.""" data = os.path.join(data_dir, "mask.nii.gz") n_echos = 1 @@ -193,9 +192,7 @@ def test_smoke_load_data(): def test_prep_data_for_json(): - """ - Tests for prep_data_for_json - """ + """Tests for prep_data_for_json.""" # Should reject non-dict entities since that is required for saver with pytest.raises(TypeError): me.prep_data_for_json(1) @@ -222,9 +219,7 @@ def test_prep_data_for_json(): def test_str_to_component_list(): - """ - Tests for converting a string to a component list - """ + """Tests for converting a string to a component list.""" int_list_1 = [1] int_list_2 = [1, 4, 5] test_list_1 = [str(x) for x in int_list_1] @@ -270,10 +265,8 @@ def test_fname_to_component_list(): assert result == [1, 1] -def test_CustomEncoder(): - """ - Test the encoder we use for JSON incompatibilities - """ +def test_custom_encoder(): + """Test the encoder we use for JSON incompatibilities.""" # np int64 test_data = {"data": np.int64(4)} encoded = json.dumps(test_data, cls=me.CustomEncoder) diff --git a/tedana/tests/test_metrics.py b/tedana/tests/test_metrics.py index c3ce575c4..63d10d947 100644 --- a/tedana/tests/test_metrics.py +++ b/tedana/tests/test_metrics.py @@ -14,7 +14,7 @@ def testdata1(): """Data used for tests of the metrics module.""" tes = np.array([14.5, 38.5, 62.5]) - in_files = [op.join(get_test_data_path(), "echo{0}.nii.gz".format(i + 1)) for i in range(3)] + in_files = [op.join(get_test_data_path(), f"echo{i + 1}.nii.gz") for i in range(3)] data_cat, ref_img = io.load_data(in_files, n_echos=len(tes)) _, adaptive_mask = utils.make_adaptive_mask(data_cat, getsum=True) data_optcom = np.mean(data_cat, axis=1) @@ -98,14 +98,14 @@ def test_smoke_calculate_f_maps(): """Smoke test for tedana.metrics.dependence.calculate_f_maps.""" n_voxels, n_echos, n_volumes, n_components = 1000, 5, 100, 50 data_cat = np.random.random((n_voxels, n_echos, n_volumes)) - Z_maps = np.random.normal(size=(n_voxels, n_components)) + z_maps = np.random.normal(size=(n_voxels, n_components)) mixing = np.random.random((n_volumes, n_components)) adaptive_mask = np.random.randint(1, n_echos + 1, size=n_voxels) tes = np.array([15, 25, 35, 45, 55]) - F_T2_maps, F_S0_maps, _, _ = dependence.calculate_f_maps( - data_cat, Z_maps, mixing, adaptive_mask, tes, f_max=500 + f_t2_maps, f_s0_maps, _, _ = dependence.calculate_f_maps( + data_cat, z_maps, mixing, adaptive_mask, tes, f_max=500 ) - assert F_T2_maps.shape == F_S0_maps.shape == (n_voxels, n_components) + assert f_t2_maps.shape == f_s0_maps.shape == (n_voxels, n_components) def test_smoke_calculate_varex(): @@ -138,26 +138,26 @@ def test_smoke_compute_dice(): def test_smoke_compute_signal_minus_noise_z(): """Smoke test for tedana.metrics.dependence.compute_signal_minus_noise_z.""" n_voxels, n_components = 1000, 50 - Z_maps = np.random.normal(size=(n_voxels, n_components)) - Z_clmaps = np.random.randint(0, 2, size=(n_voxels, n_components)) - F_T2_maps = np.random.random((n_voxels, n_components)) + z_maps = np.random.normal(size=(n_voxels, n_components)) + z_clmaps = np.random.randint(0, 2, size=(n_voxels, n_components)) + f_t2_maps = np.random.random((n_voxels, n_components)) ( signal_minus_noise_z, signal_minus_noise_p, - ) = dependence.compute_signal_minus_noise_z(Z_maps, Z_clmaps, 
F_T2_maps, z_thresh=1.95) + ) = dependence.compute_signal_minus_noise_z(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95) assert signal_minus_noise_z.shape == signal_minus_noise_p.shape == (n_components,) def test_smoke_compute_signal_minus_noise_t(): """Smoke test for tedana.metrics.dependence.compute_signal_minus_noise_t.""" n_voxels, n_components = 1000, 50 - Z_maps = np.random.normal(size=(n_voxels, n_components)) - Z_clmaps = np.random.randint(0, 2, size=(n_voxels, n_components)) - F_T2_maps = np.random.random((n_voxels, n_components)) + z_maps = np.random.normal(size=(n_voxels, n_components)) + z_clmaps = np.random.randint(0, 2, size=(n_voxels, n_components)) + f_t2_maps = np.random.random((n_voxels, n_components)) ( signal_minus_noise_t, signal_minus_noise_p, - ) = dependence.compute_signal_minus_noise_t(Z_maps, Z_clmaps, F_T2_maps, z_thresh=1.95) + ) = dependence.compute_signal_minus_noise_t(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95) assert signal_minus_noise_t.shape == signal_minus_noise_p.shape == (n_components,) @@ -182,12 +182,12 @@ def test_smoke_generate_decision_table_score(): """Smoke test for tedana.metrics.dependence.generate_decision_table_score.""" n_voxels, n_components = 1000, 50 kappa = np.random.random(n_components) - dice_FT2 = np.random.random(n_components) + dice_ft2 = np.random.random(n_components) signal_minus_noise_t = np.random.normal(size=n_components) countnoise = np.random.randint(0, n_voxels, size=n_components) - countsigFT2 = np.random.randint(0, n_voxels, size=n_components) + countsigft2 = np.random.randint(0, n_voxels, size=n_components) decision_table_score = dependence.generate_decision_table_score( - kappa, dice_FT2, signal_minus_noise_t, countnoise, countsigFT2 + kappa, dice_ft2, signal_minus_noise_t, countnoise, countsigft2 ) assert decision_table_score.shape == (n_components,) @@ -195,8 +195,8 @@ def test_smoke_generate_decision_table_score(): def test_smoke_calculate_dependence_metrics(): """Smoke test for tedana.metrics.dependence.calculate_dependence_metrics.""" n_voxels, n_components = 1000, 50 - F_T2_maps = np.random.random((n_voxels, n_components)) - F_S0_maps = np.random.random((n_voxels, n_components)) - Z_maps = np.random.random((n_voxels, n_components)) - kappas, rhos = dependence.calculate_dependence_metrics(F_T2_maps, F_S0_maps, Z_maps) + f_t2_maps = np.random.random((n_voxels, n_components)) + f_s0_maps = np.random.random((n_voxels, n_components)) + z_maps = np.random.random((n_voxels, n_components)) + kappas, rhos = dependence.calculate_dependence_metrics(f_t2_maps, f_s0_maps, z_maps) assert kappas.shape == rhos.shape == (n_components,) diff --git a/tedana/tests/test_reporting.py b/tedana/tests/test_reporting.py index ede7249d5..91da4f27c 100644 --- a/tedana/tests/test_reporting.py +++ b/tedana/tests/test_reporting.py @@ -1,15 +1,11 @@ -""" -Tests for tedana.reporting -""" +"""Tests for tedana.reporting.""" import numpy as np from tedana import reporting def test_smoke_trim_edge_zeros(): - """ - Ensures that trim_edge_zeros works with random inputs - """ + """Ensures that trim_edge_zeros works with random inputs.""" arr = np.random.random((100, 100)) assert reporting.static_figures._trim_edge_zeros(arr) is not None diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 909af2b21..8044d5bd8 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -10,7 +10,7 @@ def test_manual_classify_smoke(): - """Smoke tests for all options in manual_classify""" + """Smoke 
tests for all options in manual_classify.""" selector = sample_selector(options="provclass") @@ -64,7 +64,7 @@ def test_manual_classify_smoke(): def test_dec_left_op_right_succeeds(): - """tests for successful calls to dec_left_op_right""" + """Tests for successful calls to dec_left_op_right.""" selector = sample_selector(options="provclass") @@ -223,7 +223,7 @@ def test_dec_left_op_right_succeeds(): def test_dec_left_op_right_fails(): - """tests for calls to dec_left_op_right that raise errors""" + """Tests for calls to dec_left_op_right that raise errors.""" selector = sample_selector(options="provclass") decide_comps = "provisional accept" @@ -366,7 +366,7 @@ def test_dec_left_op_right_fails(): def test_dec_variance_lessthan_thresholds_smoke(): - """Smoke tests for dec_variance_lessthan_thresholds""" + """Smoke tests for dec_variance_lessthan_thresholds.""" selector = sample_selector(options="provclass") decide_comps = "provisional accept" @@ -416,7 +416,7 @@ def test_dec_variance_lessthan_thresholds_smoke(): def test_calc_kappa_elbow(): - """Smoke tests for calc_kappa_elbow""" + """Smoke tests for calc_kappa_elbow.""" selector = sample_selector() decide_comps = "all" @@ -493,7 +493,7 @@ def test_calc_kappa_elbow(): def test_calc_rho_elbow(): - """Smoke tests for calc_rho_elbow""" + """Smoke tests for calc_rho_elbow.""" selector = sample_selector(options="unclass") decide_comps = "all" @@ -602,7 +602,7 @@ def test_calc_rho_elbow(): def test_calc_median_smoke(): - """Smoke tests for calc_median""" + """Smoke tests for calc_median.""" selector = sample_selector() decide_comps = "all" @@ -679,7 +679,7 @@ def test_calc_median_smoke(): def test_dec_classification_doesnt_exist_smoke(): - """Smoke tests for dec_classification_doesnt_exist""" + """Smoke tests for dec_classification_doesnt_exist.""" selector = sample_selector(options="unclass") decide_comps = ["unclassified", "provisional accept"] @@ -765,7 +765,7 @@ def test_dec_classification_doesnt_exist_smoke(): def test_dec_reclassify_high_var_comps(): - """tests for dec_reclassify_high_var_comps""" + """Tests for dec_reclassify_high_var_comps.""" selector = sample_selector(options="unclass") decide_comps = "unclassified" @@ -828,7 +828,7 @@ def test_dec_reclassify_high_var_comps(): def test_calc_varex_thresh_smoke(): - """Smoke tests for calc_varex_thresh""" + """Smoke tests for calc_varex_thresh.""" # Standard use of this function requires some components to be "provisional accept" selector = sample_selector() @@ -1021,7 +1021,7 @@ def test_calc_varex_thresh_smoke(): def test_calc_extend_factor_smoke(): - """Smoke tests for calc_extend_factor""" + """Smoke tests for calc_extend_factor.""" selector = sample_selector() @@ -1062,7 +1062,7 @@ def test_calc_extend_factor_smoke(): def test_calc_max_good_meanmetricrank_smoke(): - """Smoke tests for calc_max_good_meanmetricrank""" + """Smoke tests for calc_max_good_meanmetricrank.""" # Standard use of this function requires some components to be "provisional accept" selector = sample_selector("provclass") @@ -1145,7 +1145,7 @@ def test_calc_max_good_meanmetricrank_smoke(): def test_calc_varex_kappa_ratio_smoke(): - """Smoke tests for calc_varex_kappa_ratio""" + """Smoke tests for calc_varex_kappa_ratio.""" # Standard use of this function requires some components to be "provisional accept" selector = sample_selector("provclass") @@ -1194,7 +1194,7 @@ def test_calc_varex_kappa_ratio_smoke(): def test_calc_revised_meanmetricrank_guesses_smoke(): - """Smoke tests for 
calc_revised_meanmetricrank_guesses""" + """Smoke tests for calc_revised_meanmetricrank_guesses.""" # Standard use of this function requires some components to be "provisional accept" selector = sample_selector("provclass") diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 2f25baea8..09bec74fe 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -13,7 +13,7 @@ def sample_component_table(options=None): """ - Retrieves a sample component table + Retrieves a sample component table. Options: Different strings will also the contents of the component table 'provclass': Change the classifications to "provisional accept" for 4 components @@ -37,12 +37,11 @@ def sample_component_table(options=None): def sample_selector(options=None): """ Retrieves a sample component table and initializes - a selector using that component table and the minimal tree + a selector using that component table and the minimal tree. options: Different strings will alter the selector 'provclass': Change the classifications to "provisional accept" for 4 components 'unclass': Change 4 classifications to "provisional accept" and the rest to "unclassified" - """ tree = "minimal" @@ -68,8 +67,9 @@ def sample_selector(options=None): def test_selectcomps2use_succeeds(): """ Tests to make sure selectcomps2use runs with full range of inputs. + Include tests to make sure the correct number of components are selected - from the pre-defined sample_comptable.tsv component table + from the pre-defined sample_comptable.tsv component table. """ selector = sample_selector() @@ -96,7 +96,7 @@ def test_selectcomps2use_succeeds(): def test_selectcomps2use_fails(): - """Tests for selectcomps2use failure modes""" + """Tests for selectcomps2use failure modes.""" selector = sample_selector() decide_comps_options = [ @@ -119,9 +119,10 @@ def test_selectcomps2use_fails(): def test_comptable_classification_changer_succeeds(): """ All conditions where comptable_classification_changer should run - Note: This confirms the function runs, but not that outputs are accurate + Note: This confirms the function runs, but not that outputs are accurate. + Also tests conditions where the warning logger is used, but doesn't - check the logger + check the logger. 
""" def validate_changes(expected_classification): @@ -183,7 +184,7 @@ def validate_changes(expected_classification): def test_change_comptable_classifications_succeeds(): - """All conditions where change_comptable_classifications should run""" + """All conditions where change_comptable_classifications should run.""" selector = sample_selector(options="provclass") @@ -212,7 +213,7 @@ def test_change_comptable_classifications_succeeds(): def test_clean_dataframe_smoke(): - """A smoke test for the clean_dataframe function""" + """A smoke test for the clean_dataframe function.""" component_table = sample_component_table(options="comptable") selection_utils.clean_dataframe(component_table) @@ -223,7 +224,7 @@ def test_clean_dataframe_smoke(): def test_confirm_metrics_exist_succeeds(): - """tests confirm_metrics_exist run with correct inputs""" + """Tests confirm_metrics_exist run with correct inputs.""" component_table = sample_component_table(options="comptable") # Testing for metrics that exist with 1 or 2 necessary metrics in a set @@ -233,7 +234,7 @@ def test_confirm_metrics_exist_succeeds(): def test_confirm_metrics_exist_fails(): - """tests confirm_metrics_exist for failure conditions""" + """Tests confirm_metrics_exist for failure conditions.""" component_table = sample_component_table(options="comptable") @@ -247,7 +248,7 @@ def test_confirm_metrics_exist_fails(): def test_log_decision_tree_step_smoke(): - """A smoke test for log_decision_tree_step""" + """A smoke test for log_decision_tree_step.""" selector = sample_selector() @@ -299,7 +300,7 @@ def test_log_decision_tree_step_smoke(): def test_log_classification_counts_smoke(): - """A smoke test for log_classification_counts""" + """A smoke test for log_classification_counts.""" component_table = sample_component_table(options="comptable") @@ -352,7 +353,7 @@ def test_getelbow_cons_smoke(): def test_kappa_elbow_kundu_smoke(): - """A smoke test for the kappa_elbow_kundu function""" + """A smoke test for the kappa_elbow_kundu function.""" component_table = sample_component_table() @@ -399,7 +400,7 @@ def test_kappa_elbow_kundu_smoke(): def test_rho_elbow_kundu_liberal_smoke(): - """A smoke test for the rho_elbow_kundu_liberal function""" + """A smoke test for the rho_elbow_kundu_liberal function.""" component_table = sample_component_table(options="unclass") # Normal execution with default kundu threshold @@ -466,7 +467,7 @@ def test_rho_elbow_kundu_liberal_smoke(): def test_get_extend_factor_smoke(): - """A smoke test for get_extend_factor""" + """A smoke test for get_extend_factor.""" val = selection_utils.get_extend_factor(extend_factor=int(10)) assert isinstance(val, float) diff --git a/tedana/tests/test_stats.py b/tedana/tests/test_stats.py index 653dadffb..c891c33af 100644 --- a/tedana/tests/test_stats.py +++ b/tedana/tests/test_stats.py @@ -1,6 +1,4 @@ -""" -Tests for the tedana stats module -""" +"""Tests for the tedana stats module.""" import random import numpy as np @@ -10,10 +8,7 @@ def test_break_computefeats2(): - """ - Ensure that computefeats2 fails when input data do not have the right - shapes. 
- """ + """Ensure that computefeats2 fails when input data do not have the right shapes.""" n_samples, n_vols, n_comps = 10000, 100, 50 data = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) @@ -44,9 +39,7 @@ def test_break_computefeats2(): def test_smoke_computefeats2(): - """ - Ensures that computefeats2 works with random inputs and different optional parameters - """ + """Ensures that computefeats2 works with random inputs and different optional parameters.""" n_samples, n_times, n_components = 100, 20, 6 data = np.random.random((n_samples, n_times)) mmix = np.random.random((n_times, n_components)) @@ -58,71 +51,68 @@ def test_smoke_computefeats2(): def test_get_coeffs(): - """ - Check least squares coefficients. - """ + """Check least squares coefficients.""" # Simulate one voxel with 40 TRs data = np.empty((2, 40)) data[0, :] = np.arange(0, 200, 5) data[1, :] = np.arange(0, 200, 5) - X = np.arange(0, 40)[:, np.newaxis] + x = np.arange(0, 40)[:, np.newaxis] mask = np.array([True, False]) - betas = get_coeffs(data, X, mask=None, add_const=False) + betas = get_coeffs(data, x, mask=None, add_const=False) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5.0, 5.0])) - betas = get_coeffs(data, X, mask=None, add_const=True) + betas = get_coeffs(data, x, mask=None, add_const=True) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5.0, 5.0])) - betas = get_coeffs(data, X, mask=mask, add_const=False) + betas = get_coeffs(data, x, mask=mask, add_const=False) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5, 0])) - betas = get_coeffs(data, X, mask=mask, add_const=True) + betas = get_coeffs(data, x, mask=mask, add_const=True) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5, 0])) def test_break_get_coeffs(): """ - Ensure that get_coeffs fails when input data do not have the right + Ensure that get_coeffs fails when input data do not have the right. + shapes. 
""" n_samples, n_echos, n_vols, n_comps = 10000, 5, 100, 50 data = np.empty((n_samples, n_vols)) - X = np.empty((n_vols, n_comps)) + x = np.empty((n_vols, n_comps)) mask = np.empty((n_samples)) data = np.empty((n_samples)) with pytest.raises(ValueError): - get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, x, mask, add_const=False) data = np.empty((n_samples, n_vols)) - X = np.empty((n_vols)) + x = np.empty((n_vols)) with pytest.raises(ValueError): - get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, x, mask, add_const=False) data = np.empty((n_samples, n_echos, n_vols + 1)) - X = np.empty((n_vols, n_comps)) + x = np.empty((n_vols, n_comps)) with pytest.raises(ValueError): - get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, x, mask, add_const=False) data = np.empty((n_samples, n_echos, n_vols)) mask = np.empty((n_samples, n_echos, n_vols)) with pytest.raises(ValueError): - get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, x, mask, add_const=False) mask = np.empty((n_samples + 1, n_echos)) with pytest.raises(ValueError): - get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, x, mask, add_const=False) def test_smoke_get_coeffs(): - """ - Ensure that get_coeffs returns outputs with different inputs and optional paramters - """ + """Ensure that get_coeffs returns outputs with different inputs and optional paramters.""" n_samples, _, n_times, n_components = 100, 5, 20, 6 data_2d = np.random.random((n_samples, n_times)) x = np.random.random((n_times, n_components)) @@ -142,9 +132,7 @@ def test_getfbounds(): def test_smoke_getfbounds(): - """ - Ensures that getfbounds returns outputs when fed in a random number of echo - """ + """Ensures that getfbounds returns outputs when fed in a random number of echo.""" n_echos = random.randint(3, 10) # At least two echos! f05, f025, f01 = getfbounds(n_echos) diff --git a/tedana/tests/test_t2smap.py b/tedana/tests/test_t2smap.py index bd8bf36a6..f3c60276f 100644 --- a/tedana/tests/test_t2smap.py +++ b/tedana/tests/test_t2smap.py @@ -1,6 +1,4 @@ -""" -Tests for t2smap. -""" +"""Tests for t2smap.""" import os.path as op from shutil import rmtree @@ -129,9 +127,7 @@ def test_basic_t2smap4(self): assert len(img.shape) == 4 def test_t2smap_cli(self): - """ - Run test_basic_t2smap1, but use the CLI method. - """ + """Run test_basic_t2smap1, but use the CLI method.""" data_dir = get_test_data_path() data = [ op.join(data_dir, "echo1.nii.gz"), diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 9abb221bb..4039f28ad 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -1,6 +1,4 @@ -""" -Tests for tedana.utils -""" +"""Tests for tedana.utils.""" import random from os.path import dirname @@ -14,7 +12,7 @@ rs = np.random.RandomState(1234) datadir = pjoin(dirname(__file__), "data") -fnames = [pjoin(datadir, "echo{}.nii.gz".format(n)) for n in range(1, 4)] +fnames = [pjoin(datadir, f"echo{n}.nii.gz") for n in range(1, 4)] tes = ["14.5", "38.5", "62.5"] @@ -106,9 +104,10 @@ def test_make_adaptive_mask(): def test_smoke_reshape_niimg(): """ - ensure that reshape_niimg returns reasonable objects with random inputs - in the correct format - Note: reshape_niimg could take in 3D or 4D array + Ensure that reshape_niimg returns reasonable objects with random inputs + in the correct format. + + Note: reshape_niimg could take in 3D or 4D array. 
""" data_3d = np.random.random((100, 5, 20)) data_4d = np.random.random((100, 5, 20, 50)) @@ -125,9 +124,10 @@ def test_smoke_reshape_niimg(): def test_smoke_make_adaptive_mask(): """ - ensure that make_adaptive_mask returns reasonable objects with random inputs - in the correct format - Note: make_adaptive_mask has optional paramters - mask and getsum + Ensure that make_adaptive_mask returns reasonable objects with random inputs + in the correct format. + + Note: make_adaptive_mask has optional paramters - mask and getsum. """ n_samples = 100 n_echos = 5 @@ -142,9 +142,10 @@ def test_smoke_make_adaptive_mask(): def test_smoke_unmask(): """ - ensure that unmask returns reasonable objects with random inputs - in the correct format - Note: unmask could take in 1D or 2D or 3D arrays + Ensure that unmask returns reasonable objects with random inputs + in the correct format. + + Note: unmask could take in 1D or 2D or 3D arrays. """ data_1d = np.random.random((100)) data_2d = np.random.random((100, 5)) @@ -158,9 +159,10 @@ def test_smoke_unmask(): def test_smoke_dice(): """ - ensure that dice returns reasonable objects with random inputs - in the correct format - Note: two arrays must be in the same length + Ensure that dice returns reasonable objects with random inputs + in the correct format. + + Note: two arrays must be in the same length. """ arr1 = np.random.random((100)) arr2 = np.random.random((100)) @@ -170,8 +172,8 @@ def test_smoke_dice(): def test_smoke_andb(): """ - ensure that andb returns reasonable objects with random inputs - in the correct format + Ensure that andb returns reasonable objects with random inputs + in the correct format. """ arr = np.random.random((100, 10)).tolist() # 2D list of "arrays" @@ -180,8 +182,8 @@ def test_smoke_andb(): def test_smoke_get_spectrum(): """ - ensure that get_spectrum returns reasonable objects with random inputs - in the correct format + Ensure that get_spectrum returns reasonable objects with random inputs + in the correct format. """ data = np.random.random((100)) tr = random.random() @@ -193,9 +195,10 @@ def test_smoke_get_spectrum(): def test_smoke_threshold_map(): """ - ensure that threshold_map returns reasonable objects with random inputs - in the correct format - Note: using 3D array as img, some parameters are optional and are all tested + Ensure that threshold_map returns reasonable objects with random inputs + in the correct format. + + Note: using 3D array as img, some parameters are optional and are all tested. """ img = np.random.random((10, 10, 10)) # 3D array must of of size S min_cluster_size = random.randint(1, 100) @@ -214,17 +217,13 @@ def test_smoke_threshold_map(): def test_sec2millisec(): - """ - Ensure that sec2millisec returns 1000x the input values. - """ + """Ensure that sec2millisec returns 1000x the input values.""" assert utils.sec2millisec(5) == 5000 assert utils.sec2millisec(np.array([5])) == np.array([5000]) def test_millisec2sec(): - """ - Ensure that millisec2sec returns 1/1000x the input values. - """ + """Ensure that millisec2sec returns 1/1000x the input values.""" assert utils.millisec2sec(5000) == 5 assert utils.millisec2sec(np.array([5000])) == np.array([5]) diff --git a/tedana/tests/utils.py b/tedana/tests/utils.py index d64011536..5b4a473e3 100644 --- a/tedana/tests/utils.py +++ b/tedana/tests/utils.py @@ -1,13 +1,13 @@ -""" -Utility functions for testing tedana. 
-""" +"""Utility functions for testing tedana.""" from os.path import abspath, dirname, join, sep def get_test_data_path(): """ - Returns the path to test datasets, terminated with separator. Test-related + Returns the path to test datasets, terminated with separator. + + Test-related data are kept in tests folder in "data". Based on function by Yaroslav Halchenko used in Neurosynth Python package. """ diff --git a/tedana/utils.py b/tedana/utils.py index 715c0d00f..1e2c63256 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -1,6 +1,4 @@ -""" -Utilities for tedana package -""" +"""Utilities for tedana package.""" import logging import os.path as op import platform @@ -41,7 +39,7 @@ def reshape_niimg(data): def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): """ - Makes map of `data` specifying longest echo a voxel can be sampled with + Make map of `data` specifying longest echo a voxel can be sampled with. Parameters ---------- @@ -111,8 +109,8 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): if np.any(masksum[mask] < threshold): n_bad_voxels = np.sum(masksum[mask] < threshold) LGR.warning( - "{0} voxels in user-defined mask do not have good " - "signal. Removing voxels from mask.".format(n_bad_voxels) + f"{n_bad_voxels} voxels in user-defined mask do not have good " + "signal. Removing voxels from mask." ) masksum[masksum < threshold] = 0 mask = masksum.astype(bool) @@ -125,7 +123,7 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): def unmask(data, mask): """ - Unmasks `data` using non-zero entries of `mask` + Unmasks `data` using non-zero entries of `mask`. Parameters ---------- @@ -140,7 +138,6 @@ def unmask(data, mask): out : (S [x E [x T]]) :obj:`numpy.ndarray` Unmasked `data` array """ - out = np.zeros(mask.shape + data.shape[1:], dtype=data.dtype) out[mask] = data return out @@ -148,7 +145,9 @@ def unmask(data, mask): def dice(arr1, arr2, axis=None): """ - Compute Dice's similarity index between two numpy arrays. Arrays will be + Compute Dice's similarity index between two numpy arrays. + + Arrays will be binarized before comparison. This method was first proposed in :footcite:t:`dice1945measures` and @@ -183,7 +182,7 @@ def dice(arr1, arr2, axis=None): raise ValueError("Shape mismatch: arr1 and arr2 must have the same shape.") if axis is not None and axis > (arr1.ndim - 1): - raise ValueError("Axis provided {} not supported by the input arrays.".format(axis)) + raise ValueError(f"Axis provided {axis} not supported by the input arrays.") arr_sum = arr1.sum(axis=axis) + arr2.sum(axis=axis) intersection = np.logical_and(arr1, arr2) @@ -208,7 +207,7 @@ def dice(arr1, arr2, axis=None): def andb(arrs): """ - Sums arrays in `arrs` + Sum arrays in `arrs`. Parameters ---------- @@ -220,7 +219,6 @@ def andb(arrs): result : :obj:`numpy.ndarray` Integer array of summed `arrs` """ - # coerce to integer and ensure all arrays are the same shape arrs = [check_array(arr, dtype=int, ensure_2d=False, allow_nd=True) for arr in arrs] if not np.all([arr1.shape == arr2.shape for arr1 in arrs for arr2 in arrs]): @@ -234,8 +232,9 @@ def andb(arrs): def get_spectrum(data: np.array, tr: float = 1.0): """ - Returns the power spectrum and corresponding frequencies when provided - with a component time course and repitition time. + Return the power spectrum and corresponding frequencies. + + Done when provided with a component time course and repitition time. 
Parameters ---------- @@ -244,7 +243,6 @@ def get_spectrum(data: np.array, tr: float = 1.0): tr : :obj:`float` Reptition time (TR) of the data """ - # adapted from @dangom power_spectrum = np.abs(np.fft.rfft(data)) ** 2 freqs = np.fft.rfftfreq(power_spectrum.size * 2 - 1, tr) @@ -380,6 +378,19 @@ def millisec2sec(arr): def setup_loggers(logname=None, repname=None, quiet=False, debug=False): + """Set up loggers for tedana. + + Parameters + ---------- + logname : str, optional + Name of log file, by default None + repname : str, optional + Name of report file, by default None + quiet : bool, optional + Whether to suppress logging to console, by default False + debug : bool, optional + Whether to set logging level to debug, by default False + """ # Set up the general logger log_formatter = logging.Formatter( "%(asctime)s\t%(module)s.%(funcName)-12s\t%(levelname)-8s\t%(message)s", @@ -416,6 +427,7 @@ def setup_loggers(logname=None, repname=None, quiet=False, debug=False): def teardown_loggers(): + """Close loggers.""" for local_logger in (RepLGR, LGR): for handler in local_logger.handlers[:]: handler.close() diff --git a/tedana/workflows/__init__.py b/tedana/workflows/__init__.py index 571a2d567..04b284826 100644 --- a/tedana/workflows/__init__.py +++ b/tedana/workflows/__init__.py @@ -1,9 +1,8 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: -from .ica_reclassify import ica_reclassify_workflow -from .t2smap import t2smap_workflow - -# Overrides submodules with their functions. -from .tedana import tedana_workflow +"""Command line interfaces and workflows.""" +from tedana.workflows.ica_reclassify import ica_reclassify_workflow +from tedana.workflows.t2smap import t2smap_workflow +from tedana.workflows.tedana import tedana_workflow __all__ = ["tedana_workflow", "t2smap_workflow", "ica_reclassify_workflow"] diff --git a/tedana/workflows/ica_reclassify.py b/tedana/workflows/ica_reclassify.py index 0a1ebe3e3..e88a63fbc 100644 --- a/tedana/workflows/ica_reclassify.py +++ b/tedana/workflows/ica_reclassify.py @@ -1,6 +1,4 @@ -""" -Run the reclassification workflow for a previous tedana run -""" +"""Run the reclassification workflow for a previous tedana run.""" import argparse import datetime import logging @@ -26,14 +24,12 @@ def _get_parser(): - """ - Parses command line inputs for tedana + """Parse command line inputs for ica_reclassify. Returns ------- parser.parse_args() : argparse dict """ - from tedana import __version__ verstr = f"ica_reclassify v{__version__}" @@ -153,7 +149,7 @@ def _get_parser(): def _main(argv=None): - """ica_reclassify entry point""" + """Run the ica_reclassify workflow.""" reclassify_command = "ica_reclassify " + " ".join(sys.argv[1:]) args = _get_parser().parse_args(argv) @@ -180,17 +176,17 @@ def _main(argv=None): def _parse_manual_list(manual_list): """ - Parse the list of components to accept or reject into a list of integers + Parse the list of components to accept or reject into a list of integers. Parameters ---------- - manual_list: :obj:`str` :obj:`list[str]` or [] or None + manual_list : :obj:`str` :obj:`list[str]` or [] or None String of integers separated by spaces, commas, or tabs A file name for a file that contains integers Returns ------- - manual_nums: :obj:`list[int]` + manual_nums : :obj:`list[int]` A list of integers or an empty list. 
Note @@ -253,11 +249,11 @@ def ica_reclassify_workflow( Parameters ---------- - registry: :obj:`str` + registry : :obj:`str` The previously run registry as a JSON file. - accept: :obj: `list` + accept : :obj: `list` A list of integer values of components to accept in this workflow. - reject: :obj: `list` + reject : :obj: `list` A list of integer values of components to reject in this workflow. out_dir : :obj:`str`, optional Output directory. @@ -469,8 +465,7 @@ def ica_reclassify_workflow( "series." ) - n_vols = data_oc.shape[3] - img_t_r = io_generator.reference_img.header.get_zooms()[-1] + # img_t_r = io_generator.reference_img.header.get_zooms()[-1] adaptive_mask = utils.reshape_niimg(adaptive_mask) mask_denoise = adaptive_mask >= 1 data_oc = utils.reshape_niimg(data_oc) @@ -486,7 +481,6 @@ def ica_reclassify_workflow( mask=mask_denoise, comptable=comptable, mmix=mmix, - n_vols=n_vols, io_generator=io_generator, ) @@ -568,7 +562,7 @@ def ica_reclassify_workflow( ) LGR.info("Generating dynamic report") - reporting.generate_report(io_generator, tr=img_t_r) + reporting.generate_report(io_generator) io_generator.save_self() LGR.info("Workflow completed") diff --git a/tedana/workflows/parser_utils.py b/tedana/workflows/parser_utils.py index 794a6b14c..889930c4a 100644 --- a/tedana/workflows/parser_utils.py +++ b/tedana/workflows/parser_utils.py @@ -1,14 +1,14 @@ -""" -Functions for parsers. -""" +"""Functions for parsers.""" import argparse import os.path as op def check_tedpca_value(string, is_parser=True): """ - Check if argument is a float in range (0,1), - an int greater than 1 or one of a list of strings. + Check tedpca argument. + + Check if argument is a float in range (0,1), an int greater than 1 or one of a + list of strings. """ valid_options = ("mdl", "aic", "kic", "kundu", "kundu-stabilize") if string in valid_options: @@ -18,7 +18,7 @@ def check_tedpca_value(string, is_parser=True): try: floatarg = float(string) except ValueError: - msg = "Argument to tedpca must be a number or one of: {}".format(", ".join(valid_options)) + msg = f"Argument to tedpca must be a number or one of: {', '.join(valid_options)}" raise error(msg) if floatarg != int(floatarg): @@ -33,10 +33,8 @@ def check_tedpca_value(string, is_parser=True): def is_valid_file(parser, arg): - """ - Check if argument is existing file. - """ + """Check if argument is existing file.""" if not op.isfile(arg) and arg is not None: - parser.error("The file {0} does not exist!".format(arg)) + parser.error(f"The file {arg} does not exist!") return arg diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index 67cf39e57..fa58d628e 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -1,6 +1,4 @@ -""" -Estimate T2 and S0, and optimally combine data across TEs. -""" +"""Estimate T2 and S0, and optimally combine data across TEs.""" import argparse import logging import os @@ -18,8 +16,7 @@ def _get_parser(): - """ - Parses command line inputs for tedana + """Parse command line inputs for t2smap. 
Returns ------- @@ -233,7 +230,7 @@ def t2smap_workflow( utils.setup_loggers(quiet=quiet, debug=debug) - LGR.info("Using output directory: {}".format(out_dir)) + LGR.info(f"Using output directory: {out_dir}") # ensure tes are in appropriate format tes = [float(te) for te in tes] @@ -243,7 +240,7 @@ def t2smap_workflow( if isinstance(data, str): data = [data] - LGR.info("Loading input data: {}".format([f for f in data])) + LGR.info(f"Loading input data: {[f for f in data]}") catd, ref_img = io.load_data(data, n_echos=n_echos) io_generator = io.OutputGenerator( ref_img, @@ -254,7 +251,7 @@ def t2smap_workflow( make_figures=False, ) n_samp, n_echos, n_vols = catd.shape - LGR.debug("Resulting data shape: {}".format(catd.shape)) + LGR.debug(f"Resulting data shape: {catd.shape}") if mask is None: LGR.info("Computing adaptive mask") @@ -276,15 +273,15 @@ def t2smap_workflow( # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile cap_t2s = stats.scoreatpercentile(t2s_full.flatten(), 99.5, interpolation_method="lower") cap_t2s_sec = utils.millisec2sec(cap_t2s * 10.0) - LGR.debug("Setting cap on T2* map at {:.5f}s".format(cap_t2s_sec)) + LGR.debug(f"Setting cap on T2* map at {cap_t2s_sec:.5f}s") t2s_full[t2s_full > cap_t2s * 10] = cap_t2s LGR.info("Computing optimal combination") # optimally combine data - OCcatd = combine.make_optcom(catd, tes, masksum, t2s=t2s_full, combmode=combmode) + data_oc = combine.make_optcom(catd, tes, masksum, t2s=t2s_full, combmode=combmode) # clean up numerical errors - for arr in (OCcatd, s0_full, t2s_full): + for arr in (data_oc, s0_full, t2s_full): np.nan_to_num(arr, copy=False) s0_full[s0_full < 0] = 0 @@ -303,7 +300,7 @@ def t2smap_workflow( s0_limited, "limited s0 img", ) - io_generator.save_file(OCcatd, "combined img") + io_generator.save_file(data_oc, "combined img") # Write out BIDS-compatible description file derivative_metadata = { @@ -330,7 +327,7 @@ def t2smap_workflow( def _main(argv=None): - """T2smap entry point""" + """Run the t2smap workflow.""" options = _get_parser().parse_args(argv) kwargs = vars(options) n_threads = kwargs.pop("n_threads") diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 15cda7234..df59b871e 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -1,6 +1,4 @@ -""" -Run the "canonical" TE-Dependent ANAlysis workflow. -""" +"""Run the "canonical" TE-Dependent ANAlysis workflow.""" import argparse import datetime import json @@ -38,8 +36,7 @@ def _get_parser(): - """ - Parses command line inputs for tedana + """Parse command line inputs for tedana. Returns ------- @@ -47,7 +44,7 @@ def _get_parser(): """ from tedana import __version__ - verstr = "tedana v{}".format(__version__) + verstr = f"tedana v{__version__}" parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Argument parser follow template provided by RalphyZ # https://stackoverflow.com/a/43456577 @@ -341,8 +338,7 @@ def tedana_workflow( mixm=None, tedana_command=None, ): - """ - Run the "canonical" TE-Dependent ANAlysis workflow. + """Run the "canonical" TE-Dependent ANAlysis workflow. Please remember to cite :footcite:t:`dupre2021te`. @@ -445,7 +441,6 @@ def tedana_workflow( ---------- .. 
footbibliography:: """ - out_dir = op.abspath(out_dir) if not op.isdir(out_dir): os.mkdir(out_dir) @@ -484,7 +479,7 @@ def tedana_workflow( variables = variables.split(", tedana_command")[0] tedana_command = f"tedana_workflow({variables})" - LGR.info("Using output directory: {}".format(out_dir)) + LGR.info(f"Using output directory: {out_dir}") # ensure tes are in appropriate format tes = [float(te) for te in tes] @@ -502,7 +497,7 @@ def tedana_workflow( if isinstance(data, str): data = [data] - LGR.info("Loading input data: {}".format([f for f in data])) + LGR.info(f"Loading input data: {[f for f in data]}") catd, ref_img = io.load_data(data, n_echos=n_echos) io_generator = io.OutputGenerator( @@ -525,7 +520,7 @@ def tedana_workflow( info_dict["Command"] = tedana_command n_samp, n_echos, n_vols = catd.shape - LGR.debug("Resulting data shape: {}".format(catd.shape)) + LGR.debug(f"Resulting data shape: {catd.shape}") # check if TR is 0 img_t_r = io_generator.reference_img.header.get_zooms()[-1] @@ -597,7 +592,7 @@ def tedana_workflow( getsum=True, threshold=1, ) - LGR.debug("Retaining {}/{} samples for denoising".format(mask_denoise.sum(), n_samp)) + LGR.debug(f"Retaining {mask_denoise.sum()}/{n_samp} samples for denoising") io_generator.save_file(masksum_denoise, "adaptive mask img") # Create an adaptive mask with at least 3 good echoes, for classification @@ -611,7 +606,7 @@ def tedana_workflow( "(restricted to voxels with good data in at least the first three echoes) was used for " "the component classification procedure." ) - LGR.debug("Retaining {}/{} samples for classification".format(mask_clf.sum(), n_samp)) + LGR.debug(f"Retaining {mask_clf.sum()}/{n_samp} samples for classification") if t2smap is None: LGR.info("Computing T2* map") @@ -622,7 +617,7 @@ def tedana_workflow( # set a hard cap for the T2* map # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile cap_t2s = stats.scoreatpercentile(t2s_full.flatten(), 99.5, interpolation_method="lower") - LGR.debug("Setting cap on T2* map at {:.5f}s".format(utils.millisec2sec(cap_t2s))) + LGR.debug(f"Setting cap on T2* map at {utils.millisec2sec(cap_t2s):.5f}s") t2s_full[t2s_full > cap_t2s * 10] = cap_t2s io_generator.save_file(utils.millisec2sec(t2s_full), "t2star img") io_generator.save_file(s0_full, "s0 img") @@ -639,23 +634,20 @@ def tedana_workflow( catd, data_oc = gsc.gscontrol_raw(catd, data_oc, n_echos, io_generator) fout = io_generator.save_file(data_oc, "combined img") - LGR.info("Writing optimally combined data set: {}".format(fout)) + LGR.info(f"Writing optimally combined data set: {fout}") if mixm is None: # Identify and remove thermal noise from data dd, n_components = decomposition.tedpca( catd, data_oc, - combmode, mask_clf, masksum_clf, - t2s_full, io_generator, tes=tes, algorithm=tedpca, kdaw=10.0, rdaw=1.0, - verbose=verbose, low_mem=low_mem, ) if verbose: @@ -817,7 +809,6 @@ def tedana_workflow( mask=mask_denoise, comptable=comptable, mmix=mmix, - n_vols=n_vols, io_generator=io_generator, ) @@ -910,14 +901,14 @@ def tedana_workflow( ) LGR.info("Generating dynamic report") - reporting.generate_report(io_generator, tr=img_t_r) + reporting.generate_report(io_generator) LGR.info("Workflow completed") utils.teardown_loggers() def _main(argv=None): - """Tedana entry point""" + """Run the tedana workflow.""" tedana_command = "tedana " + " ".join(sys.argv[1:]) options = _get_parser().parse_args(argv) kwargs = vars(options)