
Improved functions to get leadfields and LCMV weights #374

Merged · 4 commits · Jan 10, 2025
Changes from all commits
182 changes: 74 additions & 108 deletions osl_ephys/source_recon/beamforming.py
@@ -521,151 +521,121 @@ def transform_recon_timeseries(
return recon_timeseries_out, reference_brain_resampled, coords_out, recon_indices


def get_leadfields(
def get_lcmv_weights(
subjects_dir,
subject,
spatial_resolution=None,
reference_brain="mni",
orientation="max-dim",
verbose=None,
):
"""Spatially resamples a (nsensors x ndipoles) array of leadfields (in head/polhemus space) to dipoles on the brain grid of the specified reference brain.
"""Get LCMV beamformer weights.

Parameters
----------
subjects_dir : str
Directory to find subject directories in.
subject : str
Subject name.
leadfield : numpy.ndarray
(nsensors, ndipoles) containing the lead field of each dipole (in head (polhemus) space). Assumes that the dipoles are the same (and in the same order)
as those in the forward model, rhino_files['fwd_model']. Typically derived from the VolSourceEstimate output of MNE source recon methods,
e.g. mne.beamformer.apply_lcmv, obtained using a forward model generated by RHINO.
spatial_resolution : int
Resolution to use for the reference brain in mm (must be an integer, or will be cast to nearest int). If None, then the gridstep used in rhino_files['fwd_model'] is used.
reference_brain : str
'mni' indicates that the reference_brain is the stdbrain in MNI space.
'mri' indicates that the reference_brain is the subject's sMRI in the scaled native/mri space.
'unscaled_mri' indicates that the reference_brain is the subject's sMRI in unscaled native/mri space.
Note that scaled/unscaled relates to the allow_smri_scaling option in coreg. If allow_smri_scaling was False, then the unscaled MRI will be the same as the scaled MRI.
orientation : str
How should we reduce the 3 leadfield dimensions into a scalar? Options:
'l2-norm' takes the L2 norm across the xyz dimensions.
'max-dim' takes the leadfield with the highest value across the xyz dimensions.
'max-power' projects the leadfield onto the eigenvector with the smallest eigenvalue; this is equivalent to the maximum power orientation.
verbose : bool
If True, print out more information.

Returns
-------
leadfield_out : numpy.ndarray
(nsensors, ndipoles) np.array of lead fields resampled on the reference brain grid.
coords_out : (3, ndipoles) numpy.ndarray
Array of coordinates (in mm) of dipoles in leadfield_out in "reference_brain" space.
weights : (ndipoles, nsensors) numpy.ndarray
Beamformer weights resampled on the reference brain grid.
coords : (3, ndipoles) numpy.ndarray
Array of coordinates (in mm) of dipoles in weights in "reference_brain" space.
"""

rhino_files = rhino_utils.get_rhino_files(subjects_dir, subject)
surfaces_filenames = rhino_files["surf"]
coreg_filenames = rhino_files["coreg"]

# ------------------
# Load forward model

# Load forward model and LCMV beamformer
fwd = read_forward_solution(rhino_files["fwd_model"], verbose=verbose)
filters = load_lcmv(subjects_dir, subject)

# ---------------------------------------------
# Get hold of coords of points reconstructed to

# MNE forward model is done in head space in metres
# RHINO does everything in mm
vs = fwd["src"][0]
recon_coords_head = vs["rr"][vs["vertno"]] * 1000 # in mm

# ----------------------
# Get spatial resolution

if spatial_resolution is None:
# Estimate gridstep from forward model
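# (The loop below takes the minimum non-zero distance from the first source point to any other source point, converted to mm, as the grid spacing)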
rr = fwd["src"][0]["rr"]

store = []
for ii in range(rr.shape[0]):
store.append(np.sqrt(np.sum(np.square(rr[ii, :] - rr[0, :]))))
store = np.asarray(store)
spatial_resolution = int(np.round(np.min(store[np.where(store > 0)]) * 1000))

spatial_resolution = int(spatial_resolution)

# ----------------
# Define reference

if reference_brain == "mni":
# Reference is mni stdbrain

# Convert recon_coords_head from head to mni space, head_mri_t_file xform is to unscaled MRI
head_mri_t = rhino_utils.read_trans(coreg_filenames["head_mri_t_file"])
recon_coords_mri = rhino_utils.xform_points(head_mri_t["trans"], recon_coords_head.T).T

# mni_mri_t_file xform is to unscaled MRI
mni_mri_t = rhino_utils.read_trans(surfaces_filenames["mni_mri_t_file"])
recon_coords_out = rhino_utils.xform_points(np.linalg.inv(mni_mri_t["trans"]), recon_coords_mri.T).T

reference_brain = os.environ["FSLDIR"] + "/data/standard/MNI152_T1_1mm_brain.nii.gz"

# Sample reference_brain to the desired resolution
reference_brain_resampled = op.join(coreg_filenames["basedir"], "MNI152_T1_{}mm_brain.nii.gz".format(spatial_resolution))

elif reference_brain == "unscaled_mri":
# Reference is unscaled smri

# Convert recon_coords_head from head to mri space
head_mri_t = rhino_utils.read_trans(coreg_filenames["head_mri_t_file"])
recon_coords_out = rhino_utils.xform_points(head_mri_t["trans"], recon_coords_head.T).T

reference_brain = surfaces_filenames["smri_file"]

# Sample reference_brain to the desired resolution
reference_brain_resampled = reference_brain.replace(".nii.gz", "_{}mm.nii.gz".format(spatial_resolution))

elif reference_brain == "mri":
# Reference is scaled smri
# Weights in head space
weights = filters["weights"]

# Convert recon_coords_head from head to mri space
head_scaledmri_t = rhino_utils.read_trans(coreg_filenames["head_scaledmri_t_file"])
recon_coords_out = rhino_utils.xform_points(head_scaledmri_t["trans"], recon_coords_head.T).T
# Account for whether the sensor data was being whitened
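# (MNE computes LCMV weights on whitened data, so composing them with the whitener should give weights that can be applied directly to the unwhitened sensor data)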
whitener = filters["whitener"]
if whitener is not None:
weights = weights.dot(whitener)

reference_brain = coreg_filenames["smri_file"]
# Transform to a different coordinate space
weights, _, coords, _ = transform_recon_timeseries(
subjects_dir=subjects_dir,
subject=subject,
recon_timeseries=weights,
spatial_resolution=spatial_resolution,
reference_brain=reference_brain,
)

# Sample reference_brain to the desired resolution
reference_brain_resampled = reference_brain.replace(".nii.gz", "_{}mm.nii.gz".format(spatial_resolution))
return weights, coords

else:
raise ValueError("Invalid reference_brain, should be 'mni', 'mri' or 'unscaled_mri'.")

# ---------------------------------------------------------------------
# Get coordinates from reference brain at resolution spatial_resolution
def get_leadfields(
subjects_dir,
subject,
spatial_resolution=None,
reference_brain="mni",
orientation="max-dim",
verbose=None,
):
"""Get leadfields from a forward model.

# Create std brain of the required resolution
rhino_utils.system_call("flirt -in {} -ref {} -out {} -applyisoxfm {}".format(reference_brain, reference_brain, reference_brain_resampled, spatial_resolution))
Parameters
----------
subjects_dir : str
Directory to find subject directories in.
subject : str
Subject name.
spatial_resolution : int
Resolution to use for the reference brain in mm (must be an integer, or will be cast to nearest int). If None, then the gridstep used in rhino_files['fwd_model'] is used.
reference_brain : str
'mni' indicates that the reference_brain is the stdbrain in MNI space.
'mri' indicates that the reference_brain is the subject's sMRI in the scaled native/mri space.
'unscaled_mri' indicates that the reference_brain is the subject's sMRI in unscaled native/mri space.
Note that scaled/unscaled relates to the allow_smri_scaling option in coreg. If allow_smri_scaling was False, then the unscaled MRI will be the same as the scaled MRI.
orientation : str
How should we reduce the 3 leadfield dimensions into a scalar? Options:
'l2-norm' takes the L2 norm across the xyz dimensions.
'max-dim' takes the leadfield with the highest value across the xyz dimensions.
'max-power' projects the leadfield onto the eigenvector with the smallest eigenvalue; this is equivalent to the maximum power orientation.
verbose : bool
If True, print out more information.

coords_out, _ = rhino_utils.niimask2mmpointcloud(reference_brain_resampled)
Returns
-------
leadfield : (nsensors, ndipoles) numpy.ndarray
Lead fields resampled on the reference brain grid.
coords : (3, ndipoles) numpy.ndarray
Array of coordinates (in mm) of dipoles in leadfield in "reference_brain" space.
"""
rhino_files = rhino_utils.get_rhino_files(subjects_dir, subject)

# ------------------------
# Leadfields in head space

# Load the leadfields
fwd = read_forward_solution(rhino_files["fwd_model"], verbose=verbose)
L = fwd['sol']['data']
L = L.reshape((L.shape[0], -1, 3))
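# After the reshape, L is (nsensors, ndipoles, 3): fwd['sol']['data'] stores one xyz triplet per dipole for a free-orientation forward model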

# Reduce the 3 dimensions (x,y,z) to a scalar
if orientation == "l2-norm":
leadfield = np.linalg.norm(L, axis=-1)
leadfields = np.linalg.norm(L, axis=-1)

elif orientation == "max-dim":
L_max = np.max(L, axis=-1)
L_min = np.min(L, axis=-1)
L_max[abs(L_min) > L_max] = L_min[abs(L_min) > L_max]
leadfield = L_max
leadfields = L_max

elif orientation == "max-power":
# Get the orientation for maximum power as the eigenvector with the smallest eigenvalue
@@ -681,25 +651,21 @@ def get_leadfields(
max_power_ori *= signs

# Leadfield for max power orientation
leadfield = np.sum(L * max_power_ori[None, ...], axis=-1)
leadfields = np.sum(L * max_power_ori[None, ...], axis=-1)

else:
raise ValueError("orientation should be 'l2-norm', 'max' or 'max-power'.")

# --------------------------------------------------------------
# For each mni_coords_out find nearest coord in recon_coords_out

leadfield_out = np.zeros((leadfield.shape[0], coords_out.shape[1]))
recon_indices = np.zeros([coords_out.shape[1]])

for cc in range(coords_out.shape[1]):
recon_index, dist = rhino_utils.closest_node(coords_out[:, cc], recon_coords_out)

if dist < spatial_resolution:
leadfield_out[:, cc] = leadfield[:, recon_index]
recon_indices[cc] = recon_index
# Transform to a different coordinate space
leadfields, _, coords, _ = transform_recon_timeseries(
subjects_dir=subjects_dir,
subject=subject,
recon_timeseries=leadfields.T, # pretend sensor dimension is time
spatial_resolution=spatial_resolution,
reference_brain=reference_brain,
)

return leadfield_out, coords_out
return leadfields.T, coords


@verbose
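For reference, a minimal usage sketch of the two new functions (not part of this diff). The import path follows the file changed above; the subjects directory, subject name, and option values below are hypothetical:

from osl_ephys.source_recon import beamforming

# Hypothetical RHINO-style source recon directory and subject name
subjects_dir = "/path/to/src"
subject = "sub-001"

# LCMV weights resampled onto an 8 mm MNI grid:
# weights is (ndipoles, nsensors), coords is (3, ndipoles) in mm, per the docstrings above
weights, coords = beamforming.get_lcmv_weights(
    subjects_dir,
    subject,
    spatial_resolution=8,
    reference_brain="mni",
)

# Scalar leadfields on the same grid, collapsing the xyz orientations with "max-power":
# leadfields is (nsensors, ndipoles)
leadfields, lf_coords = beamforming.get_leadfields(
    subjects_dir,
    subject,
    spatial_resolution=8,
    reference_brain="mni",
    orientation="max-power",
)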