From 07d05a5ad4dfb79e425cf6c4a9d29511ba61169d Mon Sep 17 00:00:00 2001 From: NicolasGensollen Date: Thu, 11 Jul 2024 14:45:53 +0200 Subject: [PATCH] move PETSurface pipeline to new pet module --- clinica/pipelines/__init__.py | 1 - clinica/pipelines/pet/__init__.py | 2 +- clinica/pipelines/pet/surface/__init__.py | 1 + .../surface}/applyInverseDeformationField.m | 0 .../pet_surface_cli.py => pet/surface/cli.py} | 2 +- .../{pet_surface => pet/surface}/info.json | 2 +- .../surface/longitudinal_cli.py} | 2 +- .../surface/pipeline.py} | 82 +- clinica/pipelines/pet/surface/tasks.py | 256 +++ clinica/pipelines/pet/surface/utils.py | 1243 ++++++++++++++ clinica/pipelines/pet/surface/workflows.py | 512 ++++++ clinica/pipelines/pet_surface/__init__.py | 1 - .../pet_surface/pet_surface_utils.py | 1435 ----------------- clinica/pipelines/utils.py | 100 ++ clinica/utils/exceptions.py | 8 + clinica/utils/filemanip.py | 89 + .../test_instantiate_all_pipelines.py | 4 +- .../pipelines/pet/test_pet_surface.py | 4 +- .../pipelines/pet/test_pet_surface_utils.py | 752 +++++++++ test/unittests/utils/test_filemanip.py | 38 + 20 files changed, 3052 insertions(+), 1482 deletions(-) create mode 100644 clinica/pipelines/pet/surface/__init__.py rename clinica/pipelines/{pet_surface => pet/surface}/applyInverseDeformationField.m (100%) rename clinica/pipelines/{pet_surface/pet_surface_cli.py => pet/surface/cli.py} (98%) rename clinica/pipelines/{pet_surface => pet/surface}/info.json (99%) rename clinica/pipelines/{pet_surface/pet_surface_longitudinal_cli.py => pet/surface/longitudinal_cli.py} (98%) rename clinica/pipelines/{pet_surface/pet_surface_pipeline.py => pet/surface/pipeline.py} (85%) create mode 100644 clinica/pipelines/pet/surface/tasks.py create mode 100644 clinica/pipelines/pet/surface/utils.py create mode 100644 clinica/pipelines/pet/surface/workflows.py delete mode 100644 clinica/pipelines/pet_surface/__init__.py delete mode 100644 clinica/pipelines/pet_surface/pet_surface_utils.py create mode 100644 clinica/pipelines/utils.py create mode 100644 test/unittests/pipelines/pet/test_pet_surface_utils.py diff --git a/clinica/pipelines/__init__.py b/clinica/pipelines/__init__.py index 4947bddc6..70aed9dcf 100644 --- a/clinica/pipelines/__init__.py +++ b/clinica/pipelines/__init__.py @@ -5,7 +5,6 @@ machine_learning, machine_learning_spatial_svm, pet, - pet_surface, statistics_surface, statistics_volume, statistics_volume_correction, diff --git a/clinica/pipelines/pet/__init__.py b/clinica/pipelines/pet/__init__.py index 20c0b85b8..30a739634 100644 --- a/clinica/pipelines/pet/__init__.py +++ b/clinica/pipelines/pet/__init__.py @@ -1 +1 @@ -from . import linear, volume +from . import linear, surface, volume diff --git a/clinica/pipelines/pet/surface/__init__.py b/clinica/pipelines/pet/surface/__init__.py new file mode 100644 index 000000000..a2f24cc96 --- /dev/null +++ b/clinica/pipelines/pet/surface/__init__.py @@ -0,0 +1 @@ +from . 
import cli, longitudinal_cli diff --git a/clinica/pipelines/pet_surface/applyInverseDeformationField.m b/clinica/pipelines/pet/surface/applyInverseDeformationField.m similarity index 100% rename from clinica/pipelines/pet_surface/applyInverseDeformationField.m rename to clinica/pipelines/pet/surface/applyInverseDeformationField.m diff --git a/clinica/pipelines/pet_surface/pet_surface_cli.py b/clinica/pipelines/pet/surface/cli.py similarity index 98% rename from clinica/pipelines/pet_surface/pet_surface_cli.py rename to clinica/pipelines/pet/surface/cli.py index 76949b315..6cd1c8c9a 100644 --- a/clinica/pipelines/pet_surface/pet_surface_cli.py +++ b/clinica/pipelines/pet/surface/cli.py @@ -53,7 +53,7 @@ def cli( from clinica.utils.ux import print_end_pipeline - from .pet_surface_pipeline import PetSurface + from .pipeline import PetSurface parameters = { "acq_label": acq_label, diff --git a/clinica/pipelines/pet_surface/info.json b/clinica/pipelines/pet/surface/info.json similarity index 99% rename from clinica/pipelines/pet_surface/info.json rename to clinica/pipelines/pet/surface/info.json index d5881daa9..35af46e6c 100644 --- a/clinica/pipelines/pet_surface/info.json +++ b/clinica/pipelines/pet/surface/info.json @@ -24,4 +24,4 @@ "version": ">=12" } ] -} \ No newline at end of file +} diff --git a/clinica/pipelines/pet_surface/pet_surface_longitudinal_cli.py b/clinica/pipelines/pet/surface/longitudinal_cli.py similarity index 98% rename from clinica/pipelines/pet_surface/pet_surface_longitudinal_cli.py rename to clinica/pipelines/pet/surface/longitudinal_cli.py index 8bc24019e..edee2e63e 100644 --- a/clinica/pipelines/pet_surface/pet_surface_longitudinal_cli.py +++ b/clinica/pipelines/pet/surface/longitudinal_cli.py @@ -51,7 +51,7 @@ def cli( from clinica.utils.ux import print_end_pipeline - from .pet_surface_pipeline import PetSurface + from .pipeline import PetSurface parameters = { "acq_label": acq_label, diff --git a/clinica/pipelines/pet_surface/pet_surface_pipeline.py b/clinica/pipelines/pet/surface/pipeline.py similarity index 85% rename from clinica/pipelines/pet_surface/pet_surface_pipeline.py rename to clinica/pipelines/pet/surface/pipeline.py index 71e9292b0..9adb39749 100644 --- a/clinica/pipelines/pet_surface/pet_surface_pipeline.py +++ b/clinica/pipelines/pet/surface/pipeline.py @@ -189,21 +189,24 @@ def _build_input_node_longitudinal(self): self.parameters["acq_label"], skip_question=self.parameters["skip_question"], ) - - # fmt: off self.connect( [ - (read_parameters_node, self.input_node, [("pet", "pet"), - ("orig_nu", "orig_nu"), - ("white_surface_left", "white_surface_left"), - ("white_surface_right", "white_surface_right"), - ("destrieux_left", "destrieux_left"), - ("destrieux_right", "destrieux_right"), - ("desikan_left", "desikan_left"), - ("desikan_right", "desikan_right")]) + ( + read_parameters_node, + self.input_node, + [ + ("pet", "pet"), + ("orig_nu", "orig_nu"), + ("white_surface_left", "white_surface_left"), + ("white_surface_right", "white_surface_right"), + ("destrieux_left", "destrieux_left"), + ("destrieux_right", "destrieux_right"), + ("desikan_left", "desikan_left"), + ("desikan_right", "desikan_right"), + ], + ) ] ) - # fmt: on def _build_input_node_cross_sectional(self): import nipype.interfaces.utility as nutil @@ -320,21 +323,24 @@ def _build_input_node_cross_sectional(self): self.parameters["acq_label"], skip_question=self.parameters["skip_question"], ) - - # fmt: off self.connect( [ - (read_parameters_node, self.input_node, [("pet", "pet"), - 
("orig_nu", "orig_nu"), - ("white_surface_left", "white_surface_left"), - ("white_surface_right", "white_surface_right"), - ("destrieux_left", "destrieux_left"), - ("destrieux_right", "destrieux_right"), - ("desikan_left", "desikan_left"), - ("desikan_right", "desikan_right")]) + ( + read_parameters_node, + self.input_node, + [ + ("pet", "pet"), + ("orig_nu", "orig_nu"), + ("white_surface_left", "white_surface_left"), + ("white_surface_right", "white_surface_right"), + ("destrieux_left", "destrieux_left"), + ("destrieux_right", "destrieux_right"), + ("desikan_left", "desikan_left"), + ("desikan_right", "desikan_right"), + ], + ) ] ) - # fmt: on def _build_output_node(self): """Build and connect an output node to the pipeline.""" @@ -355,7 +361,7 @@ def _build_core_nodes(self): import nipype.interfaces.utility as niu import nipype.pipeline.engine as npe - import clinica.pipelines.pet_surface.pet_surface_utils as utils + from clinica.pipelines.pet.surface.workflows import get_wf full_pipe = npe.MapNode( niu.Function( @@ -380,7 +386,7 @@ def _build_core_nodes(self): "is_longitudinal", ], output_names=[], - function=utils.get_wf, + function=get_wf, ), name="full_pipeline_mapnode", iterfield=[ @@ -413,6 +419,7 @@ def _build_core_nodes(self): os.path.dirname(os.path.realpath(__file__)), "..", "..", + "..", "resources", "label_conversion_gtmsegmentation.csv", ) @@ -421,20 +428,21 @@ def _build_core_nodes(self): os.path.dirname(os.path.realpath(__file__)) ) full_pipe.inputs.is_longitudinal = self.parameters["longitudinal"] - - # Connection - # ========== - # fmt: off self.connect( [ - (self.input_node, full_pipe, [("pet", "pet"), - ("white_surface_left", "white_surface_left"), - ("white_surface_right", "white_surface_right"), - ("orig_nu", "orig_nu"), - ("destrieux_left", "destrieux_left"), - ("destrieux_right", "destrieux_right"), - ("desikan_left", "desikan_left"), - ("desikan_right", "desikan_right")]) + ( + self.input_node, + full_pipe, + [ + ("pet", "pet"), + ("white_surface_left", "white_surface_left"), + ("white_surface_right", "white_surface_right"), + ("orig_nu", "orig_nu"), + ("destrieux_left", "destrieux_left"), + ("destrieux_right", "destrieux_right"), + ("desikan_left", "desikan_left"), + ("desikan_right", "desikan_right"), + ], + ) ] ) - # fmt: off diff --git a/clinica/pipelines/pet/surface/tasks.py b/clinica/pipelines/pet/surface/tasks.py new file mode 100644 index 000000000..b18967dfe --- /dev/null +++ b/clinica/pipelines/pet/surface/tasks.py @@ -0,0 +1,256 @@ +def remove_nan_from_image_task(image_path: str) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import remove_nan_from_image + + return str(remove_nan_from_image(Path(image_path))) + + +def perform_gtmseg_task( + caps_dir: str, subject_id: str, session_id: str, is_longitudinal: bool +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import perform_gtmseg + + return str(perform_gtmseg(Path(caps_dir), subject_id, session_id, is_longitudinal)) + + +def make_label_conversion_task(gtmseg_file: str, csv_file: str) -> list: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import make_label_conversion + + return [str(p) for p in make_label_conversion(Path(gtmseg_file), Path(csv_file))] + + +def run_apply_inverse_deformation_field_task( + target_image: str, + deformation_field: str, + image: str, + matscript_folder: str, +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import run_apply_inverse_deformation_field + + 
return str( + run_apply_inverse_deformation_field( + Path(target_image), + Path(deformation_field), + Path(image), + Path(matscript_folder), + ) + ) + + +def run_apply_inverse_deformation_field_spm_standalone_task( + target_image: str, + deformation_field: str, + image: str, + matscript_folder: str, +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import ( + run_apply_inverse_deformation_field_spm_standalone, + ) + + return str( + run_apply_inverse_deformation_field_spm_standalone( + Path(target_image), + Path(deformation_field), + Path(image), + Path(matscript_folder), + ) + ) + + +def normalize_suvr_task(pet_image: str, mask: str, output_dir=None) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import normalize_suvr + + if output_dir: + output_dir = Path(output_dir) + return str(normalize_suvr(Path(pet_image), Path(mask), output_dir)) + + +def reformat_surfname_task( + hemisphere: str, left_surface: str, right_surface: str +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import reformat_surfname + from clinica.utils.image import HemiSphere + + return str( + reformat_surfname( + HemiSphere(hemisphere), + Path(left_surface), + Path(right_surface), + ) + ) + + +def run_mris_expand_task(surface: str, output_dir=None): + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import run_mris_expand + + if output_dir: + output_dir = Path(output_dir) + return [str(p) for p in run_mris_expand(Path(surface), output_dir)] + + +def run_mri_surf2surf_task( + surface: str, + registration: str, + gtmsegfile: str, + subject_id: str, + session_id: str, + caps_dir: str, + is_longitudinal: bool, + output_dir=None, +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import run_mri_surf2surf + + if output_dir: + output_dir = Path(output_dir) + + return str( + run_mri_surf2surf( + Path(surface), + Path(registration), + Path(gtmsegfile), + subject_id, + session_id, + Path(caps_dir), + is_longitudinal, + output_dir, + ) + ) + + +def run_mri_vol2surf_task( + pet_volume: str, + surface: str, + subject_id: str, + session_id: str, + caps_dir: str, + gtmsegfile: str, + is_longitudinal: bool, + output_dir=None, +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import run_mri_vol2surf + + if output_dir: + output_dir = Path(output_dir) + + return str( + run_mri_vol2surf( + Path(pet_volume), + Path(surface), + subject_id, + session_id, + Path(caps_dir), + Path(gtmsegfile), + is_longitudinal, + output_dir, + ) + ) + + +def compute_weighted_mean_surface_task(surfaces, output_dir=None) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import compute_weighted_mean_surface + + if output_dir: + output_dir = Path(output_dir) + + return str( + compute_weighted_mean_surface( + [Path(surface) for surface in surfaces], + output_dir, + ) + ) + + +def project_onto_fsaverage_task( + projection: str, + subject_id: str, + session_id: str, + caps_dir: str, + fwhm: int, + is_longitudinal: bool, + output_dir=None, +) -> str: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import project_onto_fsaverage + + if output_dir: + output_dir = Path(output_dir) + + return str( + project_onto_fsaverage( + Path(projection), + subject_id, + session_id, + Path(caps_dir), + fwhm, + is_longitudinal, + output_dir, + ) + ) + + +def get_mid_surface_task(surfaces) -> str: + from pathlib import Path + + from 
clinica.pipelines.pet.surface.utils import get_mid_surface + + return str(get_mid_surface([Path(surface) for surface in surfaces])) + + +def compute_average_pet_signal_based_on_annotations_task( + pet_projections: tuple, + atlas_files: dict, + output_dir=None, +) -> tuple: + from pathlib import Path + + from clinica.pipelines.pet.surface.utils import ( + compute_average_pet_signal_based_on_annotations, + ) + from clinica.utils.stream import log_and_raise + + if output_dir: + output_dir = Path(output_dir) + if len(atlas_files) != 2: + msg = ( + "The compute_average_pet_signal_based_on_annotations task requires two atlases " + "for the argument 'atlas_files', one for desikan, one for destrieux. " + f"The following {len(atlas_files)} were received:\n" + + "\n".join([str(_) for _ in atlas_files]) + ) + log_and_raise(msg, ValueError) + + tsv_files = compute_average_pet_signal_based_on_annotations( + (Path(pet_projections[0]), Path(pet_projections[1])), + atlas_files, + output_dir, + ) + if len(tsv_files) != 2: + msg = ( + "The compute_average_pet_signal_based_on_annotations task expects to return a tuple of 2 filenames. " + f"Instead, {len(tsv_files)} files were computed:\n" + + "\n".join([str(_) for _ in tsv_files]) + ) + log_and_raise(msg, ValueError) + return str(tsv_files[0]), str(tsv_files[1]) diff --git a/clinica/pipelines/pet/surface/utils.py b/clinica/pipelines/pet/surface/utils.py new file mode 100644 index 000000000..83eaacfb4 --- /dev/null +++ b/clinica/pipelines/pet/surface/utils.py @@ -0,0 +1,1243 @@ +import os.path +from os import PathLike +from pathlib import Path +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import nibabel as nib +import numpy as np +import pandas as pd + +from clinica.pipelines.utils import FreeSurferAnnotationImage +from clinica.utils.image import HemiSphere +from clinica.utils.pet import SUVRReferenceRegion, Tracer + +__all__ = [ + "get_output_dir", + "remove_nan_from_image", + "perform_gtmseg", + "make_label_conversion", + "run_apply_inverse_deformation_field", + "run_apply_inverse_deformation_field_spm_standalone", + "normalize_suvr", + "reformat_surfname", + "run_mris_expand", + "run_mri_surf2surf", + "run_mri_vol2surf", + "compute_weighted_mean_surface", + "project_onto_fsaverage", + "get_mid_surface", + "compute_average_pet_signal_based_on_annotations", + "get_regexp_substitutions", +] + + +def get_output_dir( + is_longitudinal: bool, + caps_dir: Union[str, PathLike], + subject_id: str, + session_id: str, +) -> Path: + caps_dir = Path(caps_dir) + root = caps_dir / "subjects" / subject_id / session_id + if is_longitudinal: + input_folder = root / "t1" + return ( + root + / "pet" + / _get_longitudinal_folder_name(input_folder) + / "surface_longitudinal" + ) + return root / "pet" / "surface" + + +def _get_longitudinal_folder_name(input_folder: Path) -> str: + from clinica.utils.exceptions import ClinicaCAPSError + + longitudinal_folders = [ + f.name for f in input_folder.iterdir() if f.name.startswith("long-") + ] + if len(longitudinal_folders) > 1: + raise ClinicaCAPSError( + f"[Error] Folder {input_folder} contains {len(longitudinal_folders)} " + "folders labeled long-*. Only 1 can exist" + ) + if len(longitudinal_folders) == 0: + raise ClinicaCAPSError( + f"[Error] Folder {input_folder} does not contains a folder labeled long-*. " + "Have you run t1-freesurfer-longitudinal?" + ) + return longitudinal_folders[0] + + +def remove_nan_from_image(image_path: PathLike) -> Path: + """Remove NaN values from the provided nifti image. 
+
+    This is needed after a registration performed by 'spmregister': instead
+    of filling space with 0, NaNs are used to extend the PET space.
+    We replace them with 0s here.
+
+    Parameters
+    ----------
+    image_path : PathLike
+        The path to the Nifti volume where NaNs need to be replaced by zeros.
+
+    Returns
+    -------
+    output_image_path : Path
+        The path to the volume in Nifti that does not contain any NaNs.
+    """
+    import nibabel as nib
+    import numpy as np
+
+    from clinica.utils.filemanip import get_filename_no_ext
+
+    image = nib.load(image_path)
+    data = np.nan_to_num(image.get_fdata(dtype="float32"))
+    output_image = nib.Nifti1Image(data, image.affine, header=image.header)
+    output_image_path = Path.cwd() / f"no_nan_{get_filename_no_ext(image_path)}.nii.gz"
+    nib.save(output_image, output_image_path)
+
+    return output_image_path
+
+
+def perform_gtmseg(
+    caps_dir: Path, subject_id: str, session_id: str, is_longitudinal: bool
+) -> Path:
+    """Perform FreeSurfer gtmseg.
+
+    'gtmseg' is a FreeSurfer command used to perform a segmentation
+    used in some partial volume correction methods.
+
+    Parameters
+    ----------
+    caps_dir : Path
+        CAPS directory.
+
+    subject_id : str
+        The subject ID. Example: 'sub-ADNI002S4213'.
+
+    session_id : str
+        The session ID. Example: 'ses-M012'.
+
+    is_longitudinal : bool
+        If longitudinal processing, subjects_dir must be put elsewhere.
+
+    Returns
+    -------
+    Path :
+        Path to the segmentation volume: a volume where each voxel
+        has a label (ranging in [0, 2035]); see the FreeSurfer lookup table
+        for the labels with their corresponding names.
+
+    Warnings
+    --------
+    This method changes the environment variable $SUBJECTS_DIR (but puts
+    the original one back after execution). Whether this can lead to
+    problems (for instance if 2 subjects are running in parallel) has not
+    been intensively tested.
+    """
+    import os
+
+    from clinica.utils.filemanip import copy_file
+
+    # The old subjects_dir is saved for later
+    subjects_dir_backup = _expand_environment_variable_into_path("$SUBJECTS_DIR")
+    root_env, freesurfer_id = _get_new_subjects_dir(
+        is_longitudinal, caps_dir, subject_id, session_id
+    )
+    # Set the new subjects dir for the function to work properly
+    os.environ["SUBJECTS_DIR"] = str(root_env)
+
+    freesurfer_mri_folder = (
+        _expand_environment_variable_into_path("$SUBJECTS_DIR") / freesurfer_id / "mri"
+    )
+    gtmseg_filename = freesurfer_mri_folder / "gtmseg.mgz"
+    if not gtmseg_filename.exists():
+        _run_gtmseg(freesurfer_id)
+    # We specify the out file to be in the current directory of execution
+    # (easy for us to look at it afterward in the working directory).
+    # We then copy the file.
+    copy_file(gtmseg_filename, Path.cwd() / "gtmseg.mgz")
+
+    # Remove a bunch of files created during segmentation in the caps dir that are not needed
+    for filename in ("gtmseg.ctab", "gtmseg.lta"):
+        if (freesurfer_mri_folder / filename).exists():
+            (freesurfer_mri_folder / filename).unlink()
+
+    # Set back the SUBJECTS_DIR environment variable of the user
+    os.environ["SUBJECTS_DIR"] = str(subjects_dir_backup)
+
+    return gtmseg_filename
+
+
+def _expand_environment_variable_into_path(variable_name: str) -> Path:
+    return Path(os.path.expandvars(variable_name))
+
+
+def _get_new_subjects_dir(
+    is_longitudinal: bool, caps_dir: Path, subject_id: str, session_id: str
+) -> Tuple[Path, str]:
+    """Extract SUBJECT_DIR.
+
+    Extract path to FreeSurfer segmentation in CAPS folder and FreeSurfer ID
+    (e.g. sub-CLNC01_ses-M000.long.sub-CLNC01_long-M000M018 or sub-CLNC01_ses-M000).
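+
+    A rough sketch of the two return shapes (hypothetical IDs, with the
+    longitudinal case assuming a 'long-M000M018' folder exists on disk):
+
+    >>> _get_new_subjects_dir(False, Path("caps"), "sub-CLNC01", "ses-M000")  # doctest: +SKIP
+    (PosixPath('caps/subjects/sub-CLNC01/ses-M000/t1/freesurfer_cross_sectional'), 'sub-CLNC01_ses-M000')
+    >>> _get_new_subjects_dir(True, Path("caps"), "sub-CLNC01", "ses-M000")  # doctest: +SKIP
+    (PosixPath('caps/subjects/sub-CLNC01/ses-M000/t1/long-M000M018/freesurfer_longitudinal'), 'sub-CLNC01_ses-M000.long.sub-CLNC01_long-M000M018')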
+ """ + root = caps_dir / "subjects" / subject_id / session_id / "t1" + if is_longitudinal: + longitudinal_folder_name = _get_longitudinal_folder_name(root) + return ( + root / longitudinal_folder_name / "freesurfer_longitudinal", + f"{subject_id}_{session_id}.long.{subject_id}_{longitudinal_folder_name}", + ) + return root / "freesurfer_cross_sectional", f"{subject_id}_{session_id}" + + +def _run_gtmseg(freesurfer_id: str): + """Run the gtmseg command with provided freesurfer ID. + This function creates a standalone node based on Command Line Interface. + We simply put the command line we would run on a console. + """ + import nipype.pipeline.engine as pe + from nipype.interfaces.base import CommandLine + + segmentation = pe.Node( + interface=CommandLine( + f"gtmseg --s {freesurfer_id} --no-seg-stats --xcerseg", + terminal_output="stream", + ), + name="gtmseg", + ) + segmentation.run() + + +def make_label_conversion(gtmseg_file: Path, csv_file: Path) -> List[Path]: + """Method used on the segmentation from gtmsegmentation. + + The purpose is to reduce the number of label. + The gathering of labels is specified in a separate file. + + Parameters + ---------- + gtmseg_file : Path + The path to the Nifti volume containing the gtmseg segmentation. + + csv_file : Path + The path to .csv file that contains 3 columns : REGION SOURCE DST. + Separator is , (coma). + + Returns + ------- + list of Path : + List of path to the converted volumes according to the .csv file. + Each volume is a mask representing an area. + """ + label_image = nib.load(gtmseg_file) + label_image.header.set_data_dtype("int8") + label_data = label_image.get_fdata(dtype="float32") + initial_labels = np.unique(label_data).astype("int16") + control_volume = np.zeros(label_data.shape) + src_val, dst_val, reg = _get_source_destination_region(csv_file) + _verify_all_initial_labels_have_matching_transformation(initial_labels, src_val) + + # Instantiation of final volume, with same dtype as original volume + new_label_data = np.zeros(label_data.shape, dtype=label_data.dtype) + # Computing the transformation + for i, src in enumerate(src_val): + new_label_data[label_data == src] = dst_val[i] + # Get unique list of new label + new_labels = np.unique(new_label_data) + new_labels = new_labels.astype("int") + list_of_regions = [] + + for label_value in new_labels: + region_volume = np.zeros(label_data.shape, dtype="uint8") + region_volume[new_label_data == label_value] = 1 + region_image = nib.Nifti1Image( + region_volume, label_image.affine, header=label_image.header + ) + output_path = Path(f"{label_value}.nii.gz").resolve() + nib.save(region_image, output_path) + list_of_regions.append(output_path) + control_volume = control_volume + region_volume + _check_sum(control_volume) + + return list_of_regions + + +def _read_csv(csv_file: Path) -> pd.DataFrame: + expected_columns = ["REGION", "SOURCE", "DST"] + if not csv_file.is_file(): + raise IOError(f"The provided CSV file {csv_file} does not exist.") + df = pd.read_csv(csv_file, sep=",") + if list(df.columns.values) != expected_columns: + raise Exception( + f"CSV file {csv_file} is not in the correct format. " + f"Columns should be: {expected_columns}." + ) + return df + + +def _verify_all_initial_labels_have_matching_transformation( + initial_labels: np.ndarray, src_val: np.ndarray +): + """Check that each label of original volume (initial_labels) + has a matching transformation in the csv file. 
+ """ + for label in initial_labels: + index = np.argwhere(src_val == label) + # Size 0 means no occurrence found + if index.size == 0: + raise ValueError( + f"Could not find label {label} on conversion table. " + "Add it manually in CSV file to correct error." + ) + + +def _get_source_destination_region( + csv_file: Path, +) -> Tuple[np.ndarray, np.ndarray, List[str]]: + df = _read_csv(csv_file) + + return ( + np.asanyarray(list(df.SOURCE)).astype("int"), + np.asarray(list(df.DST)).astype("int"), + list(df.REGION), + ) + + +def _check_sum(control_image_data: np.ndarray): + """The sum of a voxel location across the fourth dimension should be 1.""" + sum_voxel_mean = float(sum(sum(sum(control_image_data)))) / control_image_data.size + if not _are_almost_equal(1.0, sum_voxel_mean): + raise ValueError( + "Problem during parcellation: the mean sum of a voxel across " + f"4th dimension is {sum_voxel_mean} instead of 1.0" + ) + + +def _are_almost_equal(a: float, b: float, rel_tol=1e-9, abs_tol=0.0) -> bool: + """Measure equality between to floating or double numbers, using 2 thresholds : a + relative tolerance, and an absolute tolerance + """ + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) + + +def run_apply_inverse_deformation_field( + target_image: Path, + deformation_field: Path, + image: Path, + matscript_folder: Path, +) -> Path: + import sys + + from nipype.interfaces.matlab import MatlabCommand, get_matlab_command + + prefix = "subject_space_" + MatlabCommand.set_default_matlab_cmd(get_matlab_command()) + matlab = MatlabCommand() + if sys.platform.startswith("linux"): + matlab.inputs.args = "-nosoftwareopengl" + matlab.inputs.paths = str(matscript_folder) + matlab.inputs.script = """ + applyInverseDeformationField('%s', '%s', '%s', './', '%s') + """ % ( + target_image, + deformation_field, + image, + prefix, + ) + matlab.inputs.single_comp_thread = False + matlab_log_file = Path.cwd() / "matlab_output.log" + matlab.inputs.logfile = str(matlab_log_file) + matlab.run() + output_file = Path.cwd() / f"{prefix}{image.name}" + if not output_file.exists(): + raise IOError( + f"Something went wrong, please check {matlab_log_file.resolve()} for more information." + ) + return output_file + + +def run_apply_inverse_deformation_field_spm_standalone( + target_image: Path, + deformation_field: Path, + image: Path, + matscript_folder: Path, +) -> Path: + """Perform the same job as run_apply_inverse_deformation_field but with SPM standalone. + + We directly create a batch file that SPM standalone can run. + This function does not check whether SPM standalone must be used. + Previous check when building the pipeline ensures that all the + env vars exists ($SPMSTANDALONE_HOME and $MCR_HOME). 
+ """ + import platform + import subprocess + + prefix = "subject_space_" + script_location = Path.cwd() / "m_script.m" + with open(script_location, "w+") as script_file: + script_file.write( + _build_spm_batch_command(target_image, deformation_field, image, prefix) + ) + if platform.system() == "Darwin": + cmdline = f"cd $SPMSTANDALONE_HOME && ./run_spm12.sh $MCR_HOME batch {script_location}" + elif platform.system() == "Linux": + cmdline = f"$SPMSTANDALONE_HOME/run_spm12.sh $MCR_HOME batch {script_location}" + else: + raise SystemError("Only support Mac OS and Linux") + subprocess_run = subprocess.run( + cmdline, + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + if subprocess_run.returncode != 0: + raise ValueError( + "runApplyInverseDeformationField_SPM_standalone failed, returned non-zero code" + ) + output_file = Path.cwd() / f"{prefix}{image.name}" + if output_file.exists(): + raise IOError( + "Something went wrong while trying to run runApplyInverseDeformationField_SPM_standalone. " + "Output file not generated. Command launched :\n\t {cmdline}\n. We strongly recommend that " + "you use the supported version of Matlab MCR recommended by the creators of SPM." + ) + return output_file + + +def _build_spm_batch_command( + target_image: Path, deformation_field: Path, image: Path, prefix: str +) -> str: + """Write SPM batch command directly in a script that is readable by SPM standalone.""" + import os + from os.path import abspath + + command = ( + "jobs{1}.spm.util.defs.comp{1}.inv.comp{1}.def = {'" + + str(deformation_field) + + "'};\n" + ) + command += ( + "jobs{1}.spm.util.defs.comp{1}.inv.space = {'" + str(target_image) + "'};\n" + ) + command += "jobs{1}.spm.util.defs.out{1}.pull.fnames = {'" + str(image) + "'};\n" + command += ( + "jobs{1}.spm.util.defs.out{1}.pull.savedir.saveusr = {'" + + abspath(os.getcwd()) + + "'};\n" + ) + command += "jobs{1}.spm.util.defs.out{1}.pull.interp = 4;\n" + command += "jobs{1}.spm.util.defs.out{1}.pull.mask = 1;\n" + command += "jobs{1}.spm.util.defs.out{1}.pull.fwhm = [0 0 0];\n" + command += "jobs{1}.spm.util.defs.out{1}.pull.prefix = '" + prefix + "';\n" + + return command + + +def normalize_suvr( + pet_image: Path, mask: Path, output_dir: Optional[Path] = None +) -> Path: + """Get SUVR from pet image. + + Based on the segmentation performed by gtmsegmentation. + The Standard Uptake Value ratio is computed by dividing the + whole PET volume by the mean value observed in the pons. + + Parameters + ---------- + pet_image : Path + The path to the Nifti volume containing PET scan, realigned on up-sampled T1. + + mask : Path + The path to the mask of the pons (18FFDG) or pons+cerebellum (18FAV45) already eroded. + + output_dir : Path, optional + The directory in which to write the SUVR image. + If not provided, it will be written in the current directory. + + Returns + ------- + Path : + The path to the SUVR normalized volume in the current directory. + + Raises + ------ + ClinicaImageError : + If the provided eroded mask contains only zero values. + """ + import nibabel as nib + + from clinica.utils.exceptions import ClinicaImageError + + eroded_mask_nifti = nib.load(mask) + eroded_mask = eroded_mask_nifti.get_fdata(dtype="float32") + eroded_mask = eroded_mask > 0 + if (mask_size := np.sum(eroded_mask)) == 0: + raise ClinicaImageError( + f"The eroded mask located at {mask} contains only zero values. " + "A problem likely occurred when moving the eroded mask from MNI to gtmsegspace." 
+ ) + # Load PET data (they must be in gtmsegspace, or same space as label file) + pet_image_nifti = nib.load(pet_image) + pet_data = pet_image_nifti.get_fdata(dtype="float32") + # Mask unwanted values to determine mean uptake value + pons_pet_activity = eroded_mask * pet_data + mean_pons_pet_activity = np.sum(pons_pet_activity) / mask_size + # Then normalize PET data by this mean activity + suvr_image_nifti = nib.Nifti1Image( + pet_data / mean_pons_pet_activity, + pet_image_nifti.affine, + header=pet_image_nifti.header, + ) + suvr_image = (output_dir or Path.cwd()) / f"suvr_{pet_image.name}" + nib.save(suvr_image_nifti, suvr_image) + + return suvr_image + + +def reformat_surfname( + hemisphere: HemiSphere, left_surface: Path, right_surface: Path +) -> Path: + if hemisphere == HemiSphere.LEFT: + return left_surface + if hemisphere == HemiSphere.RIGHT: + return right_surface + + +def run_mris_expand(surface: Path, output_dir: Optional[Path] = None) -> List[Path]: + """Make a subprocess call to the freesurfer mris_expand function. + + Expands the white input surface toward the pial, generating 7 surfaces at + 35%, 40%, 45%, 50%, 55%, 60%, 65% of thickness. + + Parameters + ---------- + surface : Path + The path to the input white surface. + Must be named 'lh.white' or 'rh.white'. + The folder containing the surface file must also have + '?h.pial', '?.sphere', '?h.thickness' (freesurfer 'surf' folder). + + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + List of Path : + List of path to the generated surfaces. + + Notes + ----- + There is a bug in mris_expand : you are not allowed to write the surfaces + elsewhere than in the surf folder. + -N is a hidden parameter (not documented) that allows the user to specify + the number of surface generated between source and final target surface. + Here target is 65% of thickness, with 13 surfaces. + Then we only keep the surfaces we are interested in. 
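+
+    For a left white surface, the kept outputs look like this (sketch,
+    hypothetical paths, requires FreeSurfer):
+
+    >>> run_mris_expand(Path("surf/lh.white"))  # doctest: +SKIP
+    [PosixPath('.../lh.white_exp-007'), ..., PosixPath('.../lh.white_exp-013')]
+
+    that is, surfaces 007 to 013 (35% to 65% of thickness in 5% steps) are
+    moved to the output directory while surfaces 000 to 006 are deleted.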
+ """ + from clinica.utils.filemanip import move_file + from clinica.utils.stream import cprint + + output_dir = output_dir or Path.cwd() + if not output_dir.exists(): + output_dir.mkdir(parents=True, exist_ok=True) + out_file = surface.parent / f"{surface.name}_exp-" + _run_mri_expand_as_subprocess(surface, out_file) + # Move surface file to the output directory + source_files = [ + out_file.parent / f"{out_file.name}0{n}" + for n in ("07", "08", "09", "10", "11", "12", "13") + ] + destination_files = [output_dir / f.name for f in source_files] + for src, dst in zip(source_files, destination_files): + move_file(src, dst) + # Remove useless surfaces (0%, 5%, 10%, 15%, 20%, 25% and 30% of thickness) + for file_to_remove in (out_file.parent / f"{out_file.name}00{n}" for n in range(7)): + file_to_remove.unlink(missing_ok=False) + cprint(f"File {file_to_remove} has been removed.", lvl="debug") + + return destination_files + + +def _run_mri_expand_as_subprocess(surface: Path, out_file: Path): + _run_command_as_subprocess( + "mris_expand", _build_mri_expand_command(surface, out_file) + ) + + +def _run_command_as_subprocess(command_name: str, command: str): + import subprocess + + from clinica.utils.exceptions import ClinicaSubprocessError + from clinica.utils.stream import cprint + + cprint( + f"Running {command_name} with the following command:\n\n{command}", lvl="debug" + ) + subprocess_ = subprocess.run( + command, + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + if (code := subprocess_.returncode) != 0: + error_msg = ( + f"The subprocess '{command_name}' failed with a non-zero return code of {code}. " + f"The following command was run:\n\n{command}" + ) + cprint(error_msg, lvl="error") + raise ClinicaSubprocessError(error_msg) + + +def _build_mri_expand_command(surface: Path, out_file: Path) -> str: + import platform + + user_system = platform.system().lower() + command = f"mris_expand -thickness -N 13 {surface} 0.65 {out_file}" + # If system is MacOS, this export command must be run just + # before the mri_vol2surf command to bypass MacOs security. + if user_system.startswith("darwin"): + command = _make_freesurfer_command_mac_compatible(command) + return command + + +def _make_freesurfer_command_mac_compatible(command: str) -> str: + return "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && " + command + + +def run_mri_surf2surf( + surface: Path, + registration: Path, + gtmsegfile: Path, + subject_id: str, + session_id: str, + caps_dir: Path, + is_longitudinal: bool, + output_dir: Optional[Path] = None, +) -> Path: + """Make a subprocess call to the freesurfer surf2surf function. + + Here the aim is to convert a input surface (which is the native space of the subject), + into the same surface but in the gtmseg space (space of the volume generated by + the gtmsegmentation). + + Parameters + ---------- + surface : Path + The path to the surface file that needs to be converted. + + registration : Path + The path to a registration file that represents the transformation + needed to go from the native space to the gtmsegspace + (see https://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat + for more details). + + gtmsegfile : Path + The path to the gtm segmentation file. + + subject_id : str + The subject_id (something like sub-ADNI002S4213). + + session_id : str + The session id ( something like : ses-M012). + + caps_dir : Path + The path to the CAPS directory. + + is_longitudinal : bool + Whether the function should handle longitudinal files or not. 
+ + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + Path : + The path to the converted surface in current directory. + """ + import os + + from clinica.utils.filemanip import copy_file + + # set subjects_dir env. variable for mri_surf2surf to work properly + subjects_directory_backup = os.path.expandvars("$SUBJECTS_DIR") + subjects_directory, freesurfer_id = ( + _get_new_subjects_directory_longitudinal + if is_longitudinal + else _get_new_subjects_directory + )(caps_dir, subject_id, session_id) + os.environ["SUBJECTS_DIR"] = str(subjects_directory) + copy_file(surface, subjects_directory / freesurfer_id / "surf") + output_file = (output_dir or Path.cwd()) / f"{surface.name}_gtmsegspace" + _run_mri_surf2surf_as_subprocess( + surface, registration, gtmsegfile, freesurfer_id, output_file + ) + (subjects_directory / freesurfer_id / "surf" / surface.name).unlink( + missing_ok=False + ) + # put back original subjects_dir env + os.environ["SUBJECTS_DIR"] = subjects_directory_backup + + return output_file + + +def _get_new_subjects_directory_longitudinal( + caps_dir: Path, + subject_id: str, + session_id: str, +) -> Tuple[Path, str]: + """Extract SUBJECT_DIR. + + Extract path to FreeSurfer segmentation in CAPS folder and FreeSurfer ID + (e.g. sub-CLNC01_ses-M000.long.sub-CLNC01_long-M000M018 or sub-CLNC01_ses-M000). + """ + from clinica.utils.exceptions import ClinicaCAPSError + from clinica.utils.stream import cprint + + root = caps_dir / "subjects" / subject_id / session_id / "t1" + long_folds = [f.name for f in root.iterdir() if f.name.startswith("long-")] + if len(long_folds) > 1: + error_msg = f"Folder {root} contains {len(long_folds)} folders labeled long-*. Only 1 can exist." + cprint(error_msg, lvl="error") + raise ClinicaCAPSError(error_msg) + if len(long_folds) == 0: + error_msg = ( + f"Folder {root} does not contains a folder labeled long-*. " + "Have you run t1-freesurfer-longitudinal?" + ) + cprint(error_msg, lvl="error") + raise ClinicaCAPSError(error_msg) + root_env = root / long_folds[0] / "freesurfer_longitudinal" + sub_id_cmd = f"{subject_id}_{session_id}.long.{subject_id}_{long_folds[0]}" + return root_env, sub_id_cmd + + +def _get_new_subjects_directory( + caps_dir: Path, subject_id: str, session_id: str +) -> Tuple[Path, str]: + return ( + caps_dir + / "subjects" + / subject_id + / session_id + / "t1" + / "freesurfer_cross_sectional", + f"{subject_id}_{session_id}", + ) + + +def _build_mri_surf2surf_command( + surface: Path, + registration: Path, + gtmsegfile: Path, + freesurfer_id: str, + output_file: Path, +) -> str: + import platform + + from clinica.utils.image import HemiSphere + + user_system = platform.system().lower() + hemisphere = HemiSphere(surface.name[0:2]) + surface_name = surface.name[3:] + command = ( + f"mri_surf2surf --reg {registration} {gtmsegfile} --sval-xyz {surface_name} " + f"--hemi {hemisphere.value} --tval-xyz {gtmsegfile} --tval {output_file} --s {freesurfer_id}" + ) + # If system is MacOS, this export command must be run just + # before the mri_vol2surf command to bypass MacOs security. 
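+    # For illustration, with surface 'lh.white_exp-010' and hypothetical paths,
+    # the command built above reads:
+    #   mri_surf2surf --reg <reg.dat> <gtmseg.mgz> --sval-xyz white_exp-010 \
+    #     --hemi lh --tval-xyz <gtmseg.mgz> --tval <output_file> --s <freesurfer_id>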
+ if user_system.startswith("darwin"): + command = _make_freesurfer_command_mac_compatible(command) + return command + + +def _run_mri_surf2surf_as_subprocess( + surface: Path, + registration: Path, + gtmsegfile: Path, + freesurfer_id: str, + output_file: Path, +): + _run_command_as_subprocess( + "mri_surf2surf", + _build_mri_surf2surf_command( + surface, registration, gtmsegfile, freesurfer_id, output_file + ), + ) + + +def run_mri_vol2surf( + pet_volume: Path, + surface: Path, + subject_id: str, + session_id: str, + caps_dir: Path, + gtmsegfile: Path, + is_longitudinal: bool, + output_dir: Optional[Path] = None, +) -> Path: + """Make a subprocess call to the freesurfer vol2surf function. + + Projects the volume into the surface : the value at each vertex is + given by the value of the voxel it intersects + + Parameters + ---------- + pet_volume : Path + The path to PET volume (in gtmseg space) that needs to be mapped into surface. + + surface : Path + The path to surface file. + + subject_id : str + The subject_id (something like sub-ADNI002S4213). + + session_id : str + The session id ( something like : ses-M012). + + caps_dir : Path + The path to the CAPS directory. + + gtmsegfile : Path + The path to the gtm segmentation file (provides information on space, labels are not used). + + is_longitudinal : bool + Whether the function should handle longitudinal files or not. + + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + Path : + The path to the data projected onto the surface. + """ + import os + + from clinica.utils.filemanip import copy_file + from clinica.utils.image import HemiSphere + + subjects_directory_backup = os.path.expandvars("$SUBJECTS_DIR") + subjects_directory, freesurfer_id = ( + _get_new_subjects_directory_longitudinal + if is_longitudinal + else _get_new_subjects_directory + )(caps_dir, subject_id, session_id) + os.environ["SUBJECTS_DIR"] = str(subjects_directory) + copy_file(surface, subjects_directory / freesurfer_id / "surf") + gtmsegfile_copy = subjects_directory / freesurfer_id / "mri" / "gtmseg.mgz" + if not gtmsegfile_copy.exists(): + copy_file(gtmsegfile, gtmsegfile_copy) + hemisphere = HemiSphere(surface.name[0:2]) + output_file = ( + output_dir or Path.cwd() + ) / f"{hemisphere.value}.projection_{surface.name}.mgh" + _run_mri_vol2surf_as_subprocess(pet_volume, surface, freesurfer_id, output_file) + (subjects_directory / freesurfer_id / "surf" / surface.name).unlink( + missing_ok=False + ) + # TODO careful here... 
+ # Removing gtmseg.mgz may lead to problems as other vol2surf are using it + gtmsegfile_copy.unlink(missing_ok=False) + # put back original subjects_dir env + os.environ["SUBJECTS_DIR"] = subjects_directory_backup + + return output_file + + +def _run_mri_vol2surf_as_subprocess( + pet_volume: Path, + surface: Path, + freesurfer_id: str, + output_file: Path, +): + _run_command_as_subprocess( + "mri_vol2surf", + _build_mri_vol2surf_command(pet_volume, surface, freesurfer_id, output_file), + ) + + +def _build_mri_vol2surf_command( + pet_volume: Path, + surface: Path, + freesurfer_id: str, + output_file: Path, +) -> str: + import platform + + from clinica.utils.image import HemiSphere + + user_system = platform.system().lower() + hemisphere = HemiSphere(surface.name[0:2]) + surface_name = surface.name[3:] + command = ( + f"mri_vol2surf --mov {pet_volume} --o {output_file} --surf {surface_name} --hemi {hemisphere.value} " + f"--regheader {freesurfer_id} --ref gtmseg.mgz --interp nearest" + ) + if user_system.startswith("darwin"): + command = _make_freesurfer_command_mac_compatible(command) + + return command + + +def compute_weighted_mean_surface( + surfaces: Sequence[Path], output_dir: Optional[Path] = None +) -> Path: + """Compute a weighted average at each node of the surface. + + The weight are defined by a normal distribution (centered on the mid surface). + + Parameters + ---------- + surfaces : Sequence of Path + The paths to the data projected on the 7 surfaces (35 to 65 % of thickness) at each nodes. + + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + Path : + The path to the data averaged. + """ + import nibabel as nib + + _assert_seven_surfaces(surfaces) + hemisphere = HemiSphere(surfaces[0].name[0:2]) + out_surface = ( + output_dir or Path.cwd() + ) / f"{hemisphere.value}.averaged_projection_on_cortical_surface.mgh" + nib.save(_build_weighted_mean_surface_image(surfaces), out_surface) + + return out_surface + + +def _assert_seven_surfaces(surfaces: Sequence[Path]): + if (n_surfaces := len(surfaces)) != 7: + raise ValueError( + "There should be 7 surfaces at this point of the pipeline. " + f"However 'compute_weighted_mean_surface' received {n_surfaces} surfaces. " + "Something probably went wrong in prior steps of the pipeline." + ) + + +def _build_weighted_mean_surface_image(surfaces: Sequence[Path]) -> nib.MGHImage: + import nibabel as nib + import numpy as np + + # sample only to get dimension + sample = nib.load(surfaces[0]) + data_normalized = np.zeros(sample.header.get_data_shape()) + for surface, coefficient in zip( + surfaces, _get_coefficient_for_normal_repartition() + ): + current_surf = nib.load(surface) + data_normalized += current_surf.get_fdata(dtype="float32") * coefficient + # data_normalized = np.atleast_3d(data_normalized) + return nib.MGHImage(data_normalized, affine=sample.affine, header=sample.header) + + +def _get_coefficient_for_normal_repartition() -> ( + Tuple[float, float, float, float, float, float, float] +): + """TODO: Find out where do these numbers come from..""" + return 0.1034, 0.1399, 0.1677, 0.1782, 0.1677, 0.1399, 0.1034 + + +def project_onto_fsaverage( + projection: Path, + subject_id: str, + session_id: str, + caps_dir: Path, + fwhm: int, + is_longitudinal: bool, + output_dir: Optional[Path] = None, +) -> Path: + """Project data onto an averaged subject called fsaverage. 
+ + This subject is available in the $SUBJECTS_DIR folder. + + Notes + ----- + fsaverage and the subject must be in the subject_dir, so a copy of fsaverage is performed if necessary. + + Parameters + ---------- + projection : Path + The path to the projected data onto native subject surface. + + subject_id : str + The subject id (something like sub-ADNI002S4213). + + session_id : str + The session id ( something like : ses-M012). + + caps_dir : Path + The path to the CAPS directory. + + fwhm : int + FWHM of the Gaussian filter used for smoothing on fsaverage surface (not volume !) + + is_longitudinal : bool + Longitudinal pipeline or not. + + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + Path : + The path to the data averaged. + """ + import os + import shutil + + subjects_directory_backup = Path(os.path.expandvars("$SUBJECTS_DIR")) + subjects_directory, freesurfer_id = ( + _get_new_subjects_directory_longitudinal + if is_longitudinal + else _get_new_subjects_directory + )(caps_dir, subject_id, session_id) + os.environ["SUBJECTS_DIR"] = str(subjects_directory) + # copy fsaverage folder next to : subject_id + '_' + session_id + # for the mris_preproc command to properly find src and target + fsaverage_has_been_copied = False + if not (subjects_directory / "fsaverage").exists(): + shutil.copytree( + str(subjects_directory_backup / "fsaverage"), + str(subjects_directory / "fsaverage"), + ) + fsaverage_has_been_copied = True + # also copy the mgh file in the surf folder (needed by MRISPreproc + projection_in_surf_folder = ( + subjects_directory / freesurfer_id / "surf" / projection.name + ) + if not projection_in_surf_folder.exists(): + shutil.copy(str(projection), str(projection_in_surf_folder)) + out_fsaverage = ( + output_dir or Path.cwd() + ) / f"fsaverage_fwhm-{fwhm}_{projection.name}" + _run_mris_preproc_as_standalone_nipype_node( + projection, freesurfer_id, fwhm, out_fsaverage + ) + # remove projection file from surf folder + projection_in_surf_folder.unlink(missing_ok=False) + # remove fsaverage if it has been copied + if fsaverage_has_been_copied: + shutil.rmtree(subjects_directory / "fsaverage") + # put back original subjects_dir env + os.environ["SUBJECTS_DIR"] = str(subjects_directory_backup) + return out_fsaverage + + +def _run_mris_preproc_as_standalone_nipype_node( + projection: Path, + freesurfer_id: str, + fwhm: float, + output_file: Path, +): + from nipype.interfaces.freesurfer import MRISPreproc + + projection_node = MRISPreproc() + projection_node.inputs.target = "fsaverage" + projection_node.inputs.subjects = [freesurfer_id] + projection_node.inputs.fwhm = fwhm + projection_node.inputs.hemi = HemiSphere(projection.name[0:2]).value + projection_node.inputs.surf_measure = projection.name[3:] + projection_node.inputs.out_file = str(output_file) + projection_node.run() + + +def get_mid_surface(surfaces: Sequence[Path]) -> Path: + """Returns the mid-surface when dealing with the 7 different surfaces. + + Parameters + ---------- + surfaces : Sequence of Path + The 7 different surfaces generated by mris_expand. + + Returns + ------- + Path : + The path to the mid-surface. 
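+
+    Examples
+    --------
+    With the 7 surfaces produced by run_mris_expand (35% to 65% of thickness),
+    the element at index 3 is the 50% surface:
+
+    >>> surfaces = [Path(f"lh.white_exp-{n:03d}") for n in range(7, 14)]
+    >>> get_mid_surface(surfaces)
+    PosixPath('lh.white_exp-010')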
+ """ + _assert_seven_surfaces(surfaces) + return surfaces[3] + + +def compute_average_pet_signal_based_on_annotations( + pet_projections: Tuple[Path, Path], + atlas_files: Dict[str, FreeSurferAnnotationImage], + output_dir: Optional[Path] = None, +) -> List[Path]: + """Computes the average of PET signal based on annot files from Freesurfer. + + Those files describe the brain according to known atlases. + + Parameters + ---------- + pet_projections : tuple of two Path + The paths to the PET projection (must be a MGH file) [left_hemisphere, right_hemisphere]. + + atlas_files : dict[str, FreeSurferAnnotationImage] + Dictionary containing path to lh and rh annotation files for any number of atlases. + + output_dir : Path, optional + The path to the output folder in which to write the output files. + If not provided, the files will be written in the current directory. + + Returns + ------- + Path : + The path to the tsv containing average PET values. + + Raises + ------ + ValueError : + If not exactly two files were provided through the argument 'pet_projections'. + """ + import nibabel as nib + import numpy as np + import pandas as pd + + from clinica.pipelines.utils import FreeSurferAnnotation + from clinica.utils.stream import log_and_raise + + if len(pet_projections) != 2: + msg = ( + "The compute_average_pet_signal_based_on_annotations function requires two files " + "for the argument 'pet_projections', one for the left hemisphere, one for the right. " + f"The following {len(pet_projections)} were received:\n" + + "\n".join([str(_) for _ in pet_projections]) + ) + log_and_raise(msg, ValueError) + pet_mgh = { + HemiSphere.LEFT: np.squeeze( + nib.load(pet_projections[0]).get_fdata(dtype="float32") + ), + HemiSphere.RIGHT: np.squeeze( + nib.load(pet_projections[1]).get_fdata(dtype="float32") + ), + } + filename_tsv = [] + for atlas_name, annotation_image in atlas_files.items(): + annotation = FreeSurferAnnotation.from_annotation_image(annotation_image) + annotation.replace_minus_one_annotation_with_zero() + average_region = [] + for region_id, region_name in enumerate(annotation.region_names): + for hemisphere in (HemiSphere.LEFT, HemiSphere.RIGHT): + mask = annotation.get_annotation(hemisphere) == region_id + mask = np.uint(mask) + masked_data = mask * pet_mgh[hemisphere] + average_region.append( + np.nan if np.sum(mask) == 0 else np.sum(masked_data) / np.sum(mask) + ) + final_tsv = pd.DataFrame( + { + "index": range(len(average_region)), + "label_name": annotation.get_lateralized_region_names(left_first=True), + "mean_scalar": average_region, + } + ) + filename_atlas_tsv = (output_dir or Path.cwd()) / f"{atlas_name}.tsv" + filename_tsv.append(filename_atlas_tsv) + final_tsv.to_csv( + filename_atlas_tsv, + sep="\t", + index=False, + columns=["index", "label_name", "mean_scalar"], + ) + return filename_tsv + + +def get_regexp_substitutions( + pet_tracer: Tracer, + region: SUVRReferenceRegion, + is_longitudinal: bool, +) -> List[Tuple[str, str]]: + return [ + _get_mid_surface_substitutions(is_longitudinal=is_longitudinal), + _get_projection_in_native_space_substitutions( + pet_tracer, region, is_longitudinal=is_longitudinal + ), + _get_projection_in_fsaverage_substitution( + pet_tracer, region, is_longitudinal=is_longitudinal + ), + _get_tsv_file_for_atlas( + pet_tracer, region, "destrieux", is_longitudinal=is_longitudinal + ), + _get_tsv_file_for_atlas( + pet_tracer, region, "desikan", is_longitudinal=is_longitudinal + ), + ] + + +def _get_mid_surface_substitutions(is_longitudinal: bool) -> 
Tuple[str, str]: + if is_longitudinal: + return ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/midsurface\/.*_hemi_([a-z]+)(.*)$", + r"\1/\2_\3_\4_hemi-\5_midcorticalsurface", + ) + return ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/midsurface\/.*_hemi_([a-z]+)(.*)$", + r"\1/\2_\3_hemi-\4_midcorticalsurface", + ) + + +def _get_projection_in_native_space_substitutions( + pet_tracer: Tracer, + region: SUVRReferenceRegion, + is_longitudinal: bool, +) -> Tuple[str, str]: + if is_longitudinal: + return ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/projection_native\/.*_hemi_([a-z]+).*", + rf"\1/\2_\3_\4_trc-{pet_tracer.value}_pet_space-native_suvr-{region.value}_pvc-iy_hemi-\5_projection.mgh", + ) + return ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/projection_native\/.*_hemi_([a-z]+).*", + rf"\1/\2_\3_trc-{pet_tracer.value}_pet_space-native_suvr-{region.value}_pvc-iy_hemi-\4_projection.mgh", + ) + + +def _get_projection_in_fsaverage_substitution( + pet_tracer: Tracer, + region: SUVRReferenceRegion, + is_longitudinal: bool, +) -> Tuple[str, str]: + if is_longitudinal: + return ( + ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/" + r"projection_fsaverage\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*" + ), + ( + rf"\1/\2_\3_\4_trc-{pet_tracer.value}_pet_space-fsaverage_" + rf"suvr-{region.value}_pvc-iy_hemi-\5_fwhm-\6_projection.mgh" + ), + ) + return ( + r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/projection_fsaverage\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*", + ( + rf"\1/\2_\3_trc-{pet_tracer.value}_pet_space-fsaverage_" + rf"suvr-{region.value}_pvc-iy_hemi-\4_fwhm-\5_projection.mgh" + ), + ) + + +def _get_tsv_file_for_atlas( + pet_tracer: Tracer, + region: SUVRReferenceRegion, + atlas: str, + is_longitudinal: bool, +) -> Tuple[str, str]: + if is_longitudinal: + return ( + rf"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/{atlas}_tsv\/{atlas}.tsv", + ( + rf"\1/atlas_statistics/\2_\3_\4_trc-{pet_tracer.value}_pet_" + rf"space-{atlas}_pvc-iy_suvr-{region.value}_statistics.tsv" + ), + ) + return ( + rf"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/{atlas}_tsv\/{atlas}.tsv", + ( + rf"\1/atlas_statistics/\2_\3_trc-{pet_tracer.value}_pet_" + rf"space-{atlas}_pvc-iy_suvr-{region.value}_statistics.tsv" + ), + ) diff --git a/clinica/pipelines/pet/surface/workflows.py b/clinica/pipelines/pet/surface/workflows.py new file mode 100644 index 000000000..e61afac5d --- /dev/null +++ b/clinica/pipelines/pet/surface/workflows.py @@ -0,0 +1,512 @@ +def get_wf( + subject_id: str, + session_id: str, + pvc_psf_tsv: str, + caps_dir: str, + pet: str, + orig_nu: str, + white_surface_left: str, + white_surface_right: str, + working_directory_subjects: str, + acq_label: str, + csv_segmentation: str, + suvr_reference_region: str, + matscript_folder_inverse_deformation: str, + destrieux_left: str, + destrieux_right: str, + desikan_left: str, + desikan_right: str, + is_longitudinal: bool, + output_dir=None, +): + """get_wf create a full workflow for only one subject, and then executes it. + + Parameters + ---------- + subject_id : str + The subject ID. + + session_id : str + The session ID. + + pvc_psf_tsv : str + The path the TSV file containing information on the point spread function (PSF). + + caps_dir : str + The path to the CAPS directory. + + pet : str + The path to the PET image in the BIDS directory. + + orig_nu : str + The path to the 'orig_nu' file (must be in the CAPS directory, in 'mri'). 
+ + white_surface_left : str + The path to the left white surface in native space of subject. + + white_surface_right : str + The path to the right white surface in native space of subject. + + working_directory_subjects : str + ??? + + acq_label : str + The PET tracer to consider. + + csv_segmentation : str + The path to the CSV for the segmentation (problems encountered while using __file__). + + suvr_reference_region : str + The label of the SUVR reference region. + + matscript_folder_inverse_deformation : str + The path to the current folder containing the matlab script + used to call the spm function for the inverse deformation. + + destrieux_left : str + The path to the destrieux parcellation for the left hemisphere. + + destrieux_right : str + The path to the destrieux parcellation for the right hemisphere. + + desikan_left : str + The path to the desikan parcellation for the left hemisphere. + + desikan_right : str + The path to the desikan parcellation for the right hemisphere. + + is_longitudinal : bool + Whether the pipeline is PETSurface or PETSurfaceLongitudinal. + + output_dir : str, optional + The path to the output directory. + """ + import os + from pathlib import Path + + import nipype.interfaces.io as nio + import nipype.interfaces.utility as niu + import nipype.pipeline.engine as pe + from nipype.interfaces.freesurfer import ApplyVolTransform, MRIConvert, Tkregister2 + from nipype.interfaces.fsl import Merge + from nipype.interfaces.petpvc import PETPVC + from nipype.interfaces.spm import Coregister, Normalize12 + + from clinica.pipelines.pet.surface.tasks import ( + compute_average_pet_signal_based_on_annotations_task, + compute_weighted_mean_surface_task, + get_mid_surface_task, + make_label_conversion_task, + normalize_suvr_task, + perform_gtmseg_task, + project_onto_fsaverage_task, + reformat_surfname_task, + remove_nan_from_image_task, + run_apply_inverse_deformation_field_spm_standalone_task, + run_apply_inverse_deformation_field_task, + run_mri_surf2surf_task, + run_mri_vol2surf_task, + run_mris_expand_task, + ) + from clinica.pipelines.pet.surface.utils import ( + get_output_dir, + get_regexp_substitutions, + ) + from clinica.pipelines.pet.utils import get_suvr_mask, read_psf_information + from clinica.pipelines.utils import FreeSurferAnnotationImage + from clinica.utils.filemanip import get_subject_id, load_volume, unzip_nii + from clinica.utils.pet import SUVRReferenceRegion, Tracer + from clinica.utils.spm import get_tpm, use_spm_standalone_if_available + from clinica.utils.ux import print_begin_image + + spm_standalone_is_available = use_spm_standalone_if_available() + acq_label = Tracer(acq_label) + suvr_reference_region = SUVRReferenceRegion(suvr_reference_region) + image_id = get_subject_id(pet) + try: + load_volume(pet) + except ValueError as e: + raise ValueError( + f"Clinica could not load volumes for {image_id.replace('_', ' | ')}. 
{str(e)}" + ) + print_begin_image(image_id) + + # Creation of workflow + # 1 Creation of node + unzip_pet = pe.Node( + niu.Function( + input_names=["in_file"], output_names=["out_file"], function=unzip_nii + ), + name="unzip_pet", + ) + + unzip_orig_nu = unzip_pet.clone(name="unzip_orig_nu") + + unzip_mask = unzip_pet.clone(name="unzip_mask") + unzip_mask.inputs.in_file = str(get_suvr_mask(suvr_reference_region)) + + coreg = pe.Node(Coregister(), name="coreg") + + convert_mgh = pe.Node(MRIConvert(), name="convert_mgh") + + removenan = pe.Node( + niu.Function( + input_names=["image_path"], + output_names=["vol_wo_nan"], + function=remove_nan_from_image_task, + ), + name="removenan", + ) + + gtmsegmentation = pe.Node( + niu.Function( + input_names=["caps_dir", "subject_id", "session_id", "is_longitudinal"], + output_names=["gtmseg_file"], + function=perform_gtmseg_task, + ), + name="gtmseg", + ) + gtmsegmentation.inputs.caps_dir = caps_dir + gtmsegmentation.inputs.subject_id = subject_id + gtmsegmentation.inputs.session_id = session_id + gtmsegmentation.inputs.is_longitudinal = is_longitudinal + + tkregister = pe.Node(Tkregister2(reg_header=True), name="tkreg") + + convert_gtmseg = convert_mgh.clone(name="convert_gtmseg") + + labelconversion = pe.Node( + niu.Function( + input_names=["gtmsegfile", "csv"], + output_names=["list_of_regions"], + function=make_label_conversion_task, + ), + name="conversion_of_labels", + ) + labelconversion.inputs.csv = csv_segmentation + + if not os.path.exists(labelconversion.inputs.csv): + raise FileNotFoundError( + f"CSV file : {labelconversion.inputs.csv} does not exist." + ) + + merge_volume = pe.Node( + Merge(output_type="NIFTI_GZ", dimension="t"), name="merge_volume" + ) + vol2vol = pe.Node( + ApplyVolTransform(reg_header=True, interp="trilin"), name="vol2vol" + ) + vol2vol_mask = pe.Node( + ApplyVolTransform(reg_header=True, interp="nearest"), name="vol2vol_mask" + ) + normalize12 = pe.Node( + Normalize12( + tpm=get_tpm(), + affine_regularization_type="mni", + jobtype="est", + bias_fwhm=60, + bias_regularization=0.0001, + warping_regularization=[0, 0.001, 0.5, 0.05, 0.2], + ), + name="normalize_to_MNI", + ) + apply_inverse_deformation = pe.Node( + niu.Function( + input_names=["target", "deformation_field", "img", "matscript_folder"], + output_names=["freesurfer_space_eroded_mask"], + function=( + run_apply_inverse_deformation_field_spm_standalone_task + if spm_standalone_is_available + else run_apply_inverse_deformation_field_task + ), + ), + name="applyInverseDeformation", + ) + apply_inverse_deformation.inputs.matscript_folder = ( + matscript_folder_inverse_deformation + ) + pons_normalization = pe.Node( + niu.Function( + input_names=["pet_image", "mask"], + output_names=["suvr_image"], + function=normalize_suvr_task, + ), + name="pons_normalization", + ) + # read_psf_information expects a list of subjects/sessions and returns a list of PSF + psf_info = read_psf_information( + Path(pvc_psf_tsv), + [subject_id], + [session_id], + acq_label, + )[0] + pvc = pe.Node(PETPVC(pvc="IY"), name="petpvc") + pvc.inputs.fwhm_x = psf_info[0] + pvc.inputs.fwhm_y = psf_info[1] + pvc.inputs.fwhm_z = psf_info[2] + + reformat_surface_name = pe.Node( + niu.Function( + input_names=["hemisphere", "left_surface", "right_surface"], + output_names=["out"], + function=reformat_surfname_task, + ), + name="reformat_surface_name", + ) + reformat_surface_name.inputs.left_surface = white_surface_left + reformat_surface_name.inputs.right_surface = white_surface_right + 
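Every helper above is wrapped the same way: a plain task function becomes a Nipype node via niu.Function, and nodes are later wired with (source, destination, [(output, input)]) tuples. A self-contained toy sketch of the pattern, using a made-up function that is not part of the pipeline:

import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe

def double(x):
    # Task functions must be self-contained; any imports they need go in the body.
    return 2 * x

node_a = pe.Node(
    niu.Function(input_names=["x"], output_names=["y"], function=double),
    name="node_a",
)
node_b = node_a.clone(name="node_b")
node_a.inputs.x = 21

wf = pe.Workflow(name="toy")
# Same (source, destination, [(output, input)]) tuple format as the pipeline uses.
wf.connect([(node_a, node_b, [("y", "x")])])
wf.run()  # node_b receives y == 42 and outputs 84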
reformat_surface_name.iterables = ("hemisphere", ["lh", "rh"]) + + mris_exp = pe.Node( + niu.Function( + input_names=["surface", "output_dir"], + output_names=["out_surface"], + function=run_mris_expand_task, + ), + name="mris_expand_white", + ) + mris_exp.inputs.output_dir = output_dir + + surf_conversion = pe.MapNode( + niu.Function( + input_names=[ + "surface", + "registration", + "gtmseg_file", + "subject_id", + "session_id", + "caps_dir", + "is_longitudinal", + "output_dir", + ], + output_names=["tval"], + function=run_mri_surf2surf_task, + ), + name="surf_conversion", + iterfield=["in_surface"], + ) + surf_conversion.inputs.subject_id = subject_id + surf_conversion.inputs.session_id = session_id + surf_conversion.inputs.caps_dir = caps_dir + surf_conversion.inputs.is_longitudinal = is_longitudinal + surf_conversion.inputs.output_dir = output_dir + + vol_on_surf = pe.MapNode( + niu.Function( + input_names=[ + "pet_volume", + "surface", + "subject_id", + "session_id", + "caps_dir", + "gtmsegfile", + "is_longitudinal", + "output_dir", + ], + output_names=["output"], + function=run_mri_vol2surf_task, + ), + name="vol_on_surf", + iterfield=["surface"], + ) + vol_on_surf.inputs.subject_id = subject_id + vol_on_surf.inputs.session_id = session_id + vol_on_surf.inputs.caps_dir = caps_dir + vol_on_surf.inputs.is_longitudinal = is_longitudinal + vol_on_surf.inputs.output_dir = output_dir + + normal_average = pe.Node( + niu.Function( + input_names=["surfaces", "output_dir"], + output_names=["out_surface"], + function=compute_weighted_mean_surface_task, + ), + name="normal_average", + ) + normal_average.inputs.output_dir = output_dir + + project_on_fsaverage = pe.Node( + niu.Function( + input_names=[ + "projection", + "subject_id", + "caps_dir", + "session_id", + "fwhm", + "is_longitudinal", + "output_dir", + ], + output_names=["out_fsaverage"], + function=project_onto_fsaverage_task, + ), + name="project_on_fsaverage", + ) + project_on_fsaverage.iterables = ("fwhm", [0, 5, 10, 15, 20, 25]) + project_on_fsaverage.inputs.subject_id = subject_id + project_on_fsaverage.inputs.session_id = session_id + project_on_fsaverage.inputs.caps_dir = caps_dir + project_on_fsaverage.inputs.is_longitudinal = is_longitudinal + project_on_fsaverage.inputs.output_dir = output_dir + + extract_mid_surface = pe.Node( + niu.Function( + input_names=["surfaces"], + output_names=["mid_surface"], + function=get_mid_surface_task, + ), + name="extract_mid_surface", + ) + + gather_pet_projection = pe.JoinNode( + niu.IdentityInterface(fields=["pet_projection_lh_rh"]), + name="gather_pet_projection_hemisphere", + joinsource="reformat_surface_name", + joinfield=["pet_projection_lh_rh"], + ) + + atlas_tsv = pe.Node( + niu.Function( + input_names=["pet_projections", "atlas_files"], + output_names=["destrieux_tsv", "desikan_tsv"], + function=compute_average_pet_signal_based_on_annotations_task, + ), + name="atlas_tsv", + ) + atlas_tsv.inputs.atlas_files = { + "destrieux": FreeSurferAnnotationImage.from_raw( + destrieux_left, destrieux_right + ), + "desikan": FreeSurferAnnotationImage.from_raw(desikan_left, desikan_right), + } + + # 2 creation of workflow : working dir, inputnode, outputnode and datasink + name_workflow = subject_id.replace("-", "_") + "_" + session_id.replace("-", "_") + if is_longitudinal: + name_workflow += "_long" + + wf = pe.Workflow(name=name_workflow) + wf.base_dir = working_directory_subjects + + inputnode = pe.Node( + niu.IdentityInterface( + fields=[ + "orig_nu", + "pet", + "psf", + "white_surface_left", 
+ "white_surface_right", + ] + ), + name="inputnode", + mandatory_inputs=True, + ) + + inputnode.inputs.orig_nu = orig_nu + inputnode.inputs.pet = pet + inputnode.inputs.psf = pvc_psf_tsv + inputnode.inputs.white_surface_right = white_surface_right + inputnode.inputs.white_surface_left = white_surface_left + + outputnode = pe.Node( + niu.IdentityInterface( + fields=[ + "mid_surf", + "projection_native_subject", + "projection_fsaverage_smoothed", + "destrieux_tsv", + "desikan_tsv", + ] + ), + name="outputnode", + mandatory_inputs=True, + ) + + datasink = pe.Node(nio.DataSink(), name="sinker") + datasink.inputs.base_directory = str( + get_output_dir(is_longitudinal, caps_dir, subject_id, session_id) + ) + datasink.inputs.parameterization = True + datasink.inputs.regexp_substitutions = get_regexp_substitutions( + acq_label, suvr_reference_region, is_longitudinal + ) + + # 3 Connecting the nodes + + # TODO(@arnaud.marcoux): Add titles for sections of connections. + # Could be useful to add sections title to group similar connections + # together. + + connections = [ + (inputnode, unzip_pet, [("pet", "in_file")]), + (unzip_pet, coreg, [("out_file", "source")]), + (inputnode, convert_mgh, [("orig_nu", "in_file")]), + (convert_mgh, unzip_orig_nu, [("out_file", "in_file")]), + (unzip_orig_nu, coreg, [("out_file", "target")]), + (coreg, removenan, [("coregistered_source", "image_path")]), + (removenan, vol2vol, [("vol_wo_nan", "source_file")]), + (inputnode, tkregister, [("orig_nu", "target_image")]), + (unzip_orig_nu, normalize12, [("out_file", "image_to_align")]), + (unzip_mask, apply_inverse_deformation, [("out_file", "img")]), + ( + normalize12, + apply_inverse_deformation, + [("deformation_field", "deformation_field")], + ), + (unzip_orig_nu, apply_inverse_deformation, [("out_file", "target")]), + ( + apply_inverse_deformation, + vol2vol_mask, + [("freesurfer_space_eroded_mask", "source_file")], + ), + (gtmsegmentation, vol2vol_mask, [("gtmseg_file", "target_file")]), + (gtmsegmentation, tkregister, [("gtmseg_file", "moving_image")]), + (gtmsegmentation, convert_gtmseg, [("gtmseg_file", "in_file")]), + (gtmsegmentation, vol2vol, [("gtmseg_file", "target_file")]), + (vol2vol, pons_normalization, [("transformed_file", "pet_image")]), + (vol2vol_mask, pons_normalization, [("transformed_file", "mask")]), + (convert_gtmseg, labelconversion, [("out_file", "gtmsegfile")]), + (labelconversion, merge_volume, [("list_of_regions", "in_files")]), + (merge_volume, pvc, [("merged_file", "mask_file")]), + (pons_normalization, pvc, [("suvr_image", "in_file")]), + (reformat_surface_name, mris_exp, [("out", "surface")]), + (mris_exp, extract_mid_surface, [("out_surface", "surfaces")]), + (mris_exp, surf_conversion, [("out_surface", "surface")]), + (tkregister, surf_conversion, [("reg_file", "registration")]), + (gtmsegmentation, surf_conversion, [("gtmseg_file", "gtmseg_file")]), + (pvc, vol_on_surf, [("out_file", "pet_volume")]), + (surf_conversion, vol_on_surf, [("tval", "surface")]), + (gtmsegmentation, vol_on_surf, [("gtmseg_file", "gtmsegfile")]), + (vol_on_surf, normal_average, [("output", "surfaces")]), + (normal_average, project_on_fsaverage, [("out_surface", "projection")]), + ( + normal_average, + gather_pet_projection, + [("out_surface", "pet_projection_lh_rh")], + ), + ( + gather_pet_projection, + atlas_tsv, + [("pet_projection_lh_rh", "pet_projections")], + ), + (atlas_tsv, outputnode, [("destrieux_tsv", "destrieux_tsv")]), + (atlas_tsv, outputnode, [("desikan_tsv", "desikan_tsv")]), + ( + 
project_on_fsaverage, + outputnode, + [("out_fsaverage", "projection_fsaverage_smoothed")], + ), + (extract_mid_surface, outputnode, [("mid_surface", "mid_surf")]), + (normal_average, outputnode, [("out_surface", "projection_native_subject")]), + ( + outputnode, + datasink, + [("projection_fsaverage_smoothed", "projection_fsaverage")], + ), + (outputnode, datasink, [("mid_surf", "midsurface")]), + (outputnode, datasink, [("projection_native_subject", "projection_native")]), + (outputnode, datasink, [("destrieux_tsv", "destrieux_tsv")]), + (outputnode, datasink, [("desikan_tsv", "desikan_tsv")]), + ] + wf.connect(connections) + # wf.write_graph(graph2use='flat') + wf.run() diff --git a/clinica/pipelines/pet_surface/__init__.py b/clinica/pipelines/pet_surface/__init__.py deleted file mode 100644 index a53706a29..000000000 --- a/clinica/pipelines/pet_surface/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import pet_surface_cli, pet_surface_longitudinal_cli diff --git a/clinica/pipelines/pet_surface/pet_surface_utils.py b/clinica/pipelines/pet_surface/pet_surface_utils.py deleted file mode 100644 index 92266bb78..000000000 --- a/clinica/pipelines/pet_surface/pet_surface_utils.py +++ /dev/null @@ -1,1435 +0,0 @@ -def get_new_subjects_dir(is_longitudinal, caps_dir, subject_id, session_id): - """Extract SUBJECT_DIR. - - Extract path to FreeSurfer segmentation in CAPS folder and FreeSurfer ID - (e.g. sub-CLNC01_ses-M000.long.sub-CLNC01_long-M000M018 or sub-CLNC01_ses-M000). - """ - import os - - from clinica.utils.exceptions import ClinicaCAPSError - - if is_longitudinal: - root = os.path.join(caps_dir, "subjects", subject_id, session_id, "t1") - long_folds = [f for f in os.listdir(root) if f.startswith("long-")] - if len(long_folds) > 1: - raise ClinicaCAPSError( - f"[Error] Folder {root} contains {len(long_folds)} folders labeled long-*. Only 1 can exist." - ) - elif len(long_folds) == 0: - raise ClinicaCAPSError( - f"[Error] Folder {root} does not contains a folder labeled long-*. Have you run t1-freesurfer-longitudinal?" - ) - else: - root_env = os.path.join(root, long_folds[0], "freesurfer_longitudinal") - sub_id_cmd = f"{subject_id}_{session_id}.long.{subject_id}_{long_folds[0]}" - else: - root_env = os.path.join( - caps_dir, - "subjects", - subject_id, - session_id, - "t1", - "freesurfer_cross_sectional", - ) - sub_id_cmd = subject_id + "_" + session_id - - return root_env, sub_id_cmd - - -def perform_gtmseg(caps_dir, subject_id, session_id, is_longitudinal): - """gtmseg is a freesurfer command used to perform a segmentation used in some partial volume correction methods. - - Warnings: - - This method changes the environment variable $SUBJECTS_DIR (but put - the original one back after execution). This has not been intensely - tested whether it can lead to some problems : (for instance if 2 - subjects are running in parallel) - - Args: - (string) caps_dir : CAPS directory. - (string) subject_id: The subject_id (something like sub-ADNI002S4213) - (string) session_id: The session id ( something like : ses-M012) - (bool) is_longitudinal: If longitudinal processing, subjects_dir must be put elsewhere - - Returns: - (string) Path to the segmentation volume : a volume where each voxel - has a label (ranging [0 2035] ), see Freesurfer lookup table to see the - labels with their corresponding names. 
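The save/mutate/restore dance around $SUBJECTS_DIR that this function (and surf2surf/vol2surf below) performs by hand could equally be expressed as a context manager. A sketch of the pattern, with the same caveat about subjects running in parallel:

import os
from contextlib import contextmanager

@contextmanager
def freesurfer_subjects_dir(path):
    # Temporarily point FreeSurfer's $SUBJECTS_DIR at 'path', restoring the
    # previous value afterwards; not safe if two subjects run concurrently
    # in the same process, as the warning above notes.
    backup = os.environ.get("SUBJECTS_DIR", "")
    os.environ["SUBJECTS_DIR"] = str(path)
    try:
        yield
    finally:
        os.environ["SUBJECTS_DIR"] = backup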
- """ - import os - import shutil - - import nipype.pipeline.engine as pe - from nipype.interfaces.base import CommandLine - - import clinica.pipelines.pet_surface.pet_surface_utils as utils - - # Old subject_dir is saved for later - subjects_dir_backup = os.path.expandvars("$SUBJECTS_DIR") - - root_env, freesurfer_id = utils.get_new_subjects_dir( - is_longitudinal, caps_dir, subject_id, session_id - ) - - # Set the new subject dir for the function to work properly - os.environ["SUBJECTS_DIR"] = root_env - - if not os.path.exists( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.mgz" - ) - ): - # Creation of standalone node based on Command Line Interface. - # We simply put the command line we would run on a console - segmentation = pe.Node( - interface=CommandLine( - "gtmseg --s " + freesurfer_id + " --no-seg-stats --xcerseg", - terminal_output="stream", - ), - name="gtmseg", - ) - segmentation.run() - - # We specify the out file to be in the current directory of execution (easy for us to look at it afterward in the - # working directory). We copy then the file. - out_file = os.path.abspath("./gtmseg.mgz") - shutil.copy( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.mgz" - ), - out_file, - ) - - # Remove bunch of files created during segmentation in caps dir and not needed - gtmsegcab = os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.ctab" - ) - if os.path.exists(gtmsegcab): - os.remove(gtmsegcab) - - gtmseglta = os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.lta" - ) - if os.path.exists(gtmseglta): - os.remove(gtmseglta) - - # Set back the SUBJECT_DIR environment variable of the user - os.environ["SUBJECTS_DIR"] = subjects_dir_backup - return out_file - - -def remove_nan(volname): - """remove_nan is a method needed after a registration performed by spmregister : instead of filling space with 0, nan - are used to extend the PET space. We propose to replace them with 0s. - - Args: - (string) volname : path to the Nifti volume where NaNs need to be replaced by 0s - - Returns: - (string) Path to the volume in Nifti that does not contain any NaNs - """ - import os - - import nibabel as nib - import numpy as np - - # Load the volume and get the data - nifti_in = nib.load(volname) - data = np.nan_to_num(nifti_in.get_fdata(dtype="float32")) - - # Now create final image (using header of original image), and save it in current directory - nifti_out = nib.Nifti1Image(data, nifti_in.affine, header=nifti_in.header) - filename = os.path.basename(volname) - vol_wo_nan = "./no_nan_" + filename + ".gz" - vol_wo_nan = os.path.abspath(vol_wo_nan) - nib.save(nifti_out, vol_wo_nan) - return vol_wo_nan - - -def make_label_conversion(gtmsegfile, csv): - """make_label_conversion is a method used on the segmentation from gtmsegmentation. The purpose is to reduce the - number of label. The gathering of labels is specified in a separate file - - Args: - (string) gtmsegfile : path to the Nifti volume containing the gtmseg segmentation - (string) csv : path to .csv file that contains 3 columns : REGION SOURCE DST. Separator is , (coma). - - Returns: - (list of strings) List of path to the converted volumes according to the .csv file. 
Each volume is a mask - representing an area - """ - import os - - import nibabel as nib - import numpy - import pandas - - from clinica.utils.stream import cprint - - def isclose(a, b, rel_tol=1e-9, abs_tol=0.0): - """Small function designed to measure equality between to floating or double numbers, using 2 thresholds : a - relative tolerance, and an absolute tolerance - """ - return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) - - # Read label from gtmsegfile, change data into integers in order to have no problems when testing equality of labels - label = nib.load(gtmsegfile) - label.header.set_data_dtype("int8") - volume = label.get_fdata(dtype="float32") - - # Unique function gives a list where each label of the volume is listed once - old_labels = numpy.unique(volume) - old_labels = old_labels.astype("int16") - - # allsum is a control volume (sum of a pixel across the 4 th dimension must be equal to 1) - allsum = numpy.zeros(volume.shape) - - # Reading of csv file, raise exception if the pattern REGION, SOURCE, DST is not found - if not os.path.isfile(csv): - raise Exception("The CSV file does not exist.") - convert_lut = pandas.io.parsers.read_csv(csv, sep=",") - if list(convert_lut.columns.values) != ["REGION", "SOURCE", "DST"]: - raise Exception( - f"CSV file {csv} is not in the correct format. Columns should be: REGION, SOURCE, DST" - ) - - # Extract columns to a list form (values converted into integers) - src = list(convert_lut.SOURCE) - src_val = numpy.asanyarray(src) - src_val = src_val.astype("int") - - dst = list(convert_lut.DST) - dst_val = numpy.asarray(dst) - dst_val = dst_val.astype("int") - - reg = list(convert_lut.REGION) - - # Check that each label of original volume (old_label) has a matching transformation in the csv file - for i in range(old_labels.size): - index = numpy.argwhere(src_val == old_labels[i]) - # Size 0 means no occurrence found - if index.size == 0: - raise Exception( - f"Could not find label {old_labels[i]} on conversion table. 
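The conversion table is a plain three-column CSV; a sketch of the label lookup it drives, with a hypothetical file name:

import pandas as pd

lut = pd.read_csv("conversion.csv")  # columns must be REGION, SOURCE, DST
assert list(lut.columns) == ["REGION", "SOURCE", "DST"]
mapping = dict(zip(lut.SOURCE.astype(int), lut.DST.astype(int)))
# Every label present in the segmentation must appear in SOURCE,
# otherwise the function above raises.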
Add it manually in CSV file to correct error" - ) - # else: - # cprint(str(old_labels[i]) + " has been found on conversion table") - - # Instantiation of final volume, with same dtype as original volume - new_volume = numpy.zeros(volume.shape, dtype=volume.dtype) - # Computing the transformation - for i in range(len(src)): - new_volume[volume == src_val[i]] = dst_val[i] - # cprint("Region " + reg[i] + " transformed") - # Get unique list of new label - new_labels = numpy.unique(new_volume) - new_labels = new_labels.astype("int") - list_of_regions = list() - - # For each label, create a volume file filled with 0s and 1s and save it in current directory under whatever name - for i in range(new_labels.size): - region_volume = numpy.zeros(volume.shape, dtype="uint8") - region_volume[new_volume == new_labels[i]] = 1 - myNifti = nib.Nifti1Image(region_volume, label.affine, header=label.header) - current_path = "./" + str(new_labels[i]) + ".nii.gz" - current_path = os.path.abspath(current_path) - nib.save(myNifti, current_path) - list_of_regions.append(current_path) - # cprint("Label " + str(new_labels[i]) + " created in " + current_path) - allsum = allsum + region_volume - - # The sum of a voxel location across the fourth dimension should be 1 - sum_voxel_mean = float(sum(sum(sum(allsum)))) / allsum.size - if not isclose(1.0, sum_voxel_mean): - raise Exception( - f"Problem during parcellation: the mean sum of a voxel across 4th dimension is {sum_voxel_mean} instead of 1.0" - ) - # The list of files is returned - return list_of_regions - - -def runApplyInverseDeformationField_SPM_standalone( - target, deformation_field, img, matscript_folder -): - """ - This function does the exact same job as runApplyInverseDeformationField but with SPM standalone. We directly create - a batch file that SPM standalone can run. This function does not check whether SPM standalone must be used. 
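The per-region volumes written at the end are binary masks that must partition the image, which is what the 'allsum' check above enforces. In miniature, with toy labels:

import numpy as np

seg = np.array([[2, 2], [41, 0]])  # toy label volume
masks = {label: (seg == label).astype("uint8") for label in np.unique(seg)}
allsum = sum(masks.values())
assert float(allsum.mean()) == 1.0  # each voxel belongs to exactly one region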
Previous - check when building the pipeline ensures that all the env vars exists ($SPMSTANDALONE_HOME and $MCR_HOME) - """ - import os - import platform - import subprocess - from os.path import abspath, basename, exists, join - - prefix = "subject_space_" - - # Write SPM batch command directly in a script that is readable by SPM standalone - script_location = abspath("./m_script.m") - script_file = open(script_location, "w+") - script_file.write( - "jobs{1}.spm.util.defs.comp{1}.inv.comp{1}.def = {'" - + deformation_field - + "'};\n" - ) - script_file.write("jobs{1}.spm.util.defs.comp{1}.inv.space = {'" + target + "'};\n") - script_file.write("jobs{1}.spm.util.defs.out{1}.pull.fnames = {'" + img + "'};\n") - script_file.write( - "jobs{1}.spm.util.defs.out{1}.pull.savedir.saveusr = {'" - + abspath(os.getcwd()) - + "'};\n" - ) - script_file.write("jobs{1}.spm.util.defs.out{1}.pull.interp = 4;\n") - script_file.write("jobs{1}.spm.util.defs.out{1}.pull.mask = 1;\n") - script_file.write("jobs{1}.spm.util.defs.out{1}.pull.fwhm = [0 0 0];\n") - script_file.write("jobs{1}.spm.util.defs.out{1}.pull.prefix = '" + prefix + "';\n") - script_file.close() - - # Generate command line to run - # SPM standalone must be run directly from its root folder - if platform.system() == "Darwin": - # Mac OS - cmdline = f"cd $SPMSTANDALONE_HOME && ./run_spm12.sh $MCR_HOME batch {script_location}" - elif platform.system() == "Linux": - # Linux OS - cmdline = f"$SPMSTANDALONE_HOME/run_spm12.sh $MCR_HOME batch {script_location}" - else: - raise SystemError("Only support Mac OS and Linux") - subprocess_run = subprocess.run( - cmdline, - shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if subprocess_run.returncode != 0: - raise ValueError( - "runApplyInverseDeformationField_SPM_standalone failed, returned non-zero code" - ) - - output_file = join(abspath("./"), prefix + basename(img)) - if not exists(output_file): - raise IOError( - "Something went wrong while trying to run runApplyInverseDeformationField_SPM_standalone" - + ". Output file not generated. Command launched :\n\t " - + cmdline - + "\n. We strongly recommend that you use the supported version of Matlab MCR " - + " recommended by the creators of SPM." - ) - return output_file - - -def runApplyInverseDeformationField(target, deformation_field, img, matscript_folder): - import os - import sys - - from nipype.interfaces.matlab import MatlabCommand, get_matlab_command - - prefix = "subject_space_" - - MatlabCommand.set_default_matlab_cmd(get_matlab_command()) - matlab = MatlabCommand() - if sys.platform.startswith("linux"): - matlab.inputs.args = "-nosoftwareopengl" - - matlab.inputs.paths = matscript_folder - matlab.inputs.script = """ - applyInverseDeformationField('%s', '%s', '%s', './', '%s') - """ % ( - target, - deformation_field, - img, - prefix, - ) - matlab.inputs.single_comp_thread = False - matlab.inputs.logfile = os.path.join("./", "matlab_output.log") - matlab.run() - - output_file = os.path.join(os.path.abspath("./"), prefix + os.path.basename(img)) - if not os.path.exists(output_file): - raise IOError( - f"Something went wrong, please check {os.path.abspath(matlab.inputs.logfile)} for more information" - ) - return output_file - - -def suvr_normalization(pet_path, mask): - """suvr_normalization is a way of getting suvr from your pet image, based on the segmentation performed by - gtmsegmentation. The Standard Uptake Value ratio is computed by dividing the whole PET volume by the mean value - observed in the pons. 
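In other words, SUVR(v) = PET(v) / mean(PET over the eroded reference mask). In miniature, with toy numbers:

import numpy as np

pet = np.array([[4.0, 8.0], [2.0, 6.0]])
eroded_mask = np.array([[True, False], [True, False]])  # toy pons mask
mean_pons_activity = pet[eroded_mask].mean()            # (4 + 2) / 2 == 3.0
suvr = pet / mean_pons_activity                         # e.g. suvr[0, 1] == 8 / 3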
- - Args: - (string) pet_path : path to the Nifti volume containing PET scan, realigned on upsampled T1 - (string) mask : mask of the pons (18FFDG) or pons+cerebellum (18FAV45) already eroded - - Returns: - (string) Path to the suvr normalized volume in the current directory - """ - import os - - import nibabel as nib - - # Load mask - eroded_mask_nifti = nib.load(mask) - eroded_mask = eroded_mask_nifti.get_fdata(dtype="float32") - eroded_mask = eroded_mask > 0 - - # Load PET data (they must be in gtmsegspace, or same space as label file) - pet = nib.load(pet_path) - pet_data = pet.get_fdata(dtype="float32") - - # check that eroded mask is not null - mask_size = sum(sum(sum(eroded_mask))) - if mask_size == 0: - raise Exception( - "Number of non-zero value of mask is 0. A problem occurred when moving the eroded mask from MNI to gtmsegspace" - ) - - # Mask unwanted values to determine mean uptake value - pons_pet_activity = eroded_mask * pet_data - mean_pons_pet_activity = sum(sum(sum(pons_pet_activity))) / mask_size - - # Then normalize PET data by this mean activity - suvr_pet_data = pet_data / mean_pons_pet_activity - suvr = nib.Nifti1Image(suvr_pet_data, pet.affine, header=pet.header) - suvr_filename = "suvr_" + os.path.basename(pet_path) - suvr_filename = os.path.abspath("./" + suvr_filename) - nib.save(suvr, suvr_filename) - return suvr_filename - - -def mris_expand(in_surface): - """mris_expand is using the freesurfer function of the same name. It expands the white input surface toward the pial, - generating 7 surfaces at 35%, 40%, 45%, 50%, 55%, 60%, 65% of thickness. - - Args: - (string) in_surface : Path to the input white surface, but must be named lh.white or rh.white, and the folder - containing the surface file must also have ?h.pial, ?.sphere, ?h.thickness (freesurfer surf folder) - - Returns: - (list of strings) List of path to the generated surfaces - """ - import os - import shutil - import subprocess - import sys - - # bug in mris_expand : you are not allowed to write the surfaces elsewhere than in the surf folder - # -N is a hidden parameter (not documented) that allows the user to specify the number of surface generated between - # source and final target surface. Here target is 65% of thickness, with 13 surfaces. Then we only keep the surfaces - # we are interested in. 
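A quick check of which output indices correspond to which cortical-depth fractions, assuming (as the comment above describes) that the outputs are numbered 000 to 013 at equal steps toward the 65% target:

fractions = [round(0.65 * i / 13, 2) for i in range(14)]
kept = {f"{i:03d}": fractions[i] for i in range(7, 14)}
# {'007': 0.35, '008': 0.4, '009': 0.45, '010': 0.5,
#  '011': 0.55, '012': 0.6, '013': 0.65}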
- - out_file = in_surface + "_exp-" - cmd = "mris_expand -thickness -N 13 " + in_surface + " 0.65 " + out_file - # If system is MacOS, this export command must be run just before the mri_vol2surf command to bypass MacOs security - if sys.platform == "darwin": - cmd = "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && " + cmd - - subprocess_mris_expand = subprocess.run( - cmd, - shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if subprocess_mris_expand.returncode != 0: - raise ValueError("mris_expand failed, returned non-zero code") - - # Move surface file to the current directory - out_filelist = [ - out_file + "007", - out_file + "008", - out_file + "009", - out_file + "010", - out_file + "011", - out_file + "012", - out_file + "013", - ] - out_in_node = [os.path.abspath("./" + os.path.basename(x)) for x in out_filelist] - for i in range(len(out_filelist)): - shutil.move(out_filelist[i], out_in_node[i]) - - # Remove useless surfaces (0%, 5%, 10%, 15%, 20%, 25% and 30% of thickness) - to_remove = [ - out_file + "000", - out_file + "001", - out_file + "002", - out_file + "003", - out_file + "004", - out_file + "005", - out_file + "006", - ] - for i in range(len(to_remove)): - os.remove(to_remove[i]) - - return out_in_node - - -def surf2surf( - in_surface, reg_file, gtmsegfile, subject_id, session_id, caps_dir, is_longitudinal -): - """surf2surf is a wrapper of freesurfer command mri_surf2surf. Here the aim is to convert a input surface (which is - the native space of the subject), into the same surface but in the gtmseg space (space of the volume generated by - the gtmsegmentation) - - Args: - (string) in_surface : surface file that needs to be converted - (string) reg_file : Path to a registration file that represents the transformation needed to go from the native - space to the gtmsegspace (see https://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat for more - details) - (string) gtmsegfile : Path to the gtm segmentation file - (string) subject_id : The subject_id (something like sub-ADNI002S4213) - (string) session_id : The session id ( something like : ses-M012) - (string) caps_dir : Path to the CAPS directory - (bool) is_longitudinal: longitudinal files - - Returns: - (string) Path to the converted surface in current directory - """ - import os - import shutil - import subprocess - import sys - - import clinica.pipelines.pet_surface.pet_surface_utils as utils - - # set subjects_dir env. 
variable for mri_surf2surf to work properly - subjects_dir_backup = os.path.expandvars("$SUBJECTS_DIR") - - root_env, freesurfer_id = utils.get_new_subjects_dir( - is_longitudinal, caps_dir, subject_id, session_id - ) - - os.environ["SUBJECTS_DIR"] = root_env - - # make a copy of surface file to surface directory in CAPS in order to allow processing - shutil.copy( - in_surface, - os.path.join(os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "surf"), - ) - - # TODO write nicer way to grab hemi & filename (difficulty caused by the dots in filenames) - # extract hemisphere based on filename - hemi = os.path.basename(in_surface)[0:2] - surfname = os.path.basename(in_surface)[3:] - - # Perform surf2surf algorithm - tval = os.path.abspath("./" + os.path.basename(in_surface) + "_gtmsegspace") - cmd = ( - "mri_surf2surf --reg %s %s --sval-xyz %s --hemi %s --tval-xyz %s --tval %s --s %s " - % (reg_file, gtmsegfile, surfname, hemi, gtmsegfile, tval, freesurfer_id) - ) - - # If system is MacOS, this export command must be run just before the mri_vol2surf command to bypass MacOs security - if sys.platform == "darwin": - cmd = "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && " + cmd - subprocess_mri_surf2surf = subprocess.run( - cmd, - shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if subprocess_mri_surf2surf.returncode != 0: - raise ValueError("mri_surf2surf failed, returned non-zero code") - - # remove file in caps - os.remove( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), - freesurfer_id, - "surf", - os.path.basename(in_surface), - ) - ) - - # put back original subjects_dir env - os.environ["SUBJECTS_DIR"] = subjects_dir_backup - - return tval - - -def vol2surf( - volume, surface, subject_id, session_id, caps_dir, gtmsegfile, is_longitudinal -): - """vol2surf is a wrapper of freesurfer command mri_vol2surf. It projects the volume into the surface : the value at - each vertex is given by the value of the voxel it intersects - - Args: - (string) volume : Path to PET volume (in gtmseg space) that needs to be mapped into surface - (string) surface : Path to surface file - (string) gtmsegfile :l Path to the gtm segmentation file (provides information on space, labels are not used - (string) subject_id : The subject_id (something like sub-ADNI002S4213) - (string) session_id : The session id ( something like : ses-M012) - (string) caps_dir : Path to the CAPS directory - - Returns: - (string) Path to the data projected onto the surface - """ - import os - import shutil - import subprocess - import sys - - import clinica.pipelines.pet_surface.pet_surface_utils as utils - - # set subjects_dir env. 
variable for mri_vol2surf to work properly - subjects_dir_backup = os.path.expandvars("$SUBJECTS_DIR") - - root_env, freesurfer_id = utils.get_new_subjects_dir( - is_longitudinal, caps_dir, subject_id, session_id - ) - - os.environ["SUBJECTS_DIR"] = root_env - - # TODO write nicer way to grab hemi & filename (difficulty caused by the dots in filenames) - # extract hemisphere based on filename - hemi = os.path.basename(surface)[0:2] - surfname = os.path.basename(surface)[3:] - - # copy surface file in caps surf folder to allow processing - shutil.copy( - surface, - os.path.join(os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "surf"), - ) - - if not os.path.exists( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.mgz" - ) - ): - shutil.copy( - gtmsegfile, - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.mgz" - ), - ) - - # execute vol2surf - output = os.path.abspath( - "./" + hemi + ".projection_" + os.path.basename(surface) + ".mgh" - ) - cmd = "mri_vol2surf" - cmd += " --mov " + volume - cmd += " --o " + output - cmd += " --surf " + surfname - cmd += " --hemi " + hemi - cmd += " --regheader " + freesurfer_id - cmd += " --ref gtmseg.mgz" - cmd += " --interp nearest" - - # If system is MacOS, this export command must be run just before the mri_vol2surf command to bypass MacOs security - if sys.platform == "darwin": - cmd = "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && " + cmd - subprocess_mri_vol2surf = subprocess.run( - cmd, - shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - if subprocess_mri_vol2surf.returncode != 0: - raise ValueError("mri_vol2surf failed, returned non-zero code") - - # remove file in caps - os.remove( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), - freesurfer_id, - "surf", - os.path.basename(surface), - ) - ) - # TODO careful here... - # Removing gtmseg.mgz may lead to problems as other vol2surf are using it - os.remove( - os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), freesurfer_id, "mri", "gtmseg.mgz" - ) - ) - - # put back original subjects_dir env - os.environ["SUBJECTS_DIR"] = subjects_dir_backup - - return output - - -def weighted_mean(in_surfaces): - """weighted_mean make a weighted average at each node of the surface. The weight are defined by a normal - distribution (centered on the mid surface) - - Args: - (list of strings) in_surfaces : List of path to the data projected on the 7 surfaces (35 to 65 % of thickness) - at each nodes) - - Returns: - (string) Path to the data averaged - """ - import os - - import nibabel as nib - import numpy as np - - # coefficient for normal repartition - coefficient = [0.1034, 0.1399, 0.1677, 0.1782, 0.1677, 0.1399, 0.1034] - - # sample only to get dimension - sample = nib.load(in_surfaces[0]) - data_normalized = np.zeros(sample.header.get_data_shape()) - - if len(in_surfaces) != 7: - raise Exception( - f"There should be 7 surfaces at this point of the pipeline, but found {len(in_surfaces)}, something went wrong..." 
- ) - - for i in range(len(in_surfaces)): - current_surf = nib.load(in_surfaces[i]) - data_normalized += current_surf.get_fdata(dtype="float32") * coefficient[i] - - # hemisphere name will always be in our case the first 2 letters of the filename - hemi = os.path.basename(in_surfaces[0])[0:2] - # data_normalized = np.atleast_3d(data_normalized) - hemi_projection = nib.MGHImage( - data_normalized, affine=sample.affine, header=sample.header - ) - out_surface = "./" + hemi + ".averaged_projection_on_cortical_surface.mgh" - out_surface = os.path.abspath(out_surface) - nib.save(hemi_projection, out_surface) - - return out_surface - - -def fsaverage_projection( - projection, subject_id, caps_dir, session_id, fwhm, is_longitudinal -): - """fsaverage_projection projects your data into an averaged subject called fsaverage, available in your $SUBJECTS_DIR - folder. fsaverage and the subject must be in the subject_dir, so a copy of fsaverage is performed if necessary - - Args: - (string) projection : Path to the projected data onto native subject surface - (string) subject_id : The subject id (something like sub-ADNI002S4213) - (string) session_id : The session id ( something like : ses-M012) - (string) caps_dir : Path to the CAPS directory - (float) fwhm : FWHM of the Gaussian filter used for smoothing on fsaverage surface (not volume !) - (bool) is_longitudinal : longitudinal pipeline or not - - Returns: - (string) Path to the data averaged - """ - import os - import shutil - - from nipype.interfaces.freesurfer import MRISPreproc - - import clinica.pipelines.pet_surface.pet_surface_utils as utils - - subjects_dir_backup = os.path.expandvars("$SUBJECTS_DIR") - - root_env, freesurfer_id = utils.get_new_subjects_dir( - is_longitudinal, caps_dir, subject_id, session_id - ) - - os.environ["SUBJECTS_DIR"] = root_env - - # copy fsaverage folder next to : subject_id + '_' + session_id - # for the mris_preproc command to properly find src and target - fsaverage_has_been_copied = False - if not os.path.exists( - os.path.join(os.path.expandvars("$SUBJECTS_DIR"), "fsaverage") - ): - shutil.copytree( - os.path.join(subjects_dir_backup, "fsaverage"), - os.path.join(os.path.expandvars("$SUBJECTS_DIR"), "fsaverage"), - ) - fsaverage_has_been_copied = True - - # also copy the mgh file in the surf folder (needed by MRISPreproc - projection_in_surf_folder = os.path.join( - os.path.expandvars("$SUBJECTS_DIR"), - freesurfer_id, - "surf", - os.path.basename(projection), - ) - - if not os.path.exists(projection_in_surf_folder): - shutil.copy(projection, projection_in_surf_folder) - - hemi = os.path.basename(projection)[0:2] - out_fsaverage = os.path.abspath( - "./fsaverage_fwhm-" + str(fwhm) + "_" + os.path.basename(projection) - ) - - # Use standalone node - fsproj = MRISPreproc() - fsproj.inputs.target = "fsaverage" - fsproj.inputs.subjects = [freesurfer_id] - fsproj.inputs.fwhm = fwhm - fsproj.inputs.hemi = hemi - fsproj.inputs.surf_measure = os.path.basename(projection)[3:] - fsproj.inputs.out_file = out_fsaverage - fsproj.run() - - # remove projection file from surf folder - os.remove(projection_in_surf_folder) - - # remove fsaverage if it has been copied - if fsaverage_has_been_copied: - shutil.rmtree(os.path.join(os.path.expandvars("$SUBJECTS_DIR"), "fsaverage")) - - # put back original subjects_dir env - os.environ["SUBJECTS_DIR"] = subjects_dir_backup - return out_fsaverage - - -def get_mid_surface(in_surfaces): - """get_mid_surface gives the mid surface when dealing with the 7 different surfaces - - Args: - 
(list of strings) in_surfaces : List of path to the 7 different surfaces generated by mris_expand - - Returns: - (string) Path to the mid surface - """ - return in_surfaces[3] - - -def reformat_surfname(hemi, left_surface, right_surface): - res = None - if hemi == "lh": - res = left_surface - elif hemi == "rh": - res = right_surface - else: - raise Exception( - "First input of this reformat_surfname function must be either lh or rh" - ) - return res - - -def produce_tsv(pet, atlas_files): - """produce_tsv computes the average of PET signal based on annot files from Freesurfer. Those files describes the - brain according to known atlases. - - Args: - (string) pet : list of path to the PET projection (must be a MGH file) [left_hemisphere, right_hemisphere] - (string) atlas_files : Dictionary containing path to lh and rh annotation files for any number of atlases. - - Returns: - (string) tsv : path to the tsv containing average PET values - """ - import os - - import nibabel as nib - import numpy as np - import pandas as pds - - from clinica.utils.stream import cprint - - # Extract data from projected PET data - lh_pet_mgh = np.squeeze(nib.load(pet[0]).get_fdata(dtype="float32")) - rh_pet_mgh = np.squeeze(nib.load(pet[1]).get_fdata(dtype="float32")) - - filename_tsv = [] - for atlas in atlas_files: - annot_atlas_left = nib.freesurfer.io.read_annot( - atlas_files[atlas]["lh"], orig_ids=False - ) - annot_atlas_left[0][annot_atlas_left[0] == -1] = 0 - annot_atlas_right = nib.freesurfer.io.read_annot( - atlas_files[atlas]["rh"], orig_ids=False - ) - annot_atlas_right[0][annot_atlas_right[0] == -1] = 0 - - average_region = [] - region_names = [] - for r in range(len(annot_atlas_left[2])): - # cprint(annot_atlas_left[2][r]) - region_names.append(annot_atlas_left[2][r].astype(str) + "_lh") - region_names.append(annot_atlas_left[2][r].astype(str) + "_rh") - - mask_left = annot_atlas_left[0] == r - mask_left = np.uint(mask_left) - - masked_data_left = mask_left * lh_pet_mgh - if np.sum(mask_left) == 0: - average_region.append(np.nan) - else: - average_region.append(np.sum(masked_data_left) / np.sum(mask_left)) - - mask_right = annot_atlas_right[0] == r - mask_right = np.uint(mask_right) - masked_data_right = mask_right * rh_pet_mgh - if np.sum(mask_right) == 0: - average_region.append(np.nan) - else: - average_region.append(np.sum(masked_data_right) / np.sum(mask_right)) - - final_tsv = pds.DataFrame( - { - "index": range(len(region_names)), - "label_name": region_names, - "mean_scalar": list(average_region), - } - ) - filename_atlas_tsv = "./" + atlas + ".tsv" - filename_tsv.append(filename_atlas_tsv) - final_tsv.to_csv( - filename_atlas_tsv, - sep="\t", - index=False, - columns=["index", "label_name", "mean_scalar"], - ) - return os.path.abspath(filename_tsv[0]), os.path.abspath(filename_tsv[1]) - - -def get_wf( - subject_id, - session_id, - pvc_psf_tsv, - caps_dir, - pet, - orig_nu, - white_surface_left, - white_surface_right, - working_directory_subjects, - acq_label: str, - csv_segmentation, - suvr_reference_region: str, - matscript_folder_inverse_deformation, - destrieux_left, - destrieux_right, - desikan_left, - desikan_right, - is_longitudinal, -): - """get_wf create a full workflow for only one subject, and then executes it - - Args: - subject_id (string): The subject ID - session_id (string): The session ID - pvc_psf_tsv (string): Path the TSV file containing information on the point spread function (PSF) - caps_dir (string): Path to the CAPS directory - pet (string): Path to the PET image 
in the bids directory - orig_nu (string): Path to the orig_nu file (must be in the CAPS directory, in mri) - white_surface_left (string): Path to the left white surface in native space of subject - white_surface_right (string): Path to the right white surface in native space of subject - working_directory_subjects (string): - acq_label (string): - csv_segmentation (string): Path to the CSV for the segmentation (problems encountered while using __file__) - suvr_reference_region (string): Label of the SUVR reference region - matscript_folder_inverse_deformation (string): Path to the current folder containing the matlab script used to call the spm function for the inverse deformation - destrieux_left (string): - destrieux_right (string): - desikan_left (string): - desikan_right (string): - is_longitudinal (string): - - Returns: - Void - """ - import os - - import nipype.interfaces.io as nio - import nipype.interfaces.utility as niu - import nipype.pipeline.engine as pe - from nipype.interfaces.freesurfer import ApplyVolTransform, MRIConvert, Tkregister2 - from nipype.interfaces.fsl import Merge - from nipype.interfaces.petpvc import PETPVC - from nipype.interfaces.spm import Coregister, Normalize12 - - import clinica.pipelines.pet_surface.pet_surface_utils as utils - from clinica.utils.filemanip import get_subject_id, load_volume, unzip_nii - from clinica.utils.pet import ( - SUVRReferenceRegion, - Tracer, - get_suvr_mask, - read_psf_information, - ) - from clinica.utils.spm import get_tpm, use_spm_standalone_if_available - from clinica.utils.ux import print_begin_image - - using_spm_standalone = use_spm_standalone_if_available() - - suvr_reference_region = SUVRReferenceRegion(suvr_reference_region) - acq_label = Tracer(acq_label) - image_id = get_subject_id(pet) - try: - load_volume(pet) - except ValueError as e: - raise ValueError( - f"Clinica could not load volumes for {image_id.replace('_', ' | ')}. 
{str(e)}" - ) - print_begin_image(image_id) - - # Creation of workflow - # 1 Creation of node - unzip_pet = pe.Node( - niu.Function( - input_names=["in_file"], output_names=["out_file"], function=unzip_nii - ), - name="unzip_pet", - ) - - unzip_orig_nu = unzip_pet.clone(name="unzip_orig_nu") - - unzip_mask = unzip_pet.clone(name="unzip_mask") - unzip_mask.inputs.in_file = str(get_suvr_mask(suvr_reference_region)) - - coreg = pe.Node(Coregister(), name="coreg") - - convert_mgh = pe.Node(MRIConvert(), name="convert_mgh") - - removenan = pe.Node( - niu.Function( - input_names=["volname"], - output_names=["vol_wo_nan"], - function=utils.remove_nan, - ), - name="removenan", - ) - - gtmsegmentation = pe.Node( - niu.Function( - input_names=["caps_dir", "subject_id", "session_id", "is_longitudinal"], - output_names=["gtmseg_file"], - function=utils.perform_gtmseg, - ), - name="gtmseg", - ) - gtmsegmentation.inputs.caps_dir = caps_dir - gtmsegmentation.inputs.subject_id = subject_id - gtmsegmentation.inputs.session_id = session_id - gtmsegmentation.inputs.is_longitudinal = is_longitudinal - - tkregister = pe.Node(Tkregister2(reg_header=True), name="tkreg") - - convert_gtmseg = convert_mgh.clone(name="convert_gtmseg") - - labelconversion = pe.Node( - niu.Function( - input_names=["gtmsegfile", "csv"], - output_names=["list_of_regions"], - function=utils.make_label_conversion, - ), - name="conversion_of_labels", - ) - - labelconversion.inputs.csv = csv_segmentation - - if not os.path.exists(labelconversion.inputs.csv): - raise Exception("CSV file : " + labelconversion.inputs.csv + " does not exist.") - - merge_volume = pe.Node( - Merge(output_type="NIFTI_GZ", dimension="t"), name="merge_volume" - ) - - vol2vol = pe.Node( - ApplyVolTransform(reg_header=True, interp="trilin"), name="vol2vol" - ) - - vol2vol_mask = pe.Node( - ApplyVolTransform(reg_header=True, interp="nearest"), name="vol2vol_mask" - ) - - normalize12 = pe.Node( - Normalize12( - tpm=get_tpm(), - affine_regularization_type="mni", - jobtype="est", - bias_fwhm=60, - bias_regularization=0.0001, - warping_regularization=[0, 0.001, 0.5, 0.05, 0.2], - ), - name="normalize_to_MNI", - ) - - if using_spm_standalone: - fun_apply_inverse_deformation = ( - utils.runApplyInverseDeformationField_SPM_standalone - ) - else: - fun_apply_inverse_deformation = utils.runApplyInverseDeformationField - apply_inverse_deformation = pe.Node( - niu.Function( - input_names=["target", "deformation_field", "img", "matscript_folder"], - output_names=["freesurfer_space_eroded_mask"], - function=fun_apply_inverse_deformation, - ), - name="applyInverseDeformation", - ) - apply_inverse_deformation.inputs.matscript_folder = ( - matscript_folder_inverse_deformation - ) - - pons_normalization = pe.Node( - niu.Function( - input_names=["pet_path", "mask"], - output_names=["suvr"], - function=utils.suvr_normalization, - ), - name="pons_normalization", - ) - pons_normalization.inputs.pet_tracer = acq_label.value - - # read_psf_information expects a list of subjects/sessions and returns a list of PSF - psf_info = read_psf_information(pvc_psf_tsv, [subject_id], [session_id], acq_label)[ - 0 - ] - - pvc = pe.Node(PETPVC(pvc="IY"), name="petpvc") - pvc.inputs.fwhm_x = psf_info[0] - pvc.inputs.fwhm_y = psf_info[1] - pvc.inputs.fwhm_z = psf_info[2] - - reformat_surface_name = pe.Node( - niu.Function( - input_names=["hemi", "left_surface", "right_surface"], - output_names=["out"], - function=utils.reformat_surfname, - ), - name="reformat_surface_name", - ) - 
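read_psf_information, used a few lines up, returns one (x, y, z) FWHM triple per subject/session pair, and the PETPVC node consumes it axis by axis. A sketch with made-up FWHM values in place of the TSV lookup:

import nipype.pipeline.engine as pe
from nipype.interfaces.petpvc import PETPVC

psf_info = [(8.0, 9.0, 10.0)]  # hypothetical (x, y, z) FWHM in mm for one subject
pvc = pe.Node(PETPVC(pvc="IY"), name="petpvc")  # iterative Yang correction
pvc.inputs.fwhm_x, pvc.inputs.fwhm_y, pvc.inputs.fwhm_z = psf_info[0]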
reformat_surface_name.inputs.left_surface = white_surface_left - reformat_surface_name.inputs.right_surface = white_surface_right - reformat_surface_name.iterables = ("hemi", ["lh", "rh"]) - - mris_exp = pe.Node( - niu.Function( - input_names=["in_surface"], - output_names=["out_surface"], - function=utils.mris_expand, - ), - name="mris_expand_white", - ) - - surf_conversion = pe.MapNode( - niu.Function( - input_names=[ - "in_surface", - "reg_file", - "gtmsegfile", - "subject_id", - "session_id", - "caps_dir", - "is_longitudinal", - ], - output_names=["tval"], - function=utils.surf2surf, - ), - name="surf_conversion", - iterfield=["in_surface"], - ) - surf_conversion.inputs.subject_id = subject_id - surf_conversion.inputs.session_id = session_id - surf_conversion.inputs.caps_dir = caps_dir - surf_conversion.inputs.is_longitudinal = is_longitudinal - - vol_on_surf = pe.MapNode( - niu.Function( - input_names=[ - "volume", - "surface", - "subject_id", - "session_id", - "caps_dir", - "gtmsegfile", - "is_longitudinal", - ], - output_names=["output"], - function=utils.vol2surf, - ), - name="vol_on_surf", - iterfield=["surface"], - ) - vol_on_surf.inputs.subject_id = subject_id - vol_on_surf.inputs.session_id = session_id - vol_on_surf.inputs.caps_dir = caps_dir - vol_on_surf.inputs.is_longitudinal = is_longitudinal - - normal_average = pe.Node( - niu.Function( - input_names=["in_surfaces"], - output_names=["out_surface"], - function=utils.weighted_mean, - ), - name="normal_average", - ) - - project_on_fsaverage = pe.Node( - niu.Function( - input_names=[ - "projection", - "subject_id", - "caps_dir", - "session_id", - "fwhm", - "is_longitudinal", - ], - output_names=["out_fsaverage"], - function=utils.fsaverage_projection, - ), - name="project_on_fsaverage", - ) - project_on_fsaverage.iterables = ("fwhm", [0, 5, 10, 15, 20, 25]) - project_on_fsaverage.inputs.subject_id = subject_id - project_on_fsaverage.inputs.session_id = session_id - project_on_fsaverage.inputs.caps_dir = caps_dir - project_on_fsaverage.inputs.is_longitudinal = is_longitudinal - - extract_mid_surface = pe.Node( - niu.Function( - input_names=["in_surfaces"], - output_names=["mid_surface"], - function=utils.get_mid_surface, - ), - name="extract_mid_surface", - ) - - surface_atlas = { - "destrieux": {"lh": destrieux_left, "rh": destrieux_right}, - "desikan": {"lh": desikan_left, "rh": desikan_right}, - } - - gather_pet_projection = pe.JoinNode( - niu.IdentityInterface(fields=["pet_projection_lh_rh"]), - name="gather_pet_projection_hemisphere", - joinsource="reformat_surface_name", - joinfield=["pet_projection_lh_rh"], - ) - - atlas_tsv = pe.Node( - niu.Function( - input_names=["pet", "atlas_files"], - output_names=["destrieux_tsv", "desikan_tsv"], - function=utils.produce_tsv, - ), - name="atlas_tsv", - ) - atlas_tsv.inputs.atlas_files = surface_atlas - - # 2 creation of workflow : working dir, inputnode, outputnode and datasink - name_workflow = subject_id.replace("-", "_") + "_" + session_id.replace("-", "_") - if is_longitudinal: - name_workflow += "_long" - - wf = pe.Workflow(name=name_workflow) - wf.base_dir = working_directory_subjects - - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "orig_nu", - "pet", - "psf", - "white_surface_left", - "white_surface_right", - ] - ), - name="inputnode", - mandatory_inputs=True, - ) - - inputnode.inputs.orig_nu = orig_nu - inputnode.inputs.pet = pet - inputnode.inputs.psf = pvc_psf_tsv - inputnode.inputs.white_surface_right = white_surface_right - 
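The hemisphere handling here relies on Nipype's iterables/JoinNode pair: 'iterables' forks every downstream node once per value, and the JoinNode collapses the forks back into a single list. A minimal sketch of the mechanism, separate from the pipeline:

import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe

src = pe.Node(niu.IdentityInterface(fields=["hemi"]), name="src")
src.iterables = ("hemi", ["lh", "rh"])  # forks the graph, one branch per hemisphere

join = pe.JoinNode(
    niu.IdentityInterface(fields=["hemis"]),
    joinsource="src",   # rejoin the branches created by 'src'
    joinfield=["hemis"],
    name="join",
)

wf = pe.Workflow(name="hemi_demo")
wf.connect([(src, join, [("hemi", "hemis")])])
wf.run()  # 'join' receives hemis == ['lh', 'rh']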
inputnode.inputs.white_surface_left = white_surface_left - - outputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "mid_surf", - "projection_native_subject", - "projection_fsaverage_smoothed", - "destrieux_tsv", - "desikan_tsv", - ] - ), - name="outputnode", - mandatory_inputs=True, - ) - - datasink = pe.Node(nio.DataSink(), name="sinker") - - def get_output_dir(is_longitudinal, caps_dir, subject_id, session_id): - import os - - from clinica.utils.exceptions import ClinicaCAPSError - - if is_longitudinal: - root = os.path.join(caps_dir, "subjects", subject_id, session_id, "t1") - long_folds = [f for f in os.listdir(root) if f.startswith("long-")] - if len(long_folds) > 1: - raise ClinicaCAPSError( - f"[Error] Folder {root} contains {len(long_folds)} folders labeled long-*. Only 1 can exist" - ) - elif len(long_folds) == 0: - raise ClinicaCAPSError( - f"[Error] Folder {root} does not contains a folder labeled long-*. Have you run t1-freesurfer-longitudinal?" - ) - else: - output_dir = os.path.join( - caps_dir, - "subjects", - subject_id, - session_id, - "pet", - long_folds[0], - "surface_longitudinal", - ) - else: - output_dir = os.path.join( - caps_dir, "subjects", subject_id, session_id, "pet", "surface" - ) - - return output_dir - - datasink.inputs.base_directory = get_output_dir( - is_longitudinal, caps_dir, subject_id, session_id - ) - datasink.inputs.parameterization = True - cross_sectional_regexp_substitutions = [ - # Mid surface - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/midsurface\/.*_hemi_([a-z]+)(.*)$", - r"\1/\2_\3_hemi-\4_midcorticalsurface", - ), - # Projection in native space - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/projection_native\/.*_hemi_([a-z]+).*", - r"\1/\2_\3_trc-" - + acq_label.value - + r"_pet_space-native_suvr-" - + suvr_reference_region.value - + r"_pvc-iy_hemi-\4_projection.mgh", - ), - # Projection in fsaverage - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/projection_fsaverage\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*", - r"\1/\2_\3_trc-" - + acq_label.value - + r"_pet_space-fsaverage_suvr-" - + suvr_reference_region.value - + r"_pvc-iy_hemi-\4_fwhm-\5_projection.mgh", - ), - # TSV file for Destrieux atlas - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/destrieux_tsv\/destrieux.tsv", - r"\1/atlas_statistics/\2_\3_trc-" - + acq_label.value - + "_pet_space-destrieux_pvc-iy_suvr-" - + suvr_reference_region.value - + "_statistics.tsv", - ), - # TSV file for Desikan atlas - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/surface)\/desikan_tsv\/desikan.tsv", - r"\1/atlas_statistics/\2_\3_trc-" - + acq_label.value - + "_pet_space-desikan_pvc-iy_suvr-" - + suvr_reference_region.value - + "_statistics.tsv", - ), - ] - longitudinal_regexp_substitutions = [ - # Mid surface - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/midsurface\/.*_hemi_([a-z]+)(.*)$", - r"\1/\2_\3_\4_hemi-\5_midcorticalsurface", - ), - # Projection in native space - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/projection_native\/.*_hemi_([a-z]+).*", - r"\1/\2_\3_\4_trc-" - + acq_label.value - + r"_pet_space-native_suvr-" - + suvr_reference_region.value - + r"_pvc-iy_hemi-\5_projection.mgh", - ), - # Projection in fsaverage - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/projection_fsaverage\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*", - r"\1/\2_\3_\4_trc-" - + acq_label.value - + r"_pet_space-fsaverage_suvr-" - + suvr_reference_region.value - + r"_pvc-iy_hemi-\5_fwhm-\6_projection.mgh", - ), - # TSV file for Destrieux atlas - ( - 
r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/destrieux_tsv\/destrieux.tsv", - r"\1/atlas_statistics/\2_\3_\4_trc-" - + acq_label.value - + "_pet_space-destrieux_pvc-iy_suvr-" - + suvr_reference_region.value - + "_statistics.tsv", - ), - # TSV file for Desikan atlas - ( - r"(.*(sub-.*)\/(ses-.*)\/pet\/(long-.*)\/surface_longitudinal)\/desikan_tsv\/desikan.tsv", - r"\1/atlas_statistics/\2_\3_\4_trc-" - + acq_label.value - + "_pet_space-desikan_pvc-iy_suvr-" - + suvr_reference_region.value - + "_statistics.tsv", - ), - ] - if is_longitudinal: - datasink.inputs.regexp_substitutions = longitudinal_regexp_substitutions - else: - datasink.inputs.regexp_substitutions = cross_sectional_regexp_substitutions - - # 3 Connecting the nodes - - # TODO(@arnaud.marcoux): Add titles for sections of connections. - # Could be useful to add sections title to group similar connections - # together. - - # fmt: off - wf.connect( - [ - (inputnode, unzip_pet, [("pet", "in_file")]), - (unzip_pet, coreg, [("out_file", "source")]), - (inputnode, convert_mgh, [("orig_nu", "in_file")]), - (convert_mgh, unzip_orig_nu, [("out_file", "in_file")]), - (unzip_orig_nu, coreg, [("out_file", "target")]), - (coreg, removenan, [("coregistered_source", "volname")]), - (removenan, vol2vol, [("vol_wo_nan", "source_file")]), - (inputnode, tkregister, [("orig_nu", "target_image")]), - (unzip_orig_nu, normalize12, [("out_file", "image_to_align")]), - (unzip_mask, apply_inverse_deformation, [("out_file", "img")]), - (normalize12, apply_inverse_deformation, [("deformation_field", "deformation_field")]), - (unzip_orig_nu, apply_inverse_deformation, [("out_file", "target")]), - (apply_inverse_deformation, vol2vol_mask, [("freesurfer_space_eroded_mask", "source_file")]), - (gtmsegmentation, vol2vol_mask, [("gtmseg_file", "target_file")]), - (gtmsegmentation, tkregister, [("gtmseg_file", "moving_image")]), - (gtmsegmentation, convert_gtmseg, [("gtmseg_file", "in_file")]), - (gtmsegmentation, vol2vol, [("gtmseg_file", "target_file")]), - (vol2vol, pons_normalization, [("transformed_file", "pet_path")]), - (vol2vol_mask, pons_normalization, [("transformed_file", "mask")]), - (convert_gtmseg, labelconversion, [("out_file", "gtmsegfile")]), - (labelconversion, merge_volume, [("list_of_regions", "in_files")]), - (merge_volume, pvc, [("merged_file", "mask_file")]), - (pons_normalization, pvc, [("suvr", "in_file")]), - (reformat_surface_name, mris_exp, [("out", "in_surface")]), - (mris_exp, extract_mid_surface, [("out_surface", "in_surfaces")]), - (mris_exp, surf_conversion, [("out_surface", "in_surface")]), - (tkregister, surf_conversion, [("reg_file", "reg_file")]), - (gtmsegmentation, surf_conversion, [("gtmseg_file", "gtmsegfile")]), - (pvc, vol_on_surf, [("out_file", "volume")]), - (surf_conversion, vol_on_surf, [("tval", "surface")]), - (gtmsegmentation, vol_on_surf, [("gtmseg_file", "gtmsegfile")]), - (vol_on_surf, normal_average, [("output", "in_surfaces")]), - (normal_average, project_on_fsaverage, [("out_surface", "projection")]), - (normal_average, gather_pet_projection, [("out_surface", "pet_projection_lh_rh")]), - (gather_pet_projection, atlas_tsv, [("pet_projection_lh_rh", "pet")]), - (atlas_tsv, outputnode, [("destrieux_tsv", "destrieux_tsv")]), - (atlas_tsv, outputnode, [("desikan_tsv", "desikan_tsv")]), - (project_on_fsaverage, outputnode, [("out_fsaverage", "projection_fsaverage_smoothed")]), - (extract_mid_surface, outputnode, [("mid_surface", "mid_surf")]), - (normal_average, outputnode, [("out_surface", 
"projection_native_subject")]), - (outputnode, datasink, [("projection_fsaverage_smoothed", "projection_fsaverage")]), - (outputnode, datasink, [("mid_surf", "midsurface")]), - (outputnode, datasink, [("projection_native_subject", "projection_native")]), - (outputnode, datasink, [("destrieux_tsv", "destrieux_tsv")]), - (outputnode, datasink, [("desikan_tsv", "desikan_tsv")]), - ] - ) - # wf.write_graph(graph2use='flat') - wf.run() - # fmt: on diff --git a/clinica/pipelines/utils.py b/clinica/pipelines/utils.py new file mode 100644 index 000000000..daf4b7316 --- /dev/null +++ b/clinica/pipelines/utils.py @@ -0,0 +1,100 @@ +from dataclasses import dataclass +from os import PathLike +from pathlib import Path +from typing import List, Union + +import nibabel as nib +import numpy as np + +from clinica.utils.image import HemiSphere + +__all__ = [ + "FreeSurferAnnotationImageSingleHemisphere", + "FreeSurferAnnotationImage", + "FreeSurferAnnotation", +] + + +@dataclass +class FreeSurferAnnotationImageSingleHemisphere: + path: Path + + @classmethod + def from_raw(cls, path: Union[str, PathLike]): + path = Path(path) + if path.suffix != ".annot": + raise ValueError( + f"File {path} does not define a proper FreeSurfer annotation. " + "It is expected to be in '.annot' file format." + ) + return cls(path) + + +@dataclass +class FreeSurferAnnotationImage: + left: FreeSurferAnnotationImageSingleHemisphere + right: FreeSurferAnnotationImageSingleHemisphere + + @classmethod + def from_raw(cls, left: Union[str, PathLike], right: Union[str, PathLike]): + return cls( + FreeSurferAnnotationImageSingleHemisphere.from_raw(left), + FreeSurferAnnotationImageSingleHemisphere.from_raw(right), + ) + + +@dataclass +class FreeSurferAnnotation: + region_names: List[str] + left_annotations: np.ndarray + right_annotations: np.ndarray + left_ctab: np.ndarray + right_ctab: np.ndarray + + @classmethod + def from_annotation_image(cls, image: FreeSurferAnnotationImage): + left_annotations, left_ctab, region_names = nib.freesurfer.io.read_annot( + image.left.path, orig_ids=False + ) + right_annotations, right_ctab, _ = nib.freesurfer.io.read_annot( + image.right.path, orig_ids=False + ) + return cls( + [r.astype(str) for r in region_names], + left_annotations, + right_annotations, + left_ctab, + right_ctab, + ) + + @property + def n_regions(self) -> int: + return len(self.region_names) + + def get_annotation(self, hemisphere: Union[str, HemiSphere]) -> np.ndarray: + hemisphere = HemiSphere(hemisphere) + if hemisphere == HemiSphere.LEFT: + return self.left_annotations + return self.right_annotations + + def get_number_of_vertices(self, hemisphere: Union[str, HemiSphere]) -> int: + hemisphere = HemiSphere(hemisphere) + if hemisphere == HemiSphere.LEFT: + return self.left_annotations.shape[0] + return self.right_annotations.shape[0] + + def replace_minus_one_annotation_with_zero(self): + self.left_annotations[self.left_annotations == -1] = 0 + self.right_annotations[self.right_annotations == -1] = 0 + + def get_lateralized_region_names(self, left_first: bool = True) -> List[str]: + hemispheres = ( + (HemiSphere.LEFT, HemiSphere.RIGHT) + if left_first + else (HemiSphere.RIGHT, HemiSphere.LEFT) + ) + return [ + f"{region_name}_{hemisphere.value}" + for region_name in self.region_names + for hemisphere in hemispheres + ] diff --git a/clinica/utils/exceptions.py b/clinica/utils/exceptions.py index 2dfb13601..8231eb27c 100644 --- a/clinica/utils/exceptions.py +++ b/clinica/utils/exceptions.py @@ -46,6 +46,14 @@ class 
ClinicaPipelineConfigurationError(ClinicaException):
     """Base class for configuration errors of clinica pipelines."""
 
 
+class ClinicaImageError(ClinicaException):
+    """Base class for image errors."""
+
+
+class ClinicaSubprocessError(ClinicaException):
+    """Base class for subprocess errors."""
+
+
 class ClinicaInconsistentDatasetError(ClinicaException):
     """Base class for inconsistent datasets errors."""
 
diff --git a/clinica/utils/filemanip.py b/clinica/utils/filemanip.py
index f62fcced6..e2e6f5245 100644
--- a/clinica/utils/filemanip.py
+++ b/clinica/utils/filemanip.py
@@ -4,6 +4,7 @@
 
 __all__ = [
     "UserProvidedPath",
+    "copy_file",
     "delete_directories",
     "delete_directories_task",
     "extract_crash_files_from_log_file",
@@ -14,6 +15,7 @@
     "get_parent",
     "get_subject_id",
     "load_volume",
+    "move_file",
     "save_participants_sessions",
     "unzip_nii",
     "zip_nii",
@@ -610,3 +612,90 @@ def delete_directories_task(directories: list) -> None:
     from clinica.utils.filemanip import delete_directories  # noqa
 
     return delete_directories(directories)
+
+
+def move_file(src: Path, dst: Path):
+    """Move file src to dst.
+
+    Parameters
+    ----------
+    src : Path
+        The path to the source file to be moved.
+
+    dst : Path
+        The path to the destination file.
+
+    Raises
+    ------
+    FileNotFoundError :
+        If the source file does not exist.
+        If the destination file does not exist after moving.
+    """
+    import shutil
+    import warnings
+
+    from clinica.utils.stream import cprint
+
+    if not src.exists():
+        error_msg = f"File {src} is missing. Something wrong might have occurred prior to this step."
+        cprint(error_msg, lvl="error")
+        raise FileNotFoundError(error_msg)
+    if dst.exists():
+        warning_msg = (
+            f"File {dst} already exists and will be overwritten with file {src}."
+        )
+        cprint(warning_msg, lvl="warning")
+        warnings.warn(warning_msg)
+    shutil.move(str(src), str(dst))
+    cprint(f"Moved {src} to {dst}.", lvl="debug")
+    if not dst.exists():
+        error_msg = f"Moving {src} to {dst} failed as file {dst} is missing."
+        cprint(error_msg, lvl="error")
+        raise FileNotFoundError(error_msg)
+
+
+def copy_file(src: Path, dst: Path, exist_ok: bool = False):
+    """Copy file src to dst.
+
+    Parameters
+    ----------
+    src : Path
+        The path to the source file to be copied.
+
+    dst : Path
+        The path to the destination file or folder.
+        If a folder, the name of the file will be the same as src.
+
+    exist_ok : bool, optional
+        Whether to overwrite the destination file if it already exists
+        instead of raising an error. Default=False.
+
+    Raises
+    ------
+    FileNotFoundError :
+        If the source file does not exist.
+
+    IOError :
+        If the destination file already exists and 'exist_ok' is set to False.
+        If the destination file does not exist after copying.
+    """
+    import shutil
+
+    from clinica.utils.stream import cprint
+
+    if not src.is_file():
+        error_msg = f"File {src} does not exist and cannot be copied to {dst}."
+        cprint(error_msg, lvl="error")
+        raise FileNotFoundError(error_msg)
+    if dst.is_dir():
+        dst = dst / src.name
+    if dst.exists() and not exist_ok:
+        error_msg = f"File {dst} already exists. Set 'exist_ok' to True to overwrite with source file {src}."
+        cprint(error_msg, lvl="error")
+        raise IOError(error_msg)
+    shutil.copy(str(src), str(dst))
+    cprint(f"Copied {src} to {dst}.", lvl="debug")
+    if not dst.exists():
+        error_msg = f"An error occurred while copying {src} to {dst}. {dst} seems to be missing."
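+        # Log through cprint before raising so the failure also appears in Clinica's console/log output.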
+ cprint(error_msg, lvl="error") + raise IOError(error_msg) diff --git a/test/instantiation/test_instantiate_all_pipelines.py b/test/instantiation/test_instantiate_all_pipelines.py index 161eaa335..9201695e8 100644 --- a/test/instantiation/test_instantiate_all_pipelines.py +++ b/test/instantiation/test_instantiate_all_pipelines.py @@ -286,7 +286,7 @@ def test_instantiate_statistics_surface(cmdopt, tmp_path): def test_instantiate_pet_surface_cross_sectional(cmdopt, tmp_path): - from clinica.pipelines.pet_surface.pet_surface_pipeline import PetSurface + from clinica.pipelines.pet.surface.pipeline import PetSurface base_dir = Path(cmdopt["input"]) working_dir = Path(cmdopt["wd"]) @@ -311,7 +311,7 @@ def test_instantiate_pet_surface_cross_sectional(cmdopt, tmp_path): @pytest.mark.skip(reason="Currently broken. Needs to be fixed...") def test_instantiate_pet_surface_longitudinal(cmdopt): - from clinica.pipelines.pet_surface.pet_surface_pipeline import PetSurface + from clinica.pipelines.pet.surface.pipeline import PetSurface input_dir = Path(cmdopt["input"]) root = input_dir / "PETSurfaceLongitudinal" diff --git a/test/nonregression/pipelines/pet/test_pet_surface.py b/test/nonregression/pipelines/pet/test_pet_surface.py index 3d3160f4f..dc54b289d 100644 --- a/test/nonregression/pipelines/pet/test_pet_surface.py +++ b/test/nonregression/pipelines/pet/test_pet_surface.py @@ -22,7 +22,7 @@ def test_pet_surface(cmdopt, tmp_path): def run_pet_surface( input_dir: Path, output_dir: Path, ref_dir: Path, working_dir: Path ) -> None: - from clinica.pipelines.pet_surface.pet_surface_pipeline import PetSurface + from clinica.pipelines.pet.surface.pipeline import PetSurface shutil.copytree(input_dir / "caps", output_dir / "caps", copy_function=shutil.copy) @@ -92,7 +92,7 @@ def test_run_pet_surface_longitudinal(cmdopt, tmp_path): def run_pet_surface_longitudinal( input_dir: Path, output_dir: Path, ref_dir: Path, working_dir: Path ) -> None: - from clinica.pipelines.pet_surface.pet_surface_pipeline import PetSurface + from clinica.pipelines.pet.surface.pipeline import PetSurface shutil.copytree(input_dir / "caps", output_dir / "caps", copy_function=shutil.copy) diff --git a/test/unittests/pipelines/pet/test_pet_surface_utils.py b/test/unittests/pipelines/pet/test_pet_surface_utils.py new file mode 100644 index 000000000..a8c3f4d1f --- /dev/null +++ b/test/unittests/pipelines/pet/test_pet_surface_utils.py @@ -0,0 +1,752 @@ +import os +import re +from pathlib import Path +from unittest.mock import patch + +import nibabel as nib +import numpy as np +import pandas as pd +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from clinica.utils.image import HemiSphere +from clinica.utils.pet import SUVRReferenceRegion, Tracer + + +def test_normalize_suvr_error(tmp_path): + from clinica.pipelines.pet.surface.utils import normalize_suvr + from clinica.utils.exceptions import ClinicaImageError + + nib.save( + nib.Nifti1Image(np.random.random((10, 10, 10)), np.eye(4)), + tmp_path / "pet.nii.gz", + ) + nib.save( + nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4)), tmp_path / "mask.nii.gz" + ) + + with pytest.raises( + ClinicaImageError, + match=( + f"The eroded mask located at {tmp_path / 'mask.nii.gz'} contains only zero values. " + "A problem likely occurred when moving the eroded mask from MNI to gtmsegspace." 
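+            # 'match' is interpreted by pytest as a regular expression searched in the error message.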
+ ), + ): + normalize_suvr(tmp_path / "pet.nii.gz", tmp_path / "mask.nii.gz") + + +def test_normalize_suvr(tmp_path): + from clinica.pipelines.pet.surface.utils import normalize_suvr + + nib.save( + nib.Nifti1Image(0.5 * np.ones((20, 20, 20)), np.eye(4)), tmp_path / "pet.nii.gz" + ) + + mask = np.zeros((20, 20, 20)) + mask[5:15, 5:15, 5:15] = 1 + nib.save(nib.Nifti1Image(mask, np.eye(4)), tmp_path / "mask.nii.gz") + + normalize_suvr(tmp_path / "pet.nii.gz", tmp_path / "mask.nii.gz", tmp_path) + + assert (tmp_path / "suvr_pet.nii.gz").exists() + suvr = nib.load(tmp_path / "suvr_pet.nii.gz") + assert_array_equal(suvr.affine, np.eye(4)) + assert_array_equal(suvr.get_fdata(), np.ones((20, 20, 20))) + + +@pytest.mark.parametrize( + "platform,prefix", + [ + ("linux", ""), + ("darwin", "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && "), + ], +) +def test_build_mri_expand_command(tmp_path, mocker, platform, prefix): + from clinica.pipelines.pet.surface.utils import _build_mri_expand_command + + mocker.patch("platform.system", return_value=platform) + cmd = _build_mri_expand_command(tmp_path / "lh.white", tmp_path / "lh.white_exp-") + + assert ( + cmd + == f"{prefix}mris_expand -thickness -N 13 {tmp_path / 'lh.white'} 0.65 {tmp_path / 'lh.white_exp-'}" + ) + + +def run_mri_expand_as_subprocess_patch(surface: Path, out_file: Path): + """Function used to mock clinica.pipelines.pet.surface.utils._run_mri_expand_as_subprocess. + + It simply writes the files that are expected to be written by the function without relying on mri_expand. + """ + for file in ( + out_file.parent / f"{out_file.name}0{n}" + for n in ( + "00", + "01", + "02", + "03", + "04", + "05", + "06", + "07", + "08", + "09", + "10", + "11", + "12", + "13", + ) + ): + file.touch() + + +def test_run_mris_expand(tmp_path): + from unittest.mock import patch + + from clinica.pipelines.pet.surface.utils import run_mris_expand + + with patch( + "clinica.pipelines.pet.surface.utils._run_mri_expand_as_subprocess", + wraps=run_mri_expand_as_subprocess_patch, + ) as mock: + run_mris_expand(tmp_path / "lh.white", output_dir=tmp_path / "output") + + mock.assert_called_once_with(tmp_path / "lh.white", tmp_path / "lh.white_exp-") + assert (tmp_path / "output").exists() + assert set([f.name for f in (tmp_path / "output").iterdir()]) == set( + [f"lh.white_exp-0{x}" for x in ("07", "08", "09", "10", "11", "12", "13")] + ) + + +def run_mri_expand_as_subprocess_patch_broken(surface: Path, out_file: Path): + """Function used to mock clinica.pipelines.pet.surface.utils._run_mri_expand_as_subprocess in order to make it fail. + + It does not generate one of the expected files. + """ + for file in ( + out_file.parent / f"{out_file.name}0{n}" + for n in ( + "00", + "01", + "02", + "03", + "04", + "05", + "06", + "08", + "09", + "10", + "11", + "12", + "13", + ) + ): + file.touch() + + +def test_run_mris_expand_error(tmp_path): + from clinica.pipelines.pet.surface.utils import run_mris_expand + + with patch( + "clinica.pipelines.pet.surface.utils._run_mri_expand_as_subprocess", + wraps=run_mri_expand_as_subprocess_patch_broken, + ): + with pytest.raises( + FileNotFoundError, + match=( + f"File {tmp_path / 'lh.white_exp-007'} is missing. " + "Something wrong might have occurred prior to this step." 
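+                # The broken mock skips 'lh.white_exp-007', the first of the seven expanded surfaces run_mris_expand keeps.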
+ ), + ): + run_mris_expand(tmp_path / "lh.white", output_dir=tmp_path / "output") + + +def test_get_new_subjects_directory(tmp_path): + from clinica.pipelines.pet.surface.utils import _get_new_subjects_directory + + subject_dir, freesurfer_id = _get_new_subjects_directory( + tmp_path / "caps", "sub-01", "ses-M006" + ) + + assert ( + subject_dir + == tmp_path + / "caps" + / "subjects" + / "sub-01" + / "ses-M006" + / "t1" + / "freesurfer_cross_sectional" + ) + assert freesurfer_id == "sub-01_ses-M006" + + +def test_get_new_subjects_directory_longitudinal_no_long_folder_error(tmp_path): + from clinica.pipelines.pet.surface.utils import ( + _get_new_subjects_directory_longitudinal, + ) + from clinica.utils.exceptions import ClinicaCAPSError + + folder = tmp_path / "caps" / "subjects" / "sub-01" / "ses-M006" / "t1" + folder.mkdir(parents=True) + + with pytest.raises( + ClinicaCAPSError, + match=re.escape( + f"Folder {folder} does not contains a folder labeled long-*. " + "Have you run t1-freesurfer-longitudinal?", + ), + ): + _get_new_subjects_directory_longitudinal( + tmp_path / "caps", "sub-01", "ses-M006" + ) + + +def test_get_new_subjects_directory_longitudinal_multiple_long_folder_error(tmp_path): + from clinica.pipelines.pet.surface.utils import ( + _get_new_subjects_directory_longitudinal, + ) + from clinica.utils.exceptions import ClinicaCAPSError + + folder = tmp_path / "caps" / "subjects" / "sub-01" / "ses-M006" / "t1" + folder.mkdir(parents=True) + (folder / "long-foo").mkdir() + (folder / "long-bar").mkdir() + + with pytest.raises( + ClinicaCAPSError, + match=re.escape( + f"Folder {folder} contains 2 folders labeled long-*. Only 1 can exist." + ), + ): + _get_new_subjects_directory_longitudinal( + tmp_path / "caps", "sub-01", "ses-M006" + ) + + +def test_get_new_subjects_directory_longitudinal(tmp_path): + from clinica.pipelines.pet.surface.utils import ( + _get_new_subjects_directory_longitudinal, + ) + + folder = tmp_path / "caps" / "subjects" / "sub-01" / "ses-M006" / "t1" + folder.mkdir(parents=True) + (folder / "long-foo").mkdir() + + subject_directory, freesurfer_id = _get_new_subjects_directory_longitudinal( + tmp_path / "caps", "sub-01", "ses-M006" + ) + + assert ( + subject_directory + == tmp_path + / "caps" + / "subjects" + / "sub-01" + / "ses-M006" + / "t1" + / "long-foo" + / "freesurfer_longitudinal" + ) + assert freesurfer_id == "sub-01_ses-M006.long.sub-01_long-foo" + + +@pytest.mark.parametrize("hemisphere", HemiSphere) +@pytest.mark.parametrize( + "platform,prefix", + [ + ("linux", ""), + ("darwin", "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && "), + ], +) +def test_build_mri_surf2surf_command(tmp_path, mocker, hemisphere, platform, prefix): + from clinica.pipelines.pet.surface.utils import _build_mri_surf2surf_command + + mocker.patch("platform.system", return_value=platform) + + command = _build_mri_surf2surf_command( + tmp_path / f"{hemisphere.value}.surface", + tmp_path / "registration", + tmp_path / "gtmseg", + "sub-01_ses-M006.long.sub-01_long-foo", + tmp_path / "output", + ) + + assert command == ( + f"{prefix}mri_surf2surf --reg {tmp_path / 'registration'} {tmp_path / 'gtmseg'} " + f"--sval-xyz surface --hemi {hemisphere.value} --tval-xyz {tmp_path / 'gtmseg'} " + f"--tval {tmp_path / 'output'} --s sub-01_ses-M006.long.sub-01_long-foo" + ) + + +def run_mri_surf2surf_as_subprocess_patch( + surface: Path, + registration: Path, + gtmsegfile: Path, + freesurfer_id: str, + output_file: Path, +): + output_file.touch() + + +def setup_folders(tmp_path: 
Path, hemisphere: HemiSphere, is_longitudinal: bool): + os.environ["SUBJECTS_DIR"] = str(tmp_path / "old_subject_directory") + folder = tmp_path / "caps" / "subjects" / "sub-01" / "ses-M006" / "t1" + folder.mkdir(parents=True) + if is_longitudinal: + surf_folder = ( + folder + / "long-foo" + / "freesurfer_longitudinal" + / "sub-01_ses-M006.long.sub-01_long-foo" + / "surf" + ) + else: + surf_folder = folder / "freesurfer_cross_sectional" / "sub-01_ses-M006" / "surf" + surf_folder.mkdir(parents=True) + surface = tmp_path / f"{hemisphere.value}.white" + surface.touch() + output_dir = tmp_path / "output" + output_dir.mkdir() + + return surface, surf_folder, output_dir + + +@pytest.mark.parametrize("is_longitudinal", (True, False)) +@pytest.mark.parametrize("hemisphere", HemiSphere) +def test_run_mri_surf2surf(tmp_path, is_longitudinal, hemisphere): + from clinica.pipelines.pet.surface.utils import run_mri_surf2surf + + surface, surf_folder, output_dir = setup_folders( + tmp_path, hemisphere, is_longitudinal + ) + + with patch( + "clinica.pipelines.pet.surface.utils._run_mri_surf2surf_as_subprocess", + wraps=run_mri_surf2surf_as_subprocess_patch, + ) as mock: + run_mri_surf2surf( + surface, + tmp_path / "registration", + tmp_path / "gtmseg", + "sub-01", + "ses-M006", + tmp_path / "caps", + is_longitudinal, + output_dir, + ) + mock.assert_called_once_with( + surface, + tmp_path / "registration", + tmp_path / "gtmseg", + "sub-01_ses-M006.long.sub-01_long-foo" + if is_longitudinal + else "sub-01_ses-M006", + output_dir / f"{hemisphere.value}.white_gtmsegspace", + ) + assert (output_dir / f"{hemisphere.value}.white_gtmsegspace").exists() + assert not (surf_folder / f"{hemisphere.value}.white").exists() + assert os.environ["SUBJECTS_DIR"] == str(tmp_path / "old_subject_directory") + + +@pytest.mark.parametrize("hemisphere", HemiSphere) +@pytest.mark.parametrize( + "platform,prefix", + [ + ("linux", ""), + ("darwin", "export DYLD_LIBRARY_PATH=$FREESURFER_HOME/lib/gcc/lib && "), + ], +) +def test_build_mri_vol2surf_command(tmp_path, mocker, hemisphere, platform, prefix): + from clinica.pipelines.pet.surface.utils import _build_mri_vol2surf_command + + mocker.patch("platform.system", return_value=platform) + + command = _build_mri_vol2surf_command( + tmp_path / "sub-01_ses-M006_pet.nii.gz", + tmp_path / f"{hemisphere.value}.white", + "sub-01_ses-M006.long.sub-01_long-foo", + tmp_path / "output", + ) + + assert command == ( + f"{prefix}mri_vol2surf --mov {tmp_path / 'sub-01_ses-M006_pet.nii.gz'} " + f"--o {tmp_path / 'output'} --surf white --hemi {hemisphere.value} " + f"--regheader sub-01_ses-M006.long.sub-01_long-foo --ref gtmseg.mgz --interp nearest" + ) + + +def _run_mri_vol2surf_as_subprocess_patch( + pet_volume: Path, + surface: Path, + freesurfer_id: str, + output_file: Path, +): + output_file.touch() + + +@pytest.mark.parametrize("is_longitudinal", (True, False)) +@pytest.mark.parametrize("hemisphere", HemiSphere) +def test_run_mri_vol2surf(tmp_path, is_longitudinal, hemisphere): + from clinica.pipelines.pet.surface.utils import run_mri_vol2surf + + surface, surf_folder, output_dir = setup_folders( + tmp_path, hemisphere, is_longitudinal + ) + (tmp_path / "gtmseg").touch() + (surf_folder.parent / "mri").mkdir() + + with patch( + "clinica.pipelines.pet.surface.utils._run_mri_vol2surf_as_subprocess", + wraps=_run_mri_vol2surf_as_subprocess_patch, + ) as mock: + run_mri_vol2surf( + tmp_path / "bids" / "sub-01_ses-M006_pet.nii.gz", + surface, + "sub-01", + "ses-M006", + tmp_path / "caps", + tmp_path 
/ "gtmseg", + is_longitudinal, + output_dir, + ) + mock.assert_called_once_with( + tmp_path / "bids" / "sub-01_ses-M006_pet.nii.gz", + surface, + "sub-01_ses-M006.long.sub-01_long-foo" + if is_longitudinal + else "sub-01_ses-M006", + output_dir / f"{hemisphere.value}.projection_{surface.name}.mgh", + ) + assert ( + output_dir / f"{hemisphere.value}.projection_{surface.name}.mgh" + ).exists() + assert not (surf_folder / f"{hemisphere.value}.white").exists() + assert not (surf_folder.parent / "mri" / "gtmseg.mgz").exists() + assert os.environ["SUBJECTS_DIR"] == str(tmp_path / "old_subject_directory") + + +@pytest.mark.parametrize("surfaces", [(), ("foo",), ("foo", "bar"), range(6)]) +def test_compute_weighted_mean_surface_error(tmp_path, surfaces): + from clinica.pipelines.pet.surface.utils import compute_weighted_mean_surface + + with pytest.raises( + ValueError, + match=( + "There should be 7 surfaces at this point of the pipeline. " + f"However 'compute_weighted_mean_surface' received {len(surfaces)} surfaces. " + "Something probably went wrong in prior steps of the pipeline." + ), + ): + compute_weighted_mean_surface(surfaces, tmp_path / "output") + + +def test_get_coefficient_for_normal_repartition(): + from clinica.pipelines.pet.surface.utils import ( + _get_coefficient_for_normal_repartition, + ) + + assert _get_coefficient_for_normal_repartition() == ( + 0.1034, + 0.1399, + 0.1677, + 0.1782, + 0.1677, + 0.1399, + 0.1034, + ) + + +@pytest.mark.parametrize("hemisphere", HemiSphere) +def test_compute_weighted_mean_surface(tmp_path, hemisphere): + from clinica.pipelines.pet.surface.utils import ( + _get_coefficient_for_normal_repartition, + compute_weighted_mean_surface, + ) + + (tmp_path / "output").mkdir() + test_image = nib.MGHImage(np.ones((10, 10, 10), dtype="float32"), np.eye(4)) + for i in range(7): + nib.save(test_image, tmp_path / f"{hemisphere.value}.surf_{i}.mgh") + + average_image = compute_weighted_mean_surface( + [tmp_path / f"{hemisphere.value}.surf_{i}.mgh" for i in range(7)], + tmp_path / "output", + ) + + assert ( + average_image + == tmp_path + / "output" + / f"{hemisphere.value}.averaged_projection_on_cortical_surface.mgh" + ) + average_image = nib.load(average_image) + assert_array_equal(average_image.affine, np.eye(4)) + assert_array_almost_equal( + average_image.get_fdata(), + sum(_get_coefficient_for_normal_repartition()) * np.ones((10, 10, 10)), + ) + + +def _run_mris_preproc_as_standalone_nipype_node_patch( + projection: Path, + freesurfer_id: str, + fwhm: float, + output_file: Path, +): + output_file.touch() + + +@pytest.mark.parametrize("is_longitudinal", (True, False)) +@pytest.mark.parametrize("hemisphere", HemiSphere) +def test_project_onto_fsaverage(tmp_path, is_longitudinal, hemisphere): + from clinica.pipelines.pet.surface.utils import project_onto_fsaverage + + projection, surf_folder, output_dir = setup_folders( + tmp_path, hemisphere, is_longitudinal + ) + (tmp_path / "old_subject_directory" / "fsaverage").mkdir(parents=True) + + with patch( + "clinica.pipelines.pet.surface.utils._run_mris_preproc_as_standalone_nipype_node", + wraps=_run_mris_preproc_as_standalone_nipype_node_patch, + ) as mock: + project_onto_fsaverage( + projection, + "sub-01", + "ses-M006", + tmp_path / "caps", + 6, + is_longitudinal, + output_dir, + ) + mock.assert_called_once_with( + projection, + "sub-01_ses-M006.long.sub-01_long-foo" + if is_longitudinal + else "sub-01_ses-M006", + 6, + output_dir / f"fsaverage_fwhm-6_{projection.name}", + ) + assert (output_dir / 
f"fsaverage_fwhm-6_{projection.name}").exists() + assert not (surf_folder / projection.name).exists() + assert os.environ["SUBJECTS_DIR"] == str(tmp_path / "old_subject_directory") + + +def test_compute_average_pet_signal_based_on_annotations_error(tmp_path): + from clinica.pipelines.pet.surface.utils import ( + compute_average_pet_signal_based_on_annotations, + ) + + with pytest.raises( + ValueError, + match=( + "The compute_average_pet_signal_based_on_annotations function requires two files " + "for the argument 'pet_projections', one for the left hemisphere, one for the right. " + "The following 3 were received:" + ), + ): + compute_average_pet_signal_based_on_annotations( + ( + tmp_path / "left_projection.mgh", + tmp_path / "right_projection.mgh", + tmp_path / "fooo.mgh", + ), # noqa + {}, + tmp_path / "output", + ) + + +def test_compute_average_pet_signal_based_on_annotations(tmp_path): + from clinica.pipelines.pet.surface.utils import ( + compute_average_pet_signal_based_on_annotations, + ) + from clinica.pipelines.utils import FreeSurferAnnotationImage + + data = np.zeros((20, 20, 20), dtype="float32") + data[5:15, 5:15, 5:15] = 1.0 + # Left hemisphere has 2 values inside the "brain" + nib.save( + nib.MGHImage(2 * data.flatten(), np.eye(4)), + tmp_path / "left_projection.mgh", + ) + # Right hemisphere has 1 values inside the "brain" + nib.save( + nib.MGHImage(data.flatten(), np.eye(4)), + tmp_path / "right_projection.mgh", + ) + (tmp_path / "output").mkdir() + + for parcellation in ("destrieux", "desikan"): + for hemi in HemiSphere: + nib.freesurfer.io.write_annot( + tmp_path / f"{parcellation}_{hemi.value}.annot", + labels=data.flatten().astype("int"), + ctab=np.array([[25, 25, 25, 0], [255, 255, 255, 255]]), + names=["Background", "Brain"], + ) + + filenames = compute_average_pet_signal_based_on_annotations( + (tmp_path / "left_projection.mgh", tmp_path / "right_projection.mgh"), + { + "destrieux": FreeSurferAnnotationImage.from_raw( + tmp_path / "destrieux_lh.annot", + tmp_path / "destrieux_rh.annot", + ), + "desikan": FreeSurferAnnotationImage.from_raw( + tmp_path / "desikan_lh.annot", + tmp_path / "desikan_rh.annot", + ), + }, + tmp_path / "output", + ) + + assert set(filenames) == { + tmp_path / "output" / "destrieux.tsv", + tmp_path / "output" / "desikan.tsv", + } + for filename in filenames: + assert pd.read_csv(filename, sep="\t").to_dict() == { + "index": {0: 0, 1: 1, 2: 2, 3: 3}, + "label_name": { + 0: "Background_lh", + 1: "Background_rh", + 2: "Brain_lh", + 3: "Brain_rh", + }, + "mean_scalar": {0: 0.0, 1: 0.0, 2: 2.0, 3: 1.0}, + } + + +@pytest.mark.parametrize( + "is_longitudinal,expected", + [ + ( + True, + ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/(long-.*)\\/surface_longitudinal)\\/midsurface\\/.*_hemi_([a-z]+)(.*)$", + "\\1/\\2_\\3_\\4_hemi-\\5_midcorticalsurface", + ), + ), + ( + False, + ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/surface)\\/midsurface\\/.*_hemi_([a-z]+)(.*)$", + "\\1/\\2_\\3_hemi-\\4_midcorticalsurface", + ), + ), + ], +) +def test_get_mid_surface_substitutions(is_longitudinal: bool, expected: tuple): + from clinica.pipelines.pet.surface.utils import _get_mid_surface_substitutions # noqa + + assert _get_mid_surface_substitutions(is_longitudinal) == expected + + +@pytest.fixture +def expected_regexp_substitution_for_projection_in_native_space( + tracer: Tracer, + region: SUVRReferenceRegion, + is_longitudinal: bool, +): + if is_longitudinal: + return ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/(long-.*)\\/surface_longitudinal)\\/projection_native\\/.*_hemi_([a-z]+).*", 
+ f"\\1/\\2_\\3_\\4_trc-{tracer.value}_pet_space-native_suvr-{region.value}_pvc-iy_hemi-\\5_projection.mgh", + ) + return ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/surface)\\/projection_native\\/.*_hemi_([a-z]+).*", + f"\\1/\\2_\\3_trc-{tracer.value}_pet_space-native_suvr-{region.value}_pvc-iy_hemi-\\4_projection.mgh", + ) + + +@pytest.mark.parametrize("tracer", Tracer) +@pytest.mark.parametrize("region", SUVRReferenceRegion) +@pytest.mark.parametrize("is_longitudinal", (True, False)) +def test_get_projection_in_native_space_substitutions( + tracer, + region, + is_longitudinal, + expected_regexp_substitution_for_projection_in_native_space, +): + from clinica.pipelines.pet.surface.utils import ( + _get_projection_in_native_space_substitutions, + ) + + assert ( + _get_projection_in_native_space_substitutions(tracer, region, is_longitudinal) + == expected_regexp_substitution_for_projection_in_native_space + ) + + +@pytest.fixture +def expected_regexp_substitution_for_projection_in_fsaverage( + tracer: Tracer, + region: SUVRReferenceRegion, + is_longitudinal: bool, +): + if is_longitudinal: + return ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/(long-.*)\\/surface_longitudinal)\\/projection_fsaverage\\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*", + f"\\1/\\2_\\3_\\4_trc-{tracer.value}_pet_space-fsaverage_suvr-{region.value}_pvc-iy_hemi-\\5_fwhm-\\6_projection.mgh", + ) + return ( + "(.*(sub-.*)\\/(ses-.*)\\/pet\\/surface)\\/projection_fsaverage\\/.*_hemi_([a-z]+).*_fwhm_([0-9]+).*", + f"\\1/\\2_\\3_trc-{tracer.value}_pet_space-fsaverage_suvr-{region.value}_pvc-iy_hemi-\\4_fwhm-\\5_projection.mgh", + ) + + +@pytest.mark.parametrize("tracer", Tracer) +@pytest.mark.parametrize("region", SUVRReferenceRegion) +@pytest.mark.parametrize("is_longitudinal", (True, False)) +def test_get_projection_in_fsaverage_substitution( + tracer, + region, + is_longitudinal, + expected_regexp_substitution_for_projection_in_fsaverage, +): + from clinica.pipelines.pet.surface.utils import ( + _get_projection_in_fsaverage_substitution, + ) + + assert ( + _get_projection_in_fsaverage_substitution(tracer, region, is_longitudinal) + == expected_regexp_substitution_for_projection_in_fsaverage + ) + + +@pytest.fixture +def expected_regexp_substitution_for_tsv_file_for_atlas( + tracer: Tracer, + region: SUVRReferenceRegion, + atlas: str, + is_longitudinal: bool, +): + if is_longitudinal: + return ( + f"(.*(sub-.*)\\/(ses-.*)\\/pet\\/(long-.*)\\/surface_longitudinal)\\/{atlas}_tsv\\/{atlas}.tsv", + f"\\1/atlas_statistics/\\2_\\3_\\4_trc-{tracer.value}_pet_space-{atlas}_pvc-iy_suvr-{region.value}_statistics.tsv", + ) + return ( + f"(.*(sub-.*)\\/(ses-.*)\\/pet\\/surface)\\/{atlas}_tsv\\/{atlas}.tsv", + f"\\1/atlas_statistics/\\2_\\3_trc-{tracer.value}_pet_space-{atlas}_pvc-iy_suvr-{region.value}_statistics.tsv", + ) + + +@pytest.mark.parametrize("tracer", Tracer) +@pytest.mark.parametrize("region", SUVRReferenceRegion) +@pytest.mark.parametrize("atlas", ("destrieux", "desikan")) +@pytest.mark.parametrize("is_longitudinal", (True, False)) +def test_get_tsv_file_for_atlas( + tracer, + region, + atlas, + is_longitudinal, + expected_regexp_substitution_for_tsv_file_for_atlas, +): + from clinica.pipelines.pet.surface.utils import _get_tsv_file_for_atlas # noqa + + assert ( + _get_tsv_file_for_atlas(tracer, region, atlas, is_longitudinal) + == expected_regexp_substitution_for_tsv_file_for_atlas + ) diff --git a/test/unittests/utils/test_filemanip.py b/test/unittests/utils/test_filemanip.py index 56d78f94a..413cd698a 100644 --- 
a/test/unittests/utils/test_filemanip.py +++ b/test/unittests/utils/test_filemanip.py @@ -279,3 +279,41 @@ def test_delete_directories(tmp_path): ) assert not (tmp_path / f"folder{i}").exists() assert record[3].message.args[0] == f"Was able to remove 180 B of data." + + +def test_move_file_no_src_error(tmp_path): + from clinica.utils.filemanip import move_file + + with pytest.raises( + FileNotFoundError, + match=f"File {tmp_path / 'src.txt'} is missing. Something wrong might have occurred prior to this step.", + ): + move_file(tmp_path / "src.txt", tmp_path / "dst.txt") + + +def test_move_file_existing_dst_warning(tmp_path): + from clinica.utils.filemanip import move_file + + (tmp_path / "src.txt").touch() + (tmp_path / "dst.txt").touch() + + with pytest.warns( + UserWarning, + match=f"File {tmp_path / 'dst.txt'} already exists and will be overwritten with file {tmp_path / 'src.txt'}", + ): + move_file(tmp_path / "src.txt", tmp_path / "dst.txt") + + +def test_move_file_no_dst_error(tmp_path, mocker): + from clinica.utils.filemanip import move_file + + mocker.patch("shutil.move", return_value=None) + (tmp_path / "src.txt").touch() + with pytest.raises( + FileNotFoundError, + match=( + f"Moving {tmp_path / 'src.txt'} to {tmp_path / 'dst.txt'} " + f"failed as file {tmp_path / 'dst.txt'} is missing." + ), + ): + move_file(tmp_path / "src.txt", tmp_path / "dst.txt")
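
A minimal usage sketch of the move_file/copy_file helpers introduced above in clinica/utils/filemanip.py; the paths below are hypothetical and only meant for illustration:

    from pathlib import Path

    from clinica.utils.filemanip import copy_file, move_file

    src = Path("/tmp/work/sub-01_ses-M006_pet.nii.gz")  # hypothetical file
    src.parent.mkdir(parents=True, exist_ok=True)
    src.touch()  # stand-in for a real PET volume
    out = Path("/tmp/out")  # hypothetical output folder
    out.mkdir(parents=True, exist_ok=True)

    copy_file(src, out)  # destination is a folder: the copy keeps the source file name
    copy_file(src, out, exist_ok=True)  # without exist_ok=True this second copy raises IOError
    move_file(src, out / "final_pet.nii.gz")  # warns and overwrites if dst exists, raises if src is missing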