diff --git a/examples/apps/ai_spleen_nnunet_seg_app/__init__.py b/examples/apps/ai_spleen_nnunet_seg_app/__init__.py
new file mode 100644
index 00000000..526cee59
--- /dev/null
+++ b/examples/apps/ai_spleen_nnunet_seg_app/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+_current_dir = os.path.abspath(os.path.dirname(__file__))
+if sys.path and os.path.abspath(sys.path[0]) != _current_dir:
+    sys.path.insert(0, _current_dir)
+del _current_dir
diff --git a/examples/apps/ai_spleen_nnunet_seg_app/__main__.py b/examples/apps/ai_spleen_nnunet_seg_app/__main__.py
new file mode 100644
index 00000000..07fe20a4
--- /dev/null
+++ b/examples/apps/ai_spleen_nnunet_seg_app/__main__.py
@@ -0,0 +1,19 @@
+# Copyright 2021-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from app import AISpleennnUNetSegApp
+
+if __name__ == "__main__":
+    logging.info(f"Begin {__name__}")
+    AISpleennnUNetSegApp().run()
+    logging.info(f"End {__name__}")
diff --git a/examples/apps/ai_spleen_nnunet_seg_app/app.py b/examples/apps/ai_spleen_nnunet_seg_app/app.py
new file mode 100644
index 00000000..9402e233
--- /dev/null
+++ b/examples/apps/ai_spleen_nnunet_seg_app/app.py
@@ -0,0 +1,163 @@
+# Copyright 2021-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from pathlib import Path
+
+from pydicom.sr.codedict import codes
+
+from monai.deploy.conditions import CountCondition
+from monai.deploy.core import AppContext, Application
+from monai.deploy.core.domain import Image
+from monai.deploy.core.io_type import IOType
+from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator
+from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription
+from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator
+from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator
+from monai.deploy.operators.monai_bundle_inference_operator import BundleConfigNames, IOMapping
+from monai.deploy.operators.monet_bundle_inference_operator import MONetBundleInferenceOperator
+from monai.deploy.operators.stl_conversion_operator import STLConversionOperator
+
+
+# @resource(cpu=1, gpu=1, memory="7Gi")
+# pip_packages can be a string that is a path (str) to a requirements.txt file or a list of packages.
+# The monai pkg is not required by this class, but rather by the included operators.
+class AISpleennnUNetSegApp(Application):
+    """Demonstrates inference with the built-in MONet Bundle inference operator, with DICOM files as input/output.
+
+    This application loads a set of DICOM instances, selects the appropriate series, converts the series to a
+    3D volume image, performs inference with the built-in MONet Bundle inference operator (including nnUNet
+    resampling, pre-processing, and post-processing), saves the segmentation image in a DICOM Seg OID instance
+    file, and optionally saves the surface mesh in STL format.
+
+    Pertinent nnUNet MONAI Bundle:
+
+
+    Execution Time Estimate:
+    With an NVIDIA RTX A6000 48GB GPU, for an input DICOM Series of size 106x415x415 and patches of size 64x192x160, the execution time
+    is around 50 seconds, including saving both the DICOM Seg instance and the surface mesh STL file.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """Creates an application instance."""
+        self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
+        super().__init__(*args, **kwargs)
+
+    def run(self, *args, **kwargs):
+        # This method calls the base class to run. Can be omitted if simply calling through.
+        self._logger.info(f"Begin {self.run.__name__}")
+        super().run(*args, **kwargs)
+        self._logger.info(f"End {self.run.__name__}")
+
+    def compose(self):
+        """Creates the app-specific operators and chains them up in the processing DAG."""
+
+        logging.info(f"Begin {self.compose.__name__}")
+
+        # Use command line options over environment variables to init context.
+        app_context: AppContext = Application.init_app_context(self.argv)
+        app_input_path = Path(app_context.input_path)
+        app_output_path = Path(app_context.output_path)
+
+        # Create the custom operator(s) as well as SDK built-in operator(s).
+        study_loader_op = DICOMDataLoaderOperator(
+            self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op"
+        )
+        series_selector_op = DICOMSeriesSelectorOperator(self, rules=Sample_Rules_Text, name="series_selector_op")
+        series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op")
+
+        # Create the inference operator that supports MONAI Bundle and automates the inference.
+        # The IOMapping labels match the input and prediction keys in the pre- and post-processing.
+        # The model_name is optional when the app has only one model.
+        # The bundle_path argument can optionally be set to an accessible bundle file path in the dev
+        # environment, so when the app is packaged into a MAP, the operator can complete the bundle parsing
+        # during init.
+
+        config_names = BundleConfigNames(config_names=["inference"])  # Same as the default
+
+        bundle_spleen_seg_op = MONetBundleInferenceOperator(
+            self,
+            input_mapping=[IOMapping("image", Image, IOType.IN_MEMORY)],
+            output_mapping=[IOMapping("pred", Image, IOType.IN_MEMORY)],
+            app_context=app_context,
+            bundle_config_names=config_names,
+            name="nnunet_bundle_spleen_seg_op",
+        )
+
+        # Create the DICOM Seg writer, providing the required segment description for each segment with
+        # the actual algorithm and the pertinent organ/tissue. The segment_label, algorithm_name,
+        # and algorithm_version are of DICOM VR LO type, limited to 64 chars.
+        # https://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
+        segment_descriptions = [
+            SegmentDescription(
+                segment_label="Spleen",
+                segmented_property_category=codes.SCT.Organ,
+                segmented_property_type=codes.SCT.Spleen,
+                algorithm_name="volumetric (3D) segmentation of the spleen from CT image",
+                algorithm_family=codes.DCM.ArtificialIntelligence,
+                algorithm_version="0.3.2",
+            )
+        ]
+
+        custom_tags = {"SeriesDescription": "AI generated Seg, not for clinical use."}
+
+        dicom_seg_writer = DICOMSegmentationWriterOperator(
+            self,
+            segment_descriptions=segment_descriptions,
+            custom_tags=custom_tags,
+            output_folder=app_output_path,
+            name="dicom_seg_writer",
+        )
+
+        # Create the processing pipeline by specifying the source and destination operators, and
+        # ensuring the output from the former matches the input of the latter, in both name and type.
+        self.add_flow(study_loader_op, series_selector_op, {("dicom_study_list", "dicom_study_list")})
+        self.add_flow(
+            series_selector_op, series_to_vol_op, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(series_to_vol_op, bundle_spleen_seg_op, {("image", "image")})
+        # Note below the dicom_seg_writer requires two inputs, each coming from a source operator.
+        self.add_flow(
+            series_selector_op, dicom_seg_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(bundle_spleen_seg_op, dicom_seg_writer, {("pred", "seg_image")})
+        # Create the surface mesh STL conversion operator and add it to the app execution flow; remove the
+        # following couple of lines if the STL output is not needed.
+        stl_conversion_op = STLConversionOperator(
+            self, output_file=app_output_path.joinpath("stl/spleen.stl"), name="stl_conversion_op"
+        )
+        self.add_flow(bundle_spleen_seg_op, stl_conversion_op, {("pred", "image")})
+
+        logging.info(f"End {self.compose.__name__}")
+
+
+# This is a sample series selection rule in JSON, simply selecting CT series.
+# If the study has more than 1 CT series, then all of them will be selected.
+# Please see more detail in DICOMSeriesSelectorOperator.
+Sample_Rules_Text = """
+{
+    "selections": [
+        {
+            "name": "CT Series",
+            "conditions": {
+                "StudyDescription": "(.*?)",
+                "Modality": "(?i)CT",
+                "SeriesDescription": "(.*?)"
+            }
+        }
+    ]
+}
+"""
+
+if __name__ == "__main__":
+    logging.info(f"Begin {__name__}")
+    AISpleennnUNetSegApp().run()
+    logging.info(f"End {__name__}")
diff --git a/examples/apps/ai_spleen_nnunet_seg_app/app.yaml b/examples/apps/ai_spleen_nnunet_seg_app/app.yaml
new file mode 100644
index 00000000..9f65281c
--- /dev/null
+++ b/examples/apps/ai_spleen_nnunet_seg_app/app.yaml
@@ -0,0 +1,27 @@
+%YAML 1.2
+# SPDX-FileCopyrightText: Copyright (c) 2022-2023 MONAI. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+application:
+  title: MONAI Deploy App Package - Spleen MONet Seg Inference
+  version: 1.0
+  inputFormats: ["file"]
+  outputFormats: ["file"]
+
+resources:
+  cpu: 1
+  gpu: 1
+  memory: 1Gi
+  gpuMemory: 7Gi
diff --git a/examples/apps/ai_spleen_nnunet_seg_app/requirements.txt b/examples/apps/ai_spleen_nnunet_seg_app/requirements.txt
new file mode 100644
index 00000000..1d80987c
--- /dev/null
+++ b/examples/apps/ai_spleen_nnunet_seg_app/requirements.txt
@@ -0,0 +1,13 @@
+scikit-image>=0.17.2
+pydicom>=2.3.0
+highdicom>=0.18.2
+SimpleITK>=2.0.0
+Pillow>=8.0.0
+numpy-stl>=2.12.0
+trimesh>=3.8.11
+nibabel>=3.2.1
+torch>=1.12.0
+nvflare
+git+https://github.com/SimoneBendazzoli93/dynamic-network-architectures.git
+git+https://github.com/SimoneBendazzoli93/MONAI.git@dev
+git+https://github.com/SimoneBendazzoli93/nnUNet.git
diff --git a/monai/deploy/operators/__init__.py b/monai/deploy/operators/__init__.py
index 75176dab..bd9de71a 100644
--- a/monai/deploy/operators/__init__.py
+++ b/monai/deploy/operators/__init__.py
@@ -25,6 +25,7 @@
     IOMapping
     ModelInfo
     MonaiBundleInferenceOperator
+    MONetBundleInferenceOperator
     MonaiSegInferenceOperator
     PNGConverterOperator
     PublisherOperator
@@ -48,6 +49,7 @@
 from .inference_operator import InferenceOperator
 from .monai_bundle_inference_operator import BundleConfigNames, IOMapping, MonaiBundleInferenceOperator
 from .monai_seg_inference_operator import MonaiSegInferenceOperator
+from .monet_bundle_inference_operator import MONetBundleInferenceOperator
 from .nii_data_loader_operator import NiftiDataLoader
 from .png_converter_operator import PNGConverterOperator
 from .publisher_operator import PublisherOperator
diff --git a/monai/deploy/operators/monai_bundle_inference_operator.py b/monai/deploy/operators/monai_bundle_inference_operator.py
index 7ae4db4d..3959a794 100644
--- a/monai/deploy/operators/monai_bundle_inference_operator.py
+++ b/monai/deploy/operators/monai_bundle_inference_operator.py
@@ -22,7 +22,11 @@
 from typing import Any, Dict, List, Optional, Tuple, Type, Union
 
 import numpy as np
+import SimpleITK
+from SimpleITK import Image as SimpleITKImage
 
 from monai.deploy.core import AppContext, Fragment, Image, IOType, OperatorSpec
 from monai.deploy.utils.importutil import optional_import
 
@@ -60,7 +64,7 @@ def get_bundle_config(bundle_path, config_names):
     Gets the configuration parser from the specified Torchscript bundle file path.
     """
 
-    bundle_suffixes = (".json", ".yaml", "yml")  # The only supported file ext(s)
+    bundle_suffixes = (".json", ".yaml", ".yml")  # The only supported file ext(s)
     config_folder = "extra"
 
     def _read_from_archive(archive, root_name: str, config_name: str, do_search=True):
@@ -90,7 +94,7 @@ def _read_from_archive(archive, root_name: str, config_name: str, do_search=True):
             name_list = archive.namelist()
             for suffix in bundle_suffixes:
                 for n in name_list:
-                    if (f"{config_name}{suffix}").casefold in n.casefold():
+                    if (f"{config_name}{suffix}").casefold() in n.casefold():
                         logging.debug(f"Trying to read content of config {config_name!r} from {n!r}.")
                         content_text = archive.read(n)
                         break
@@ -577,6 +581,7 @@ def compute(self, op_input, op_output, context):
             # value: NdarrayOrTensor  # MyPy complaints
             value, meta_data = self._receive_input(name, op_input, context)
             value = convert_to_dst_type(value, dst=value)[0]
+            meta_data = meta_data or {}
             if not isinstance(meta_data, dict):
                 raise ValueError("`meta_data` must be a dict.")
             value = MetaTensor.ensure_torch_and_prune_meta(value, meta_data)
diff --git a/monai/deploy/operators/monet_bundle_inference_operator.py b/monai/deploy/operators/monet_bundle_inference_operator.py
new file mode 100644
index 00000000..ef567c90
--- /dev/null
+++ b/monai/deploy/operators/monet_bundle_inference_operator.py
@@ -0,0 +1,78 @@
+# Copyright 2021-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Tuple, Union
+
+from monai.deploy.core import Image
+from monai.deploy.operators.monai_bundle_inference_operator import MonaiBundleInferenceOperator, get_bundle_config
+from monai.deploy.utils.importutil import optional_import
+from monai.transforms import ConcatItemsd, ResampleToMatch
+
+torch, _ = optional_import("torch", "1.10.2")
+MetaTensor, _ = optional_import("monai.data.meta_tensor", name="MetaTensor")
+__all__ = ["MONetBundleInferenceOperator"]
+
+
+class MONetBundleInferenceOperator(MonaiBundleInferenceOperator):
+    """
+    A specialized operator for performing inference using the MONet bundle.
+    This operator extends the `MonaiBundleInferenceOperator` to support nnUNet-specific
+    configurations and prediction logic. It initializes the nnUNet predictor and provides
+    a method for performing inference on input data.
+
+    Attributes
+    ----------
+    _nnunet_predictor : torch.nn.Module
+        The nnUNet predictor module used for inference.
+
+    Methods
+    -------
+    _init_config(config_names)
+        Initializes the configuration for the nnUNet bundle, including parsing the bundle
+        configuration and setting up the nnUNet predictor.
+    predict(data, *args, **kwargs)
+        Performs inference on the input data using the nnUNet predictor.
+ """ + + def __init__( + self, + *args, + **kwargs, + ): + + super().__init__(*args, **kwargs) + + self._nnunet_predictor: torch.nn.Module = None + + def _init_config(self, config_names): + + super()._init_config(config_names) + parser = get_bundle_config(str(self._bundle_path), config_names) + self._parser = parser + + self._nnunet_predictor = parser.get_parsed_content("network_def") + + def predict(self, data: Any, *args, **kwargs) -> Union[Image, Any, Tuple[Any, ...], Dict[Any, Any]]: + """Predicts output using the inferer. If multimodal data is provided as keyword arguments, + it concatenates the data with the main input data.""" + + self._nnunet_predictor.predictor.network = self._model_network + + if len(kwargs) > 0: + multimodal_data = {"image": data} + for key in kwargs.keys(): + if isinstance(kwargs[key], MetaTensor): + multimodal_data[key] = ResampleToMatch(mode="bilinear")(kwargs[key], img_dst=data + ) + data = ConcatItemsd(keys=list(multimodal_data.keys()),name="image")(multimodal_data)["image"] + if len(data.shape) == 4: + data = data[None] + return self._nnunet_predictor(data)