From 317330344a71e0cae63c9124d65affda31ef4050 Mon Sep 17 00:00:00 2001 From: M Q Date: Wed, 12 Feb 2025 15:24:43 -0800 Subject: [PATCH 1/4] Editorial changes to correct typos, rename parameters, adding comments. Retested examples. Signed-off-by: M Q --- examples/apps/ai_unetr_seg_app/app.py | 2 +- .../ai_unetr_seg_app/unetr_seg_operator.py | 8 +-- .../breast_density_classifier_app/README.md | 4 +- .../breast_density_classifier_operator.py | 8 +-- .../med_image_generation/app.py | 57 +++++++++---------- .../mednist_classifier_monaideploy.py | 6 +- examples/apps/simple_imaging_app/_version.py | 2 +- examples/apps/simple_imaging_app/app.py | 4 +- .../simple_imaging_app/gaussian_operator.py | 2 +- monai/deploy/operators/clara_viz_operator.py | 4 +- .../dicom_encapsulated_pdf_writer_operator.py | 6 +- .../operators/dicom_seg_writer_operator.py | 2 +- .../dicom_text_sr_writer_operator.py | 6 +- monai/deploy/operators/inference_operator.py | 2 +- .../monai_bundle_inference_operator.py | 2 +- .../operators/monai_seg_inference_operator.py | 18 ++++-- monai/deploy/operators/publisher_operator.py | 4 +- .../operators/stl_conversion_operator.py | 2 +- 18 files changed, 74 insertions(+), 65 deletions(-) diff --git a/examples/apps/ai_unetr_seg_app/app.py b/examples/apps/ai_unetr_seg_app/app.py index 7a399828..d2f90db7 100644 --- a/examples/apps/ai_unetr_seg_app/app.py +++ b/examples/apps/ai_unetr_seg_app/app.py @@ -80,7 +80,7 @@ def compose(self): _algorithm_family = codes.DCM.ArtificialIntelligence _algorithm_version = "0.1.0" - # List of (Segment name, [Code menaing str]), not including background which is value of 0. + # List of (Segment name, [Code meaning str]), not including background which is value of 0. # User must provide correct codes, which can be looked at, e.g. # https://bioportal.bioontology.org/ontologies/SNOMEDCT # Alternatively, consult the concept and code dictionaries in PyDicom diff --git a/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py b/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py index d48b68b1..dbbb2b30 100644 --- a/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py +++ b/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py @@ -56,7 +56,7 @@ class UnetrSegOperator(Operator): def __init__( self, - frament: Fragment, + fragment: Fragment, *args, app_context: AppContext, model_path: Path, @@ -71,13 +71,13 @@ def __init__( self.model_path = model_path self.output_folder = output_folder self.output_folder.mkdir(parents=True, exist_ok=True) - self.fragement = frament # Cache and later pass the Fragment/Application to contained operator(s) + self.app_fragment = fragment # Cache and later pass the Fragment/Application to contained operator(s) self.app_context = app_context self.input_name_image = "image" self.output_name_seg = "seg_image" self.output_name_saved_images_folder = "saved_images_folder" - super().__init__(frament, *args, **kwargs) + super().__init__(fragment, *args, **kwargs) def setup(self, spec: OperatorSpec): spec.input(self.input_name_image) @@ -102,7 +102,7 @@ def compute(self, op_input, op_output, context): # Delegates inference and saving output to the built-in operator. 
infer_operator = MonaiSegInferenceOperator( - self.fragement, + self.app_fragment, roi_size=( 96, 96, diff --git a/examples/apps/breast_density_classifier_app/README.md b/examples/apps/breast_density_classifier_app/README.md index c1256c49..b5573e12 100644 --- a/examples/apps/breast_density_classifier_app/README.md +++ b/examples/apps/breast_density_classifier_app/README.md @@ -9,7 +9,7 @@ Sample data and a torchscript model can be downloaded from https://drive.google. python app.py -i -o -m ``` -## Package the application as a MONAI Application Package (contianer image) +## Package the application as a MONAI Application Package (container image) In order to build the MONAI App Package, go a level up and execute the following command. ``` monai-deploy package breast_density_classification_app -m -c breast_density_classifer_app/app.yaml --tag breast_density:0.1.0 --platform x64-workstation -l DEBUG @@ -20,4 +20,4 @@ monai-deploy package breast_density_classification_app -m monai-deploy run breast_density-x64-workstation-dgpu-linux-amd64:0.1.0 -i -o ``` -Once the container exits successfully, check the results in the output directory. There should be a newly creeated DICOM instance file and a `output.json` file containing the classification results. \ No newline at end of file +Once the container exits successfully, check the results in the output directory. There should be a newly created DICOM instance file and a `output.json` file containing the classification results. \ No newline at end of file diff --git a/examples/apps/breast_density_classifier_app/breast_density_classifier_operator.py b/examples/apps/breast_density_classifier_app/breast_density_classifier_operator.py index 4a442088..2d2b6f0f 100644 --- a/examples/apps/breast_density_classifier_app/breast_density_classifier_operator.py +++ b/examples/apps/breast_density_classifier_app/breast_density_classifier_operator.py @@ -40,7 +40,7 @@ class ClassifierOperator(Operator): def __init__( self, - frament: Fragment, + fragment: Fragment, *args, model_name: Optional[str] = "", app_context: AppContext, @@ -67,7 +67,7 @@ def __init__( # The name of the optional input port for passing data to override the output folder path. self.input_name_output_folder = "output_folder" - # The output folder set on the object can be overriden at each compute by data in the optional named input + # The output folder set on the object can be overridden at each compute by data in the optional named input self.output_folder = output_folder # Need the name when there are multiple models loaded @@ -80,7 +80,7 @@ def __init__( self.model = self._get_model(self.app_context, self.model_path, self._model_name) - super().__init__(frament, *args, **kwargs) + super().__init__(fragment, *args, **kwargs) def _get_model(self, app_context: AppContext, model_path: Path, model_name: str): """Load the model with the given name from context or model path @@ -116,7 +116,7 @@ def _convert_dicom_metadata_datatype(self, metadata: Dict): if not metadata: return metadata - # Try to convert data type for the well knowned attributes. Add more as needed. + # Try to convert data type for the well known attributes. Add more as needed. 
if metadata.get("SeriesInstanceUID", None): try: metadata["SeriesInstanceUID"] = str(metadata["SeriesInstanceUID"]) diff --git a/examples/apps/hugging_face_integration_app/med_image_generation/app.py b/examples/apps/hugging_face_integration_app/med_image_generation/app.py index e4e37aa2..6f592668 100644 --- a/examples/apps/hugging_face_integration_app/med_image_generation/app.py +++ b/examples/apps/hugging_face_integration_app/med_image_generation/app.py @@ -1,39 +1,38 @@ -import logging +import argparse +import logging from pathlib import Path -import torch + +import numpy as np +import torch from diffusers import StableDiffusionPipeline -from monai.deploy.core import AppContext, Application from PIL import Image -import numpy as np -import argparse +from monai.deploy.core import AppContext, Application class App(Application): - name = "Diffusion Image App" - description = "Simple application showing diffusion to generate Images" - def compose(self): - model_id = "Nihirc/Prompt2MedImage" - device = "cuda" - parser = argparse.ArgumentParser() - parser.add_argument("--input_prompt", type=str, default="Generate a X-ray") - parser.add_argument("--output", type=str, default="./out.jpg") - args = parser.parse_args() - - input_prompt = args.input_prompt - output_path = args.output - print("Input Prompt: ", input_prompt) - pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) - pipe = pipe.to(device) - prompt = "Show me an X ray pevic fracture" - image = pipe(prompt).images[0] - image.save(output_path) + name = "Diffusion Image App" + description = "Simple application showing diffusion to generate Images" + + def compose(self): + model_id = "Nihirc/Prompt2MedImage" + device = "cuda" + parser = argparse.ArgumentParser() + parser.add_argument("--input_prompt", type=str, default="Generate a X-ray") + parser.add_argument("--output", type=str, default="./out.jpg") + args = parser.parse_args() + + input_prompt = args.input_prompt + output_path = args.output + print("Input Prompt: ", input_prompt) + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) + pipe = pipe.to(device) + prompt = "Show me an X ray pevic fracture" + image = pipe(prompt).images[0] + image.save(output_path) if __name__ == "__main__": - logging.info(f"Begin {__name__}") - App().run() - logging.info(f"End {__name__}") - - - + logging.info(f"Begin {__name__}") + App().run() + logging.info(f"End {__name__}") diff --git a/examples/apps/mednist_classifier_monaideploy/mednist_classifier_monaideploy.py b/examples/apps/mednist_classifier_monaideploy/mednist_classifier_monaideploy.py index 95d872e0..e4e83655 100644 --- a/examples/apps/mednist_classifier_monaideploy/mednist_classifier_monaideploy.py +++ b/examples/apps/mednist_classifier_monaideploy/mednist_classifier_monaideploy.py @@ -100,7 +100,7 @@ class MedNISTClassifierOperator(Operator): def __init__( self, - frament: Fragment, + fragment: Fragment, *args, app_context: AppContext, model_name: Optional[str] = "", @@ -127,7 +127,7 @@ def __init__( # The name of the optional input port for passing data to override the output folder path. 
self.input_name_output_folder = "output_folder" - # The output folder set on the object can be overriden at each compute by data in the optional named input + # The output folder set on the object can be overridden at each compute by data in the optional named input self.output_folder = output_folder # Need the name when there are multiple models loaded @@ -138,7 +138,7 @@ def __init__( self.model = self._get_model(self.app_context, self.model_path, self._model_name) # This needs to be at the end of the constructor. - super().__init__(frament, *args, **kwargs) + super().__init__(fragment, *args, **kwargs) def _get_model(self, app_context: AppContext, model_path: Path, model_name: str): """Load the model with the given name from context or model path diff --git a/examples/apps/simple_imaging_app/_version.py b/examples/apps/simple_imaging_app/_version.py index 9eeaaf17..3fc3de8a 100644 --- a/examples/apps/simple_imaging_app/_version.py +++ b/examples/apps/simple_imaging_app/_version.py @@ -307,7 +307,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces diff --git a/examples/apps/simple_imaging_app/app.py b/examples/apps/simple_imaging_app/app.py index 6f38b2a3..82556ffa 100644 --- a/examples/apps/simple_imaging_app/app.py +++ b/examples/apps/simple_imaging_app/app.py @@ -49,10 +49,10 @@ def compose(self): output_data_path = Path(app_context.output_path) logging.info(f"sample_data_path: {sample_data_path}") - # Please note that the Application object, self, is passed as the first positonal argument + # Please note that the Application object, self, is passed as the first positional argument # and the others as kwargs. # Also note the CountCondition of 1 on the first operator, indicating to the application executor - # to invoke this operator, hence the pipleline, only once. + # to invoke this operator, hence the pipeline, only once. sobel_op = SobelOperator(self, CountCondition(self, 1), input_path=sample_data_path, name="sobel_op") median_op = MedianOperator(self, name="median_op") gaussian_op = GaussianOperator(self, output_folder=output_data_path, name="gaussian_op") diff --git a/examples/apps/simple_imaging_app/gaussian_operator.py b/examples/apps/simple_imaging_app/gaussian_operator.py index e924afc9..64f4014f 100644 --- a/examples/apps/simple_imaging_app/gaussian_operator.py +++ b/examples/apps/simple_imaging_app/gaussian_operator.py @@ -24,7 +24,7 @@ class GaussianOperator(Operator): single input: an image array object single output: - an image arrary object, without enforcing a downsteam receiver + an image array object, without enforcing a downstream receiver Besides, this operator also saves the image file in the given output folder. """ diff --git a/monai/deploy/operators/clara_viz_operator.py b/monai/deploy/operators/clara_viz_operator.py index 6524cd19..743286c5 100644 --- a/monai/deploy/operators/clara_viz_operator.py +++ b/monai/deploy/operators/clara_viz_operator.py @@ -33,7 +33,7 @@ class ClaraVizOperator(Operator): seg_image: Image object of the segmentation image derived from the input image. """ - def __init__(self, fragement: Fragment, *args, **kwargs): + def __init__(self, fragment: Fragment, *args, **kwargs): """Constructor of the operator. 
Args: @@ -43,7 +43,7 @@ def __init__(self, fragement: Fragment, *args, **kwargs): self.input_name_image = "image" self.input_name_seg_image = "seg_image" - super().__init__(fragement, *args, **kwargs) + super().__init__(fragment, *args, **kwargs) def setup(self, spec: OperatorSpec): spec.input(self.input_name_image) diff --git a/monai/deploy/operators/dicom_encapsulated_pdf_writer_operator.py b/monai/deploy/operators/dicom_encapsulated_pdf_writer_operator.py index c2085c2b..b7a4465a 100644 --- a/monai/deploy/operators/dicom_encapsulated_pdf_writer_operator.py +++ b/monai/deploy/operators/dicom_encapsulated_pdf_writer_operator.py @@ -45,12 +45,12 @@ class DICOMEncapsulatedPDFWriterOperator(Operator): None File output: - Generaed DICOM instance file in the provided output folder. + Generated DICOM instance file in the provided output folder. """ # File extension for the generated DICOM Part 10 file. DCM_EXTENSION = ".dcm" - # The default output folder for saveing the generated DICOM instance file. + # The default output folder for saving the generated DICOM instance file. DEFAULT_OUTPUT_FOLDER = Path(os.getcwd()) / "output" def __init__( @@ -249,7 +249,7 @@ def _is_pdf_bytes(self, content: bytes): return True -# Commenting out the following as pttype complains about the contructor for no reason +# Commenting out the following as pttype complains about the constructor for no reason # def test(test_copy_tags: bool = True): # from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator # from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator diff --git a/monai/deploy/operators/dicom_seg_writer_operator.py b/monai/deploy/operators/dicom_seg_writer_operator.py index 41bad8af..1a39e644 100644 --- a/monai/deploy/operators/dicom_seg_writer_operator.py +++ b/monai/deploy/operators/dicom_seg_writer_operator.py @@ -206,7 +206,7 @@ def __init__( Object encapsulating the description of each segment present in the segmentation. output_folder: Folder for file output, overridden by named input on compute. Defaults to current working dir's child folder, output. - custom_tags: Optonal[Dict[str, str]], optional + custom_tags: Optional[Dict[str, str]], optional Dictionary for setting custom DICOM tags using Keywords and str values only omit_empty_frames: bool, optional Whether to omit frames that contain no segmented pixels from the output segmentation. diff --git a/monai/deploy/operators/dicom_text_sr_writer_operator.py b/monai/deploy/operators/dicom_text_sr_writer_operator.py index 1b0fd21c..273236bb 100644 --- a/monai/deploy/operators/dicom_text_sr_writer_operator.py +++ b/monai/deploy/operators/dicom_text_sr_writer_operator.py @@ -42,12 +42,12 @@ class DICOMTextSRWriterOperator(Operator): None File output: - Generaed DICOM instance file in the provided output folder. + Generated DICOM instance file in the provided output folder. """ # File extension for the generated DICOM Part 10 file. DCM_EXTENSION = ".dcm" - # The default output folder for saveing the generated DICOM instance file. + # The default output folder for saving the generated DICOM instance file. 
# DEFAULT_OUTPUT_FOLDER = Path(os.path.join(os.path.dirname(__file__))) / "output" DEFAULT_OUTPUT_FOLDER = Path.cwd() / "output" @@ -259,7 +259,7 @@ def write(self, content_text, dicom_series: Optional[DICOMSeries], output_dir: P self._logger.info(f"DICOM SOP instance saved in {file_path}") -# Commenting out the following as pttype complains about the contructor for no reason +# Commenting out the following as pttype complains about the constructor for no reason # def test(test_copy_tags: bool = True): # from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator # from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator diff --git a/monai/deploy/operators/inference_operator.py b/monai/deploy/operators/inference_operator.py index 81ee226e..f8d423d8 100644 --- a/monai/deploy/operators/inference_operator.py +++ b/monai/deploy/operators/inference_operator.py @@ -17,7 +17,7 @@ class InferenceOperator(Operator): """The base operator for operators that perform AI inference. - This operator preforms pre-transforms on a input image, inference with + This operator performs pre-transforms on a input image, inference with a given model, post-transforms, and final results generation. """ diff --git a/monai/deploy/operators/monai_bundle_inference_operator.py b/monai/deploy/operators/monai_bundle_inference_operator.py index 89d873a7..7ae4db4d 100644 --- a/monai/deploy/operators/monai_bundle_inference_operator.py +++ b/monai/deploy/operators/monai_bundle_inference_operator.py @@ -546,7 +546,7 @@ def compute(self, op_input, op_output, context): # `context.models.get(model_name)` returns a model instance if exists. # If model_name is not specified and only one model exists, it returns that model. - # The models are loaded on contruction via the AppContext object in turn the model factory. + # The models are loaded on construction via the AppContext object in turn the model factory. self._model_network = self.app_context.models.get(self._model_name) if self.app_context.models else None if self._model_network: diff --git a/monai/deploy/operators/monai_seg_inference_operator.py b/monai/deploy/operators/monai_seg_inference_operator.py index 9fa2c5b2..5d5a9aeb 100644 --- a/monai/deploy/operators/monai_seg_inference_operator.py +++ b/monai/deploy/operators/monai_seg_inference_operator.py @@ -58,7 +58,7 @@ class InfererType(StrEnum): class MonaiSegInferenceOperator(InferenceOperator): """This segmentation operator uses MONAI transforms and Sliding Window Inference. - This operator preforms pre-transforms on a input image, inference + This operator performs pre-transforms on a input image, inference using a given model, and post-transforms. The segmentation image is saved as a named Image object in memory. @@ -241,7 +241,7 @@ def _convert_dicom_metadata_datatype(self, metadata: Dict): if not metadata: return metadata - # Try to convert data type for the well knowned attributes. Add more as needed. + # Try to convert data type for the well known attributes. Add more as needed. 
if metadata.get("SeriesInstanceUID", None): try: metadata["SeriesInstanceUID"] = str(metadata["SeriesInstanceUID"]) @@ -313,6 +313,7 @@ def compute_impl(self, input_image, context): with torch.no_grad(): for d in dataloader: images = d[self._input_dataset_key].to(device) + self._logger.info(f"Input of {type(images)} shape: {images.shape}") if self._inferer == InfererType.SLIDING_WINDOW: d[self._pred_dataset_key] = sliding_window_inference( inputs=images, @@ -331,7 +332,14 @@ def compute_impl(self, input_image, context): ) d = [post_transforms(i) for i in decollate_batch(d)] - out_ndarray = d[0][self._pred_dataset_key].cpu().numpy() + self._logger.info(f"Post transform length/batch size of output: {len(d)}") + self._logger.info( + f"Post transform pixel spacings of '{self._pred_dataset_key}' in the first output: {d[0][self._pred_dataset_key].pixdim}" + ) + out_ndarray = d[0][self._pred_dataset_key].cpu().numpy() # Single output to numpy on CPU + self._logger.info( + f"Post transform '{self._pred_dataset_key}' of {type(out_ndarray)} shape: {out_ndarray.shape}" + ) # Need to squeeze out the channel dim fist out_ndarray = np.squeeze(out_ndarray, 0) # NOTE: The domain Image object simply contains a Arraylike obj as image as of now. @@ -343,7 +351,9 @@ def compute_impl(self, input_image, context): # the resultant ndarray for the prediction image needs to be transposed back, so the # array index order is back to DHW, the same order as the in-memory input Image obj. out_ndarray = out_ndarray.T.astype(np.uint8) - self._logger.info(f"Output Seg image numpy array shaped: {out_ndarray.shape}") + self._logger.info( + f"Output Seg image numpy array of type '{type(out_ndarray)}' shape: {out_ndarray.shape}" + ) self._logger.info(f"Output Seg image pixel max value: {np.amax(out_ndarray)}") return Image(out_ndarray, input_img_metadata) diff --git a/monai/deploy/operators/publisher_operator.py b/monai/deploy/operators/publisher_operator.py index a46d9362..e4c0b1ce 100644 --- a/monai/deploy/operators/publisher_operator.py +++ b/monai/deploy/operators/publisher_operator.py @@ -29,10 +29,10 @@ class PublisherOperator(Operator): generates the render config file and the meta data file, then save all in the `publish` folder of the app. """ - # The default input folder for saveing the generated DICOM instance file. + # The default input folder for saving the generated DICOM instance file. DEFAULT_INPUT_FOLDER = Path(getcwd()) / "input" - # The default output folder for saveing the generated DICOM instance file. + # The default output folder for saving the generated DICOM instance file. DEFAULT_OUTPUT_FOLDER = Path(getcwd()) / "output" def __init__( diff --git a/monai/deploy/operators/stl_conversion_operator.py b/monai/deploy/operators/stl_conversion_operator.py index d8940a83..6cfe4020 100644 --- a/monai/deploy/operators/stl_conversion_operator.py +++ b/monai/deploy/operators/stl_conversion_operator.py @@ -41,7 +41,7 @@ class STLConversionOperator(Operator): """Converts volumetric image to surface mesh in STL format. If a file path is provided, the STL binary will be saved in the said output folder. - This operator also save the STL file as bytes in memory, idenfied by the named output. Being optional, + This operator also save the STL file as bytes in memory, identified by the named output. Being optional, this output does not require any downstream receiver. 
Named inputs: From f927767574d9de4e2d40de9b63662fd2bc710535 Mon Sep 17 00:00:00 2001 From: M Q Date: Wed, 12 Feb 2025 17:20:14 -0800 Subject: [PATCH 2/4] More editorial changes covering the whole repo Signed-off-by: M Q --- docs/_static/custom.css | 2 +- .../developing_with_sdk/packaging_app.md | 2 +- .../getting_started/tutorials/mednist_app.md | 2 +- .../tutorials/monai_bundle_app.md | 2 +- docs/source/release_notes/v0.3.0.md | 2 +- docs/source/release_notes/v0.4.0.md | 2 +- docs/source/release_notes/v0.5.0.md | 2 +- docs/srs.md | 2 +- monai/deploy/core/domain/dicom_series.py | 2 +- .../deploy/core/domain/dicom_sop_instance.py | 2 +- monai/deploy/core/domain/dicom_study.py | 2 +- monai/deploy/utils/importutil.py | 2 +- notebooks/tutorials/02_mednist_app.ipynb | 20 +++++++++---------- run | 8 ++++---- 14 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/_static/custom.css b/docs/_static/custom.css index e5d100e2..e2be75d5 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -107,7 +107,7 @@ blockquote p { } /* Mermaid -to avoid the lable text being cut off +to avoid the label text being cut off */ .edgeTerminals { font-size: 9px !important; diff --git a/docs/source/developing_with_sdk/packaging_app.md b/docs/source/developing_with_sdk/packaging_app.md index a81dd6a7..a46327ec 100644 --- a/docs/source/developing_with_sdk/packaging_app.md +++ b/docs/source/developing_with_sdk/packaging_app.md @@ -13,7 +13,7 @@ It is required that the application configuration yaml file as well as the depen ### Basic Usage of MONAI Application Packager ```bash -monai-deploy package --config --tag --platform [--models ] [--log-level ] [-h] +monai-deploy package --config --tag --platform [--models ] [--log-level ] [-h] ``` #### Required Arguments diff --git a/docs/source/getting_started/tutorials/mednist_app.md b/docs/source/getting_started/tutorials/mednist_app.md index 3a4a43ba..63809ae2 100644 --- a/docs/source/getting_started/tutorials/mednist_app.md +++ b/docs/source/getting_started/tutorials/mednist_app.md @@ -42,7 +42,7 @@ jupyter-lab
-    Video may show the use of previous SDK verson.
+    Video may show the use of previous SDK version.
``` diff --git a/docs/source/getting_started/tutorials/monai_bundle_app.md b/docs/source/getting_started/tutorials/monai_bundle_app.md index a8467d40..326b868a 100644 --- a/docs/source/getting_started/tutorials/monai_bundle_app.md +++ b/docs/source/getting_started/tutorials/monai_bundle_app.md @@ -32,7 +32,7 @@ jupyter-lab
-    Video may show the use of previous SDK verson.
+    Video may show the use of previous SDK version.
``` diff --git a/docs/source/release_notes/v0.3.0.md b/docs/source/release_notes/v0.3.0.md index 888f9d9c..959e0180 100644 --- a/docs/source/release_notes/v0.3.0.md +++ b/docs/source/release_notes/v0.3.0.md @@ -16,7 +16,7 @@ This operator uses [Clara Viz](https://pypi.org/project/clara-viz/) to provide i ### STL Surface Mesh Conversion Operator -This operator converts a volume image to surface mesh, in [STL file format](https://en.wikipedia.org/wiki/STL_(file_format)). Its API allows the client to control if smoothing needs to be applied, and if only keeping the largest connected component; the latter is useful when muliple disjoint segments are in a volume image, and the application needs to control if all or only the largest to be included in the output. +This operator converts a volume image to surface mesh, in [STL file format](https://en.wikipedia.org/wiki/STL_(file_format)). Its API allows the client to control if smoothing needs to be applied, and if only keeping the largest connected component; the latter is useful when multiple disjoint segments are in a volume image, and the application needs to control if all or only the largest to be included in the output. ## What's fixed/updated diff --git a/docs/source/release_notes/v0.4.0.md b/docs/source/release_notes/v0.4.0.md index 8704a927..5b6dd74d 100644 --- a/docs/source/release_notes/v0.4.0.md +++ b/docs/source/release_notes/v0.4.0.md @@ -9,7 +9,7 @@ The new operator, `MONAI Bundle Inference Operator`, is intended to automate the inference with a MONAI Bundle in TorchScript with the following functionalities: - Parse the model metadata and extra configuration data in the TorchScript file -- Instanciate MONAI transforms and inferer objects per bundle configuration data +- Instantiate MONAI transforms and inferer objects per bundle configuration data - Convert input/output of the operator to and from model network input - Support named model and can be used in a multi-model application diff --git a/docs/source/release_notes/v0.5.0.md b/docs/source/release_notes/v0.5.0.md index 32ac5af3..2aab6143 100644 --- a/docs/source/release_notes/v0.5.0.md +++ b/docs/source/release_notes/v0.5.0.md @@ -7,7 +7,7 @@ - Generated DICOM instances as AI evidence now have the attribute (0008,0201) Timezone Offset From UTC, in addition to the DICOM date and time which are set with values from the underlying operating system. The OS is expected to be synchronized with a known good timing source and have the correct timezone setting - Generated DICOM instance file names are now based on the SOP instance UID - Support DICOM instance level attribute matching in the DICOM Series Selection Operator -- Operators and example applications are verified to be re-runable without needing to reinitialize the application object or re-load the AI model network. This will allow a main function or an external script to instantiate the application object once and use it to process multiple discreet inputs, either in a batch processing mode or in a long running service +- Operators and example applications are verified to be re-runable without needing to reinitialize the application object or re-load the AI model network. 
This will allow a main function or an external script to instantiate the application object once and use it to process multiple discrete inputs, either in a batch processing mode or in a long running service - Tutorials, in Jupyter notebooks, are re-organized and updated - Reference added for MONAI Deploy Express for hosting MAPs in development environments - Removed is the reference to the deprecated MONAI Inference Service diff --git a/docs/srs.md b/docs/srs.md index f6a377af..a786357e 100644 --- a/docs/srs.md +++ b/docs/srs.md @@ -521,7 +521,7 @@ The SDK shall allow the packaging of the application in a standardized format so ### Background -Please refer to [MONAI Application Packge Spec](https://github.com/Project-MONAI/monai-deploy-experimental/blob/main/guidelines/monai-application-package.md)for details. +Please refer to [MONAI Application Package Spec](https://github.com/Project-MONAI/monai-deploy-experimental/blob/main/guidelines/monai-application-package.md)for details. ### Verification Strategy diff --git a/monai/deploy/core/domain/dicom_series.py b/monai/deploy/core/domain/dicom_series.py index 6bed23e0..6eed0178 100644 --- a/monai/deploy/core/domain/dicom_series.py +++ b/monai/deploy/core/domain/dicom_series.py @@ -51,7 +51,7 @@ def get_sop_instances(self): return self._sop_instances # Properties named after DICOM Series module attribute keywords - # There are two required (Type 1) attrbutes for a series: + # There are two required (Type 1) attributes for a series: # Keyword: SeriesInstanceUID, Tag: (0020,000E) # Keyword: Modality, Tag: (0008,0060) # diff --git a/monai/deploy/core/domain/dicom_sop_instance.py b/monai/deploy/core/domain/dicom_sop_instance.py index f2a2bdd7..015f60a5 100644 --- a/monai/deploy/core/domain/dicom_sop_instance.py +++ b/monai/deploy/core/domain/dicom_sop_instance.py @@ -27,7 +27,7 @@ class DICOMSOPInstance(Domain): - """This class representes a SOP Instance. + """This class represents a SOP Instance. An attribute can be looked up with a slice ([group_number, element number]). """ diff --git a/monai/deploy/core/domain/dicom_study.py b/monai/deploy/core/domain/dicom_study.py index 30bc053e..67c5b6be 100644 --- a/monai/deploy/core/domain/dicom_study.py +++ b/monai/deploy/core/domain/dicom_study.py @@ -35,7 +35,7 @@ def get_all_series(self): return list(self._series_dict.values()) # Properties named after DICOM Study module attribute keywords - # There is only one required (Type 1) attrbute for a study: + # There is only one required (Type 1) attribute for a study: # Keyword: StudyInstanceUID, Tag: (0020,000D) # @property diff --git a/monai/deploy/utils/importutil.py b/monai/deploy/utils/importutil.py index d56b7d42..95e48784 100644 --- a/monai/deploy/utils/importutil.py +++ b/monai/deploy/utils/importutil.py @@ -27,7 +27,7 @@ def get_docstring(cls: Type) -> str: """Get docstring of a class. Tries to get docstring from class itself, from its __doc__. - It trims the preceeding whitespace from docstring. + It trims the preceding whitespace from docstring. If __doc__ is not available, it returns empty string. 
Args: diff --git a/notebooks/tutorials/02_mednist_app.ipynb b/notebooks/tutorials/02_mednist_app.ipynb index 000ae501..cb2371ae 100644 --- a/notebooks/tutorials/02_mednist_app.ipynb +++ b/notebooks/tutorials/02_mednist_app.ipynb @@ -416,7 +416,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -439,7 +439,7 @@ "# Choose a file as test input\n", "test_input_path = image_files[0][0]\n", "!rm -rf {input_folder} && mkdir -p {input_folder} && cp {test_input_path} {input_folder} && ls {input_folder}\n", - "# Need to copy the model file to its own clean subfolder for pacakging, to workaround an issue in the Packager\n", + "# Need to copy the model file to its own clean subfolder for packaging, to workaround an issue in the Packager\n", "!rm -rf {models_folder} && mkdir -p {models_folder}/model && cp classifier.zip {models_folder}/model && ls {models_folder}/model\n", "\n", "%env HOLOSCAN_INPUT_PATH {input_folder}\n", @@ -558,7 +558,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -579,7 +579,7 @@ "\n", " def __init__(\n", " self,\n", - " frament: Fragment,\n", + " fragment: Fragment,\n", " *args,\n", " app_context: AppContext,\n", " model_name: Optional[str] = \"\",\n", @@ -606,7 +606,7 @@ " # The name of the optional input port for passing data to override the output folder path.\n", " self.input_name_output_folder = \"output_folder\"\n", "\n", - " # The output folder set on the object can be overriden at each compute by data in the optional named input\n", + " # The output folder set on the object can be overridden at each compute by data in the optional named input\n", " self.output_folder = output_folder\n", "\n", " # Need the name when there are multiple models loaded\n", @@ -617,7 +617,7 @@ " self.model = self._get_model(self.app_context, self.model_path, self._model_name)\n", "\n", " # This needs to be at the end of the constructor.\n", - " super().__init__(frament, *args, **kwargs)\n", + " super().__init__(fragment, *args, **kwargs)\n", "\n", " def _get_model(self, app_context: AppContext, model_path: Path, model_name: str):\n", " \"\"\"Load the model with the given name from context or model path\n", @@ -839,7 +839,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -954,7 +954,7 @@ "\n", " def __init__(\n", " self,\n", - " frament: Fragment,\n", + " fragment: Fragment,\n", " *args,\n", " app_context: AppContext,\n", " model_name: Optional[str] = \"\",\n", @@ -981,7 +981,7 @@ " # The name of the optional input port for passing data to override the output folder path.\n", " self.input_name_output_folder = \"output_folder\"\n", "\n", - " # The output folder set on the object can be overriden at each compute by data in the optional named input\n", + " # The output folder set on the object can be overridden at each compute by data in the optional named input\n", " self.output_folder = output_folder\n", "\n", " # Need the name when there are multiple models loaded\n", @@ -992,7 +992,7 @@ " self.model = self._get_model(self.app_context, self.model_path, self._model_name)\n", "\n", " # This needs to be at the end of the constructor.\n", - " super().__init__(frament, *args, **kwargs)\n", + " super().__init__(fragment, *args, **kwargs)\n", "\n", " def _get_model(self, app_context: AppContext, model_path: Path, model_name: str):\n", " \"\"\"Load the model with the given name from context or 
model path\n", diff --git a/run b/run index c1b6ee4f..d391ef44 100755 --- a/run +++ b/run @@ -407,7 +407,7 @@ install_edit_mode() { setup_desc() { c_echo 'Setup development environment -Arguements: +Arguments: $1 - configuration (default: "dev") ' } @@ -468,7 +468,7 @@ build_desc() { c_echo 'Build distribution package Build a distribution package for this SDK using "build" (https://pypa-build.readthedocs.io/en/stable/index.html). -Arguements: +Arguments: $1 - destination folder (default: ${TOP}/dist) ' } @@ -970,7 +970,7 @@ setup_gen_docs() { gen_docs_desc() { c_echo 'Generate documents -Generated docs would be avaialable at ${TOP}/dist/docs. +Generated docs would be available at ${TOP}/dist/docs. Arguments: $1 - output folder path (html docs) @@ -1012,7 +1012,7 @@ gen_docs_dev_desc() { c_echo 'Generate documents (with dev-server) Launch dev-server for sphinx. -Generated docs would be avaialable at ${TOP}/dist/docs. +Generated docs would be available at ${TOP}/dist/docs. Arguments: -p - port number From ddd4b9cb3bfca4a8b603009f1e0b9d51595a49f9 Mon Sep 17 00:00:00 2001 From: M Q Date: Wed, 12 Feb 2025 17:35:51 -0800 Subject: [PATCH 3/4] Fix flake8 complaint Signed-off-by: M Q --- monai/deploy/operators/monai_seg_inference_operator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/deploy/operators/monai_seg_inference_operator.py b/monai/deploy/operators/monai_seg_inference_operator.py index 5d5a9aeb..63d6007a 100644 --- a/monai/deploy/operators/monai_seg_inference_operator.py +++ b/monai/deploy/operators/monai_seg_inference_operator.py @@ -334,11 +334,11 @@ def compute_impl(self, input_image, context): d = [post_transforms(i) for i in decollate_batch(d)] self._logger.info(f"Post transform length/batch size of output: {len(d)}") self._logger.info( - f"Post transform pixel spacings of '{self._pred_dataset_key}' in the first output: {d[0][self._pred_dataset_key].pixdim}" + f"Post transform pixel spacings for {self._pred_dataset_key}: {d[0][self._pred_dataset_key].pixdim}" ) out_ndarray = d[0][self._pred_dataset_key].cpu().numpy() # Single output to numpy on CPU self._logger.info( - f"Post transform '{self._pred_dataset_key}' of {type(out_ndarray)} shape: {out_ndarray.shape}" + f"Post transform {self._pred_dataset_key} of {type(out_ndarray)} shape: {out_ndarray.shape}" ) # Need to squeeze out the channel dim fist out_ndarray = np.squeeze(out_ndarray, 0) @@ -352,7 +352,7 @@ def compute_impl(self, input_image, context): # array index order is back to DHW, the same order as the in-memory input Image obj. 
out_ndarray = out_ndarray.T.astype(np.uint8) self._logger.info( - f"Output Seg image numpy array of type '{type(out_ndarray)}' shape: {out_ndarray.shape}" + f"Output Seg image numpy array of type {type(out_ndarray)} shape: {out_ndarray.shape}" ) self._logger.info(f"Output Seg image pixel max value: {np.amax(out_ndarray)}") From 00fe381d622b73a25e0405084707497d84b8ae24 Mon Sep 17 00:00:00 2001 From: M Q Date: Wed, 12 Feb 2025 17:56:49 -0800 Subject: [PATCH 4/4] Fix flake8 complaints Signed-off-by: M Q --- .../hugging_face_integration_app/med_image_generation/app.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/apps/hugging_face_integration_app/med_image_generation/app.py b/examples/apps/hugging_face_integration_app/med_image_generation/app.py index 6f592668..de4ade6b 100644 --- a/examples/apps/hugging_face_integration_app/med_image_generation/app.py +++ b/examples/apps/hugging_face_integration_app/med_image_generation/app.py @@ -1,13 +1,10 @@ import argparse import logging -from pathlib import Path -import numpy as np import torch from diffusers import StableDiffusionPipeline -from PIL import Image -from monai.deploy.core import AppContext, Application +from monai.deploy.core import Application class App(Application):
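
For reference, once the file reformat in PATCH 1/4 and the import cleanup in PATCH 4/4 are both applied, the med_image_generation example reads roughly as the sketch below. This is reconstructed from the hunks in this series with indentation normalized; the hard-coded prompt string, which overrides the parsed --input_prompt argument, is carried over verbatim from the diff.

```python
import argparse
import logging

import torch
from diffusers import StableDiffusionPipeline

from monai.deploy.core import Application


class App(Application):
    name = "Diffusion Image App"
    description = "Simple application showing diffusion to generate Images"

    def compose(self):
        model_id = "Nihirc/Prompt2MedImage"
        device = "cuda"
        parser = argparse.ArgumentParser()
        parser.add_argument("--input_prompt", type=str, default="Generate a X-ray")
        parser.add_argument("--output", type=str, default="./out.jpg")
        args = parser.parse_args()

        input_prompt = args.input_prompt
        output_path = args.output
        print("Input Prompt: ", input_prompt)
        # Load the pretrained pipeline in half precision and move it to the GPU
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe = pipe.to(device)
        # Note: this hard-coded prompt (kept verbatim from the diff) overrides the
        # parsed --input_prompt value
        prompt = "Show me an X ray pevic fracture"
        image = pipe(prompt).images[0]
        image.save(output_path)


if __name__ == "__main__":
    logging.info(f"Begin {__name__}")
    App().run()
    logging.info(f"End {__name__}")
```

Running `python app.py --output ./out.jpg` would then save a single generated image to the given path, assuming a CUDA device is present and the Hugging Face weights can be downloaded.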