diff --git a/examples/README.md b/examples/README.md
index e02b7c64f..924d1017d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -143,11 +143,6 @@
-
- DICOM |
-  |
-  |
-
Tiled |
 |
diff --git a/examples/annotation_import/dicom.ipynb b/examples/annotation_import/dicom.ipynb
deleted file mode 100644
index 3f6aa0326..000000000
--- a/examples/annotation_import/dicom.ipynb
+++ /dev/null
@@ -1,266 +0,0 @@
-{
- "nbformat": 4,
- "nbformat_minor": 5,
- "metadata": {},
- "cells": [
- {
- "metadata": {},
- "source": [
- "",
- " ",
- " | \n"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "\n",
- " \n",
- " | \n",
- "\n",
- "\n",
- " \n",
- " | "
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "# DICOM Annotation Import\n",
- "\n",
- "* Annotations must be created and uploaded using NDJSON\n",
- "* Supported annotations that can be uploaded through the SDK:\n",
- " * Polyline\n",
- " * Segmentation masks \n",
- "* **NOT** supported:\n",
- " * Bounding box\n",
- " * Point\n",
- " * Polygons\n",
- " * Free form text classifications\n",
- " * Radio classifications \n",
- " * Checklist classifications"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "### Setup"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "%pip install -q \"labelbox[data]\"",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Replace with your API key \n",
- "Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "# Add your api key\nAPI_KEY = None\nclient = lb.Client(api_key=API_KEY)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "## Supported annotations for DICOM\n"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "######## Polyline ########\npolyline_annotation = [\n lb_types.DICOMObjectAnnotation(\n name=\"line_dicom\",\n group_key=lb_types.GroupKey.AXIAL,\n frame=1,\n value=lb_types.Line(points=[\n lb_types.Point(x=10, y=10),\n lb_types.Point(x=200, y=20),\n lb_types.Point(x=250, y=250),\n ]),\n segment_index=0,\n keyframe=True,\n ),\n lb_types.DICOMObjectAnnotation(\n name=\"line_dicom\",\n group_key=lb_types.GroupKey.AXIAL,\n frame=20,\n value=lb_types.Line(points=[\n lb_types.Point(x=10, y=10),\n lb_types.Point(x=200, y=10),\n lb_types.Point(x=300, y=300),\n ]),\n segment_index=1,\n keyframe=True,\n ),\n]\n\npolyline_annotation_ndjson = {\n \"name\":\n \"line_dicom\",\n \"groupKey\":\n \"axial\", # should be 'axial', 'sagittal', or 'coronal'\n \"segments\": [\n {\n \"keyframes\": [{\n \"frame\":\n 1,\n \"line\": [\n {\n \"x\": 10,\n \"y\": 10\n },\n {\n \"x\": 200,\n \"y\": 20\n },\n {\n \"x\": 250,\n \"y\": 250\n },\n ],\n }]\n },\n {\n \"keyframes\": [{\n \"frame\":\n 20,\n \"line\": [\n {\n \"x\": 10,\n \"y\": 10\n },\n {\n \"x\": 200,\n \"y\": 10\n },\n {\n \"x\": 300,\n \"y\": 300\n },\n ],\n }]\n },\n ],\n}",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": "######## Segmentation Masks ########\n\nmask_annotation = [\n lb_types.DICOMMaskAnnotation(\n group_key=\"axial\",\n frames=[\n lb_types.MaskFrame(\n index=1,\n instance_uri=\n \"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-mask-1.png\",\n ),\n lb_types.MaskFrame(\n index=5,\n instance_uri=\n \"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-mask-1.png\",\n ),\n ],\n instances=[\n lb_types.MaskInstance(color_rgb=(255, 255, 255),\n name=\"segmentation_mask_dicom\")\n ],\n )\n]\n\nmask_annotation_ndjson = {\n \"groupKey\": \"axial\",\n \"masks\": {\n \"frames\": [\n {\n \"index\":\n 1,\n \"instanceURI\":\n \"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-mask-1.png\",\n },\n {\n \"index\":\n 5,\n \"instanceURI\":\n \"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-mask-1.png\",\n },\n ],\n \"instances\": [{\n \"colorRGB\": (255, 255, 255),\n \"name\": \"segmentation_mask_dicom\"\n }],\n },\n}",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "## Upload Annotations - putting it all together"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "### Step 1: Import data rows into Catalog"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "global_key = \"sample-dicom-1.dcm\" + str(uuid.uuid4())\nasset = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-dicom-1.dcm\",\n \"global_key\":\n global_key,\n}\n\ndataset = client.create_dataset(name=\"dicom_demo_dataset\")\ntask = dataset.create_data_rows([asset])\ntask.wait_till_done()\nprint(\"Errors :\", task.errors)\nprint(\"Failed data rows:\", task.failed_data_rows)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Step 2: Create/select an ontology\n",
- "Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the `name` fields in your annotations to ensure the correct feature schemas are matched.\n",
- "\n",
- "For example, when we create the line annotation above, we provided the `name` as `line_dicom`. Now, when we setup our ontology, we must ensure that the name of my line tool is also `line_dicom`. The same alignment must hold true for the other tools and classifications we create in our ontology.\n",
- "\n",
- "\n",
- "[Documentation for reference ](https://docs.labelbox.com/reference/import-text-annotations)"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(\n tool=lb.Tool.Type.RASTER_SEGMENTATION,\n name=\"segmentation_mask_dicom\",\n ),\n lb.Tool(tool=lb.Tool.Type.LINE, name=\"line_dicom\"),\n])\n\nontology = client.create_ontology(\n \"Ontology DICOM Annotations\",\n ontology_builder.asdict(),\n media_type=lb.MediaType.Dicom,\n)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Step 3: Create a labeling project \n",
- "Connect the ontology to the labeling project."
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "# Project defaults to batch mode with benchmark quality settings if this argument is not provided\n# Queue mode will be deprecated once dataset mode is deprecated\n\nproject = client.create_project(name=\"dicom_project_demo\",\n media_type=lb.MediaType.Dicom)\n\n## connect ontology to your project\nproject.setup_editor(ontology)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Step 4: Send a batch of data rows to the project"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "# Create batches\n\n# Create a batch to send to your MAL project\nbatch = project.create_batch(\n \"first-batch-dicom-demo\", # Each batch in a project must have a unique name\n global_keys=[global_key\n ], # a list of data row objects, data row ids or global keys\n priority=5, # priority between 1(Highest) - 5(lowest)\n)\n\nprint(\"Batch: \", batch)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Step 5: Create the annotations payload \n",
- "Create the annotations payload using the snippets of code above.\n",
- "\n",
- "Labelbox supports two formats for the annotations payload: NDJSON and Python Annotation types."
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "#### Python Annotation Types"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "annotations_list = polyline_annotation + mask_annotation\nlabels = [\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations_list)\n]",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "#### NDJSON annotations\n",
- "Here we create the complete `label_ndjson` payload of annotations. There is one annotation for each *reference to an annotation* that we created above."
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "label_ndjson = []\n\nfor annotation in [polyline_annotation_ndjson, mask_annotation_ndjson]:\n annotation.update({\"dataRow\": {\"globalKey\": global_key}})\n label_ndjson.append(annotation)",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Step 6: Upload annotations to a project as pre-labels or completed labels\n",
- "For the purpose of this tutorial only run one of the label imports at once, otherwise the previous import might get overwritten."
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": [
- "#### Model-Assisted Labeling (MAL)"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "# Upload MAL label for this data row in project\nupload_job_mal = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_import_job-\" + str(uuid.uuid4()),\n predictions=labels,\n)\n\nupload_job_mal.wait_until_done()\nprint(\"Errors:\", upload_job_mal.errors)\nprint(\"Status of uploads: \", upload_job_mal.statuses)\nprint(\" \")",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "#### Label Import"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "upload_job_label_import = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job-\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nupload_job_label_import.wait_until_done()\nprint(\"Errors:\", upload_job_label_import.errors)\nprint(\"Status of uploads: \", upload_job_label_import.statuses)\nprint(\" \")",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- },
- {
- "metadata": {},
- "source": [
- "### Optional deletions for cleanup"
- ],
- "cell_type": "markdown"
- },
- {
- "metadata": {},
- "source": "# Delete Project\n# project.delete()\n# dataset.delete()",
- "cell_type": "code",
- "outputs": [],
- "execution_count": null
- }
- ]
-}
\ No newline at end of file
diff --git a/libs/labelbox/src/labelbox/data/annotation_types/__init__.py b/libs/labelbox/src/labelbox/data/annotation_types/__init__.py
index 1a78127e1..fc75652cf 100644
--- a/libs/labelbox/src/labelbox/data/annotation_types/__init__.py
+++ b/libs/labelbox/src/labelbox/data/annotation_types/__init__.py
@@ -15,12 +15,9 @@
from .video import VideoClassificationAnnotation
from .video import VideoObjectAnnotation
-from .video import DICOMObjectAnnotation
-from .video import GroupKey
from .video import MaskFrame
from .video import MaskInstance
from .video import VideoMaskAnnotation
-from .video import DICOMMaskAnnotation
from .ner import ConversationEntity
from .ner import DocumentEntity
diff --git a/libs/labelbox/src/labelbox/data/annotation_types/video.py b/libs/labelbox/src/labelbox/data/annotation_types/video.py
index 14a692bae..e2c9dac7a 100644
--- a/libs/labelbox/src/labelbox/data/annotation_types/video.py
+++ b/libs/labelbox/src/labelbox/data/annotation_types/video.py
@@ -1,9 +1,12 @@
-from enum import Enum
from typing import List, Optional, Tuple
-from labelbox.data.annotation_types.annotation import (
- ClassificationAnnotation,
- ObjectAnnotation,
+from pydantic import (
+ AliasChoices,
+ BaseModel,
+ ConfigDict,
+ Field,
+ field_validator,
+ model_validator,
)
from labelbox.data.annotation_types.annotation import (
@@ -16,14 +19,6 @@
CustomMetricsNotSupportedMixin,
)
from labelbox.utils import _CamelCaseMixin, is_valid_uri
-from pydantic import (
- model_validator,
- BaseModel,
- field_validator,
- Field,
- ConfigDict,
- AliasChoices,
-)
class VideoClassificationAnnotation(ClassificationAnnotation):
@@ -72,43 +67,6 @@ class VideoObjectAnnotation(
segment_index: Optional[int] = None
-class GroupKey(Enum):
- """Group key for DICOM annotations"""
-
- AXIAL = "axial"
- SAGITTAL = "sagittal"
- CORONAL = "coronal"
-
-
-class DICOMObjectAnnotation(VideoObjectAnnotation):
- """DICOM object annotation
- >>> DICOMObjectAnnotation(
- >>> name="dicom_polyline",
- >>> frame=2,
- >>> value=lb_types.Line(points = [
- >>> lb_types.Point(x=680, y=100),
- >>> lb_types.Point(x=100, y=190),
- >>> lb_types.Point(x=190, y=220)
- >>> ]),
- >>> segment_index=0,
- >>> keyframe=True,
- >>> group_key=GroupKey.AXIAL
- >>> )
- Args:
- name (Optional[str])
- feature_schema_id (Optional[Cuid])
- value (Geometry)
- group_key (GroupKey)
- frame (Int): The frame index that this annotation corresponds to
- keyframe (bool): Whether this annotation is a human-generated keyframe or an interpolated annotation
- segment_index (Optional[Int]): Index of the video segment this annotation belongs to
- classifications (List[ClassificationAnnotation]) = []
- extra (Dict[str, Any])
- """
-
- group_key: GroupKey
-
-
class MaskFrame(_CamelCaseMixin, BaseModel):
index: int
instance_uri: Optional[str] = Field(
@@ -162,23 +120,3 @@ class VideoMaskAnnotation(BaseModel):
frames: List[MaskFrame]
instances: List[MaskInstance]
-
-
-class DICOMMaskAnnotation(VideoMaskAnnotation):
- """DICOM mask annotation
- >>> DICOMMaskAnnotation(
- >>> name="dicom_mask",
- >>> group_key=GroupKey.AXIAL,
- >>> frames=[
- >>> MaskFrame(index=1, instance_uri='https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA'),
- >>> MaskFrame(index=5, instance_uri='https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys1%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA'),
- >>> ],
- >>> instances=[
- >>> MaskInstance(color_rgb=(0, 0, 255), name="mask1"),
- >>> MaskInstance(color_rgb=(0, 255, 0), name="mask2"),
- >>> MaskInstance(color_rgb=(255, 0, 0), name="mask3")
- >>> ]
- >>> )
- """
-
- group_key: GroupKey
diff --git a/libs/labelbox/src/labelbox/data/serialization/ndjson/label.py b/libs/labelbox/src/labelbox/data/serialization/ndjson/label.py
index af938e511..f8fd832f4 100644
--- a/libs/labelbox/src/labelbox/data/serialization/ndjson/label.py
+++ b/libs/labelbox/src/labelbox/data/serialization/ndjson/label.py
@@ -1,48 +1,47 @@
+from collections import defaultdict
from itertools import groupby
from operator import itemgetter
from typing import Dict, Generator, List, Tuple, Union
-from collections import defaultdict
+
+from pydantic import BaseModel
from ...annotation_types.annotation import (
ClassificationAnnotation,
ObjectAnnotation,
)
-from ...annotation_types.relationship import RelationshipAnnotation
-from ...annotation_types.video import (
- VideoClassificationAnnotation,
-)
-from ...annotation_types.video import VideoObjectAnnotation, VideoMaskAnnotation
from ...annotation_types.collection import LabelCollection, LabelGenerator
from ...annotation_types.data.generic_data_row_data import GenericDataRowData
from ...annotation_types.label import Label
-from ...annotation_types.metrics import ScalarMetric, ConfusionMatrixMetric
from ...annotation_types.llm_prompt_response.prompt import (
PromptClassificationAnnotation,
)
+from ...annotation_types.metrics import ConfusionMatrixMetric, ScalarMetric
from ...annotation_types.mmc import MessageEvaluationTaskAnnotation
-
-from .metric import NDScalarMetric, NDMetricAnnotation, NDConfusionMatrixMetric
+from ...annotation_types.relationship import RelationshipAnnotation
+from ...annotation_types.video import (
+ VideoClassificationAnnotation,
+ VideoMaskAnnotation,
+ VideoObjectAnnotation,
+)
+from .base import DataRow
from .classification import (
NDChecklistSubclass,
NDClassification,
NDClassificationType,
- NDRadioSubclass,
NDPromptClassification,
NDPromptClassificationType,
NDPromptText,
+ NDRadioSubclass,
)
+from .metric import NDConfusionMatrixMetric, NDMetricAnnotation, NDScalarMetric
+from .mmc import NDMessageTask
from .objects import (
NDObject,
NDObjectType,
NDSegments,
- NDDicomSegments,
NDVideoMasks,
- NDDicomMasks,
)
-from .mmc import NDMessageTask
from .relationship import NDRelationship
-from .base import DataRow
-from pydantic import BaseModel
AnnotationType = Union[
NDObjectType,
@@ -50,9 +49,7 @@
NDPromptClassificationType,
NDConfusionMatrixMetric,
NDScalarMetric,
- NDDicomSegments,
NDSegments,
- NDDicomMasks,
NDVideoMasks,
NDRelationship,
NDPromptText,
@@ -136,15 +133,7 @@ def _generate_annotations(
# deserialized objects in the _AnnotationGroupTuple
# object *if* the object can be used in a relationship
for uuid, ndjson_annotation in group.ndjson_annotations.items():
- if isinstance(ndjson_annotation, NDDicomSegments):
- annotations.extend(
- NDDicomSegments.to_common(
- ndjson_annotation,
- ndjson_annotation.name,
- ndjson_annotation.schema_id,
- )
- )
- elif isinstance(ndjson_annotation, NDSegments):
+ if isinstance(ndjson_annotation, NDSegments):
annotations.extend(
NDSegments.to_common(
ndjson_annotation,
@@ -152,10 +141,6 @@ def _generate_annotations(
ndjson_annotation.schema_id,
)
)
- elif isinstance(ndjson_annotation, NDDicomMasks):
- annotations.append(
- NDDicomMasks.to_common(ndjson_annotation)
- )
elif isinstance(ndjson_annotation, NDVideoMasks):
annotations.append(
NDVideoMasks.to_common(ndjson_annotation)
diff --git a/libs/labelbox/src/labelbox/data/serialization/ndjson/objects.py b/libs/labelbox/src/labelbox/data/serialization/ndjson/objects.py
index 28f3a45c8..fe45bed86 100644
--- a/libs/labelbox/src/labelbox/data/serialization/ndjson/objects.py
+++ b/libs/labelbox/src/labelbox/data/serialization/ndjson/objects.py
@@ -1,58 +1,55 @@
-from io import BytesIO
-from typing import Any, Dict, List, Tuple, Union, Optional
import base64
+from io import BytesIO
+from typing import Any, Dict, List, Optional, Tuple, Union
+import numpy as np
+from PIL import Image
+from pydantic import BaseModel
+
+from labelbox.data.annotation_types.data import GenericDataRowData
from labelbox.data.annotation_types.data.raster import MaskData
from labelbox.data.annotation_types.ner.conversation_entity import (
ConversationEntity,
)
from labelbox.data.annotation_types.video import (
VideoObjectAnnotation,
- DICOMObjectAnnotation,
)
from labelbox.data.mixins import (
ConfidenceMixin,
- CustomMetricsMixin,
CustomMetric,
+ CustomMetricsMixin,
CustomMetricsNotSupportedMixin,
)
-from ....annotated_types import Cuid
-import numpy as np
-from PIL import Image
-
-from labelbox.data.annotation_types.data import GenericDataRowData
-
-from ...annotation_types.data import GenericDataRowData
-from ...annotation_types.ner import (
- DocumentEntity,
- DocumentTextSelection,
- TextEntity,
+from ....annotated_types import Cuid
+from ...annotation_types.annotation import (
+ ClassificationAnnotation,
+ ObjectAnnotation,
)
+from ...annotation_types.data import GenericDataRowData
from ...annotation_types.geometry import (
DocumentRectangle,
- Rectangle,
- Polygon,
Line,
- Point,
Mask,
+ Point,
+ Polygon,
+ Rectangle,
)
-from ...annotation_types.annotation import (
- ClassificationAnnotation,
- ObjectAnnotation,
+from ...annotation_types.ner import (
+ DocumentEntity,
+ DocumentTextSelection,
+ TextEntity,
)
from ...annotation_types.video import (
- VideoMaskAnnotation,
- DICOMMaskAnnotation,
MaskFrame,
MaskInstance,
+ VideoMaskAnnotation,
)
+from .base import DataRow, NDAnnotation, NDJsonBase
from .classification import (
NDSubclassification,
NDSubclassificationType,
)
-from .base import DataRow, NDAnnotation, NDJsonBase
-from pydantic import BaseModel
class NDBaseObject(NDAnnotation):
@@ -64,10 +61,6 @@ class VideoSupported(BaseModel):
frame: int
-class DicomSupported(BaseModel):
- group_key: str
-
-
class _Point(BaseModel):
x: float
y: float
@@ -210,25 +203,6 @@ def from_common(
)
-class NDDicomLine(NDFrameLine):
- def to_common(
- self,
- name: str,
- feature_schema_id: Cuid,
- segment_index: int,
- group_key: str,
- ) -> DICOMObjectAnnotation:
- return DICOMObjectAnnotation(
- frame=self.frame,
- segment_index=segment_index,
- keyframe=True,
- name=name,
- feature_schema_id=feature_schema_id,
- value=Line(points=[Point(x=pt.x, y=pt.y) for pt in self.line]),
- group_key=group_key,
- )
-
-
class NDPolygon(NDBaseObject, ConfidenceMixin, CustomMetricsMixin):
polygon: List[_Point]
@@ -452,116 +426,40 @@ def from_common(cls, segment):
)
-class NDDicomSegment(NDSegment):
- keyframes: List[NDDicomLine]
-
- @staticmethod
- def lookup_segment_object_type(segment: List) -> "NDDicomObjectType":
- """Used for determining which object type the annotation contains
- returns the object type"""
- segment_class = type(segment[0].value)
- if segment_class == Line:
- return NDDicomLine
- else:
- raise ValueError("DICOM segments only support Line objects")
-
- def to_common(
- self,
- name: str,
- feature_schema_id: Cuid,
- uuid: str,
- segment_index: int,
- group_key: str,
- ):
- return [
- self.segment_with_uuid(
- keyframe.to_common(
- name=name,
- feature_schema_id=feature_schema_id,
- segment_index=segment_index,
- group_key=group_key,
- ),
- uuid,
- )
- for keyframe in self.keyframes
- ]
-
-
class NDSegments(NDBaseObject):
    segments: List[NDSegment]

    def to_common(self, name: str, feature_schema_id: Cuid):
        result = []
        for idx, segment in enumerate(self.segments):
            result.extend(
                segment.to_common(
                    name=name,
                    feature_schema_id=feature_schema_id,
                    segment_index=idx,
                    uuid=self.uuid,
                )
            )
        return result

    @classmethod
    def from_common(
        cls,
        segments: List[VideoObjectAnnotation],
        data: GenericDataRowData,
        name: str,
        feature_schema_id: Cuid,
        extra: Dict[str, Any],
    ) -> "NDSegments":
        segments = [NDSegment.from_common(segment) for segment in segments]

        return cls(
            segments=segments,
            data_row=DataRow(id=data.uid, global_key=data.global_key),
            name=name,
            schema_id=feature_schema_id,
            uuid=extra.get("uuid"),
        )
-
-
-class NDDicomSegments(NDBaseObject, DicomSupported):
- segments: List[NDDicomSegment]
-
- def to_common(self, name: str, feature_schema_id: Cuid):
- result = []
- for idx, segment in enumerate(self.segments):
- result.extend(
- segment.to_common(
- name=name,
- feature_schema_id=feature_schema_id,
- segment_index=idx,
- uuid=self.uuid,
- group_key=self.group_key,
- )
- )
- return result
-
- @classmethod
- def from_common(
- cls,
- segments: List[DICOMObjectAnnotation],
- data: GenericDataRowData,
- name: str,
- feature_schema_id: Cuid,
- extra: Dict[str, Any],
- group_key: str,
- ) -> "NDDicomSegments":
- segments = [NDDicomSegment.from_common(segment) for segment in segments]
-
- return cls(
- segments=segments,
- dataRow=DataRow(id=data.uid, global_key=data.global_key),
- name=name,
- schema_id=feature_schema_id,
- uuid=extra.get("uuid"),
- group_key=group_key,
- )
-
-
class _URIMask(BaseModel):
instanceURI: str
colorRGB: Tuple[int, int, int]
@@ -666,25 +564,6 @@ def from_common(cls, annotation, data):
)
-class NDDicomMasks(NDVideoMasks, DicomSupported):
- def to_common(self) -> DICOMMaskAnnotation:
- return DICOMMaskAnnotation(
- frames=self.masks.frames,
- instances=self.masks.instances,
- group_key=self.group_key,
- )
-
- @classmethod
- def from_common(cls, annotation, data):
- return cls(
- data_row=DataRow(id=data.uid, global_key=data.global_key),
- masks=NDVideoMasksFramesInstances(
- frames=annotation.frames, instances=annotation.instances
- ),
- group_key=annotation.group_key.value,
- )
-
-
class Location(BaseModel):
start: int
end: int
@@ -848,22 +727,7 @@ def from_common(
obj = cls.lookup_object(annotation)
# if it is video segments
- if obj == NDSegments or obj == NDDicomSegments:
- first_video_annotation = annotation[0][0]
- args = dict(
- segments=annotation,
- data=data,
- name=first_video_annotation.name,
- feature_schema_id=first_video_annotation.feature_schema_id,
- extra=first_video_annotation.extra,
- )
-
- if isinstance(first_video_annotation, DICOMObjectAnnotation):
- group_key = first_video_annotation.group_key.value
- args.update(dict(group_key=group_key))
-
- return obj.from_common(**args)
- elif obj == NDVideoMasks or obj == NDDicomMasks:
+ if obj == NDVideoMasks:
return obj.from_common(annotation, data)
subclasses = [
@@ -892,20 +756,10 @@ def lookup_object(
def lookup_object(
annotation: Union[ObjectAnnotation, List],
) -> "NDObjectType":
- if isinstance(annotation, DICOMMaskAnnotation):
- result = NDDicomMasks
- elif isinstance(annotation, VideoMaskAnnotation):
+ if isinstance(annotation, VideoMaskAnnotation):
result = NDVideoMasks
elif isinstance(annotation, list):
- try:
- first_annotation = annotation[0][0]
- except IndexError:
- raise ValueError("Annotation list cannot be empty")
-
- if isinstance(first_annotation, DICOMObjectAnnotation):
- result = NDDicomSegments
- else:
- result = NDSegments
+ result = NDSegments
else:
result = {
Line: NDLine,
@@ -938,4 +792,3 @@ ]
]
NDFrameObjectType = NDFrameRectangle, NDFramePoint, NDFrameLine
-NDDicomObjectType = NDDicomLine
diff --git a/libs/labelbox/src/labelbox/schema/media_type.py b/libs/labelbox/src/labelbox/schema/media_type.py
index ae0bbbb3f..f55a65daf 100644
--- a/libs/labelbox/src/labelbox/schema/media_type.py
+++ b/libs/labelbox/src/labelbox/schema/media_type.py
@@ -6,7 +6,6 @@
class MediaType(Enum):
Audio = "AUDIO"
Conversational = "CONVERSATIONAL"
- Dicom = "DICOM"
Document = "PDF"
Geospatial_Tile = "TMS_GEO"
Html = "HTML"
diff --git a/libs/labelbox/tests/data/annotation_import/conftest.py b/libs/labelbox/tests/data/annotation_import/conftest.py
index 001b96771..735166807 100644
--- a/libs/labelbox/tests/data/annotation_import/conftest.py
+++ b/libs/labelbox/tests/data/annotation_import/conftest.py
@@ -1,20 +1,17 @@
+import time
import uuid
-from typing import Union
+from typing import Tuple, Type, Union
-from labelbox.schema.model_run import ModelRun
-from labelbox.schema.ontology import Ontology
-from labelbox.schema.project import Project
import pytest
-import time
import requests
-
-from labelbox import parser, MediaType, OntologyKind
-from labelbox import Client, Dataset
-
-from typing import Tuple, Type
-from labelbox.schema.annotation_import import LabelImport, AnnotationImportState
from pytest import FixtureRequest
+from labelbox import Client, Dataset, MediaType, OntologyKind, parser
+from labelbox.schema.annotation_import import AnnotationImportState, LabelImport
+from labelbox.schema.model_run import ModelRun
+from labelbox.schema.ontology import Ontology
+from labelbox.schema.project import Project
+
"""
The main fixtures of this library are configured_project and configured_project_by_global_key. Both fixtures generate data rows with a parametrized media type. They create a number of data rows equal to the DATA_ROW_COUNT variable below. The data rows are generated with a factory fixture that returns a function that allows you to pass a global key. The ontologies are generated normalized and based on the MediaType given (i.e. only features supported by that MediaType are created). This ontology is later used to obtain the correct annotations with the prediction_id_mapping and corresponding inferences. Each data row will have all supported annotations for the MediaType attached.
"""
@@ -59,18 +56,6 @@ def conversational_data_row(global_key):
return conversational_data_row
-@pytest.fixture(scope="module", autouse=True)
-def dicom_data_row_factory():
- def dicom_data_row(global_key):
- return {
- "row_data": "https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-dicom-1.dcm",
- "global_key": f"https://storage.googleapis.com/labelbox-datasets/dicom-sample-data/sample-dicom-1.dcm-{global_key}",
- "media_type": "DICOM",
- }
-
- return dicom_data_row
-
-
@pytest.fixture(scope="module", autouse=True)
def geospatial_data_row_factory():
def geospatial_data_row(global_key):
@@ -173,7 +158,6 @@ def offline_model_evaluation_data_row(global_key: str):
def data_row_json_by_media_type(
audio_data_row_factory,
conversational_data_row_factory,
- dicom_data_row_factory,
geospatial_data_row_factory,
html_data_row_factory,
image_data_row_factory,
@@ -185,7 +169,6 @@ def data_row_json_by_media_type(
return {
MediaType.Audio: audio_data_row_factory,
MediaType.Conversational: conversational_data_row_factory,
- MediaType.Dicom: dicom_data_row_factory,
MediaType.Geospatial_Tile: geospatial_data_row_factory,
MediaType.Html: html_data_row_factory,
MediaType.Image: image_data_row_factory,
@@ -541,10 +524,6 @@ def normalized_ontology_by_media_type():
radio,
],
},
- MediaType.Dicom: {
- "tools": [raster_segmentation_tool, polyline_tool],
- "classifications": [],
- },
MediaType.Conversational: {
"tools": [entity_tool],
"classifications": [
@@ -1256,35 +1235,6 @@ def line_inference(prediction_id_mapping):
return lines
-@pytest.fixture
-def line_inference_v2(prediction_id_mapping):
- lines = []
- for feature in prediction_id_mapping:
- if "line" not in feature:
- continue
- line = feature["line"].copy()
- line_data = {
- "groupKey": "axial",
- "segments": [
- {
- "keyframes": [
- {
- "frame": 1,
- "line": [
- {"x": 147.692, "y": 118.154},
- {"x": 150.692, "y": 160.154},
- ],
- }
- ]
- },
- ],
- }
- line.update(line_data)
- del line["tool"]
- lines.append(line)
- return lines
-
-
@pytest.fixture
def point_inference(prediction_id_mapping):
points = []
@@ -1796,7 +1746,6 @@ def annotations_by_media_type(
polygon_inference,
rectangle_inference,
rectangle_inference_document,
- line_inference_v2,
line_inference,
entity_inference,
entity_inference_index,
@@ -1825,7 +1774,6 @@ def annotations_by_media_type(
text_inference_index,
entity_inference_index,
],
- MediaType.Dicom: [line_inference_v2],
MediaType.Document: [
entity_inference_document,
checklist_inference,
@@ -2313,51 +2261,6 @@ def expected_export_v2_conversation():
return expected_annotations
-@pytest.fixture()
-def expected_export_v2_dicom():
- expected_annotations = {
- "groups": {
- "Axial": {
- "name": "Axial",
- "classifications": [],
- "frames": {
- "1": {
- "objects": {
- "": {
- "name": "polyline",
- "value": "polyline",
- "annotation_kind": "DICOMPolyline",
- "classifications": [],
- "line": [
- {"x": 147.692, "y": 118.154},
- {"x": 150.692, "y": 160.154},
- ],
- }
- },
- "classifications": [],
- }
- },
- },
- "Sagittal": {
- "name": "Sagittal",
- "classifications": [],
- "frames": {},
- },
- "Coronal": {"name": "Coronal", "classifications": [], "frames": {}},
- },
- "segments": {
- "Axial": {"": [[1, 1]]},
- "Sagittal": {},
- "Coronal": {},
- },
- "classifications": [],
- "key_frame_feature_map": {
- "": {"Axial": {"1": True}, "Coronal": {}, "Sagittal": {}}
- },
- }
- return expected_annotations
-
-
@pytest.fixture()
def expected_export_v2_document():
expected_annotations = {
@@ -2665,7 +2568,6 @@ def exports_v2_by_media_type(
expected_export_v2_text,
expected_export_v2_video,
expected_export_v2_conversation,
- expected_export_v2_dicom,
expected_export_v2_document,
expected_export_v2_llm_prompt_response_creation,
expected_export_v2_llm_prompt_creation,
@@ -2679,7 +2581,6 @@ def exports_v2_by_media_type(
MediaType.Text: expected_export_v2_text,
MediaType.Video: expected_export_v2_video,
MediaType.Conversational: expected_export_v2_conversation,
- MediaType.Dicom: expected_export_v2_dicom,
MediaType.Document: expected_export_v2_document,
MediaType.LLMPromptResponseCreation: expected_export_v2_llm_prompt_response_creation,
MediaType.LLMPromptCreation: expected_export_v2_llm_prompt_creation,
diff --git a/libs/labelbox/tests/data/annotation_import/test_generic_data_types.py b/libs/labelbox/tests/data/annotation_import/test_generic_data_types.py
index e7ff7c684..805c24edf 100644
--- a/libs/labelbox/tests/data/annotation_import/test_generic_data_types.py
+++ b/libs/labelbox/tests/data/annotation_import/test_generic_data_types.py
@@ -34,7 +34,6 @@ def validate_iso_format(date_string: str):
(MediaType.Video, MediaType.Video),
(MediaType.Conversational, MediaType.Conversational),
(MediaType.Document, MediaType.Document),
- (MediaType.Dicom, MediaType.Dicom),
(OntologyKind.ResponseCreation, OntologyKind.ResponseCreation),
(OntologyKind.ModelEvaluation, OntologyKind.ModelEvaluation),
],
@@ -161,7 +160,6 @@ def test_import_media_types_llm(
(MediaType.Video, MediaType.Video),
(MediaType.Conversational, MediaType.Conversational),
(MediaType.Document, MediaType.Document),
- (MediaType.Dicom, MediaType.Dicom),
(OntologyKind.ResponseCreation, OntologyKind.ResponseCreation),
(OntologyKind.ModelEvaluation, OntologyKind.ModelEvaluation),
],
@@ -241,7 +239,6 @@ def test_import_media_types_by_global_key(
(MediaType.Video, MediaType.Video),
(MediaType.Conversational, MediaType.Conversational),
(MediaType.Document, MediaType.Document),
- (MediaType.Dicom, MediaType.Dicom),
(
MediaType.LLMPromptResponseCreation,
MediaType.LLMPromptResponseCreation,
@@ -281,7 +278,6 @@ def test_import_mal_annotations(
(MediaType.Video, MediaType.Video),
(MediaType.Conversational, MediaType.Conversational),
(MediaType.Document, MediaType.Document),
- (MediaType.Dicom, MediaType.Dicom),
(OntologyKind.ResponseCreation, OntologyKind.ResponseCreation),
(OntologyKind.ModelEvaluation, OntologyKind.ModelEvaluation),
],
diff --git a/libs/labelbox/tests/data/serialization/ndjson/test_dicom.py b/libs/labelbox/tests/data/serialization/ndjson/test_dicom.py
deleted file mode 100644
index 6a00fa871..000000000
--- a/libs/labelbox/tests/data/serialization/ndjson/test_dicom.py
+++ /dev/null
@@ -1,184 +0,0 @@
-from copy import copy
-import pytest
-import labelbox.types as lb_types
-from labelbox.data.serialization import NDJsonConverter
-from labelbox.data.serialization.ndjson.objects import (
- NDDicomSegments,
- NDDicomSegment,
- NDDicomLine,
-)
-
-"""
-Polyline test data
-"""
-
-dicom_polyline_annotations = [
- lb_types.DICOMObjectAnnotation(
- uuid="78a8a027-9089-420c-8348-6099eb77e4aa",
- name="dicom_polyline",
- frame=2,
- value=lb_types.Line(
- points=[
- lb_types.Point(x=680, y=100),
- lb_types.Point(x=100, y=190),
- lb_types.Point(x=190, y=220),
- ]
- ),
- segment_index=0,
- keyframe=True,
- group_key=lb_types.GroupKey.AXIAL,
- )
-]
-
-polyline_label = lb_types.Label(
- data=lb_types.GenericDataRowData(uid="test-uid"),
- annotations=dicom_polyline_annotations,
-)
-
-polyline_annotation_ndjson = {
- "classifications": [],
- "dataRow": {"id": "test-uid"},
- "name": "dicom_polyline",
- "groupKey": "axial",
- "segments": [
- {
- "keyframes": [
- {
- "frame": 2,
- "line": [
- {"x": 680.0, "y": 100.0},
- {"x": 100.0, "y": 190.0},
- {"x": 190.0, "y": 220.0},
- ],
- "classifications": [],
- }
- ]
- }
- ],
-}
-
-polyline_with_global_key = lb_types.Label(
- data=lb_types.GenericDataRowData(global_key="test-global-key"),
- annotations=dicom_polyline_annotations,
-)
-
-polyline_annotation_ndjson_with_global_key = copy(polyline_annotation_ndjson)
-polyline_annotation_ndjson_with_global_key["dataRow"] = {
- "globalKey": "test-global-key"
-}
-"""
-Video test data
-"""
-
-instance_uri_1 = "https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA"
-instance_uri_5 = "https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys1%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA"
-frames = [
- lb_types.MaskFrame(index=1, instance_uri=instance_uri_1),
- lb_types.MaskFrame(index=5, instance_uri=instance_uri_5),
-]
-instances = [
- lb_types.MaskInstance(color_rgb=(0, 0, 255), name="mask1"),
- lb_types.MaskInstance(color_rgb=(0, 255, 0), name="mask2"),
- lb_types.MaskInstance(color_rgb=(255, 0, 0), name="mask3"),
-]
-
-video_mask_annotation = lb_types.VideoMaskAnnotation(
- frames=frames, instances=instances
-)
-
-video_mask_annotation_ndjson = {
- "dataRow": {"id": "test-uid"},
- "masks": {
- "frames": [
- {"index": 1, "instanceURI": instance_uri_1},
- {"index": 5, "instanceURI": instance_uri_5},
- ],
- "instances": [
- {"colorRGB": (0, 0, 255), "name": "mask1"},
- {"colorRGB": (0, 255, 0), "name": "mask2"},
- {"colorRGB": (255, 0, 0), "name": "mask3"},
- ],
- },
-}
-
-video_mask_annotation_ndjson_with_global_key = copy(
- video_mask_annotation_ndjson
-)
-video_mask_annotation_ndjson_with_global_key["dataRow"] = {
- "globalKey": "test-global-key"
-}
-
-video_mask_label = lb_types.Label(
- data=lb_types.GenericDataRowData(uid="test-uid"),
- annotations=[video_mask_annotation],
-)
-
-video_mask_label_with_global_key = lb_types.Label(
- data=lb_types.GenericDataRowData(global_key="test-global-key"),
- annotations=[video_mask_annotation],
-)
-"""
-DICOM Mask test data
-"""
-
-dicom_mask_annotation = lb_types.DICOMMaskAnnotation(
- name="dicom_mask",
- group_key=lb_types.GroupKey.AXIAL,
- frames=frames,
- instances=instances,
-)
-
-dicom_mask_label = lb_types.Label(
- data=lb_types.GenericDataRowData(uid="test-uid"),
- annotations=[dicom_mask_annotation],
-)
-
-dicom_mask_label_with_global_key = lb_types.Label(
- data=lb_types.GenericDataRowData(global_key="test-global-key"),
- annotations=[dicom_mask_annotation],
-)
-
-dicom_mask_annotation_ndjson = copy(video_mask_annotation_ndjson)
-dicom_mask_annotation_ndjson["groupKey"] = "axial"
-dicom_mask_annotation_ndjson_with_global_key = copy(
- dicom_mask_annotation_ndjson
-)
-dicom_mask_annotation_ndjson_with_global_key["dataRow"] = {
- "globalKey": "test-global-key"
-}
-"""
-Tests
-"""
-
-labels = [
- polyline_label,
- polyline_with_global_key,
- dicom_mask_label,
- dicom_mask_label_with_global_key,
- video_mask_label,
- video_mask_label_with_global_key,
-]
-ndjsons = [
- polyline_annotation_ndjson,
- polyline_annotation_ndjson_with_global_key,
- dicom_mask_annotation_ndjson,
- dicom_mask_annotation_ndjson_with_global_key,
- video_mask_annotation_ndjson,
- video_mask_annotation_ndjson_with_global_key,
-]
-labels_ndjsons = list(zip(labels, ndjsons))
-
-
-def test_deserialize_nd_dicom_segments():
- nd_dicom_segments = NDDicomSegments(**polyline_annotation_ndjson)
- assert isinstance(nd_dicom_segments, NDDicomSegments)
- assert isinstance(nd_dicom_segments.segments[0], NDDicomSegment)
- assert isinstance(nd_dicom_segments.segments[0].keyframes[0], NDDicomLine)
-
-
-@pytest.mark.parametrize("label, ndjson", labels_ndjsons)
-def test_serialize_label(label, ndjson):
- serialized_label = next(NDJsonConverter().serialize([label]))
- if "uuid" in serialized_label:
- serialized_label.pop("uuid")
- assert serialized_label == ndjson