From 8a6b885407d5058cf4ef7a3601fac024639b4edf Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Tue, 16 Jul 2024 16:33:26 -0500
Subject: [PATCH 1/3] added correct data param
---
.../prediction_upload/video_predictions.ipynb | 1376 +++++++++++++++--
1 file changed, 1248 insertions(+), 128 deletions(-)
diff --git a/examples/prediction_upload/video_predictions.ipynb b/examples/prediction_upload/video_predictions.ipynb
index 63fe579bd..5b8ed4330 100644
--- a/examples/prediction_upload/video_predictions.ipynb
+++ b/examples/prediction_upload/video_predictions.ipynb
@@ -1,18 +1,16 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {},
"cells": [
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
- "
",
- " ",
+ " | \n",
+ " \n",
" | \n"
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"\n",
@@ -24,10 +22,10 @@
" \n",
" | "
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"# Video Prediction Import \n",
@@ -49,328 +47,1450 @@
"- Raster segmentation masks [not supported in model]\n",
"- Vector segmentation masks [not supported in video editor]\n",
"\n"
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "%pip install -q \"labelbox[data]\"",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "%pip install -q \"labelbox[data]\""
+ ]
},
{
- "metadata": {},
- "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "import labelbox as lb\n",
+ "import labelbox.types as lb_types\n",
+ "import uuid"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Replace with your API Key \n",
"Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "API_KEY = \"\"\nclient = lb.Client(API_KEY)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "API_KEY = \"\"\n",
+ "client = lb.Client(API_KEY)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Supported Predictions\n",
"- Confidence scores are currently not supported for segment or frame annotations, which are required for bounding box, point, and line for video assets. For this tutorial, only the radio and checklist annotations will have confidence scores."
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "####### Bounding box (frame specific) ###########\n\n# Confidence scores are not supported for frame specific bounding box annotations and VideoObjectAnnotation\n\n# bbox dimensions\nbbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n\n# Python Annotation\nbbox_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"],\n y=bbox_dm[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=15,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=19,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n]\n\n# NDJSON\nbbox_prediction_ndjson = {\n \"name\":\n \"bbox_video\",\n \"segments\": [{\n \"keyframes\": [\n {\n \"frame\": 13,\n \"bbox\": bbox_dm\n },\n {\n \"frame\": 15,\n \"bbox\": bbox_dm\n },\n {\n \"frame\": 19,\n \"bbox\": bbox_dm\n },\n ]\n }],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "####### Bounding box (frame specific) ###########\n",
+ "\n",
+ "# Confidence scores are not supported for frame specific bounding box annotations and VideoObjectAnnotation\n",
+ "\n",
+ "# bbox dimensions\n",
+ "bbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n",
+ "\n",
+ "# Python Annotation\n",
+ "bbox_prediction = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=13,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"],\n",
+ " y=bbox_dm[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=15,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=19,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "bbox_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"bbox_video\",\n",
+ " \"segments\": [{\n",
+ " \"keyframes\": [\n",
+ " {\n",
+ " \"frame\": 13,\n",
+ " \"bbox\": bbox_dm\n",
+ " },\n",
+ " {\n",
+ " \"frame\": 15,\n",
+ " \"bbox\": bbox_dm\n",
+ " },\n",
+ " {\n",
+ " \"frame\": 19,\n",
+ " \"bbox\": bbox_dm\n",
+ " },\n",
+ " ]\n",
+ " }],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "######## Point ########\n# Confidence score is not supported for VideoObjectAnnotation\n# Python Annotation\npoint_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"point_video\",\n keyframe=True,\n frame=17,\n value=lb_types.Point(x=660.134, y=407.926),\n )\n]\n\n# NDJSON\npoint_prediction_ndjson = {\n \"name\":\n \"point_video\",\n \"confidence\":\n 0.5,\n \"segments\": [{\n \"keyframes\": [{\n \"frame\": 17,\n \"point\": {\n \"x\": 660.134,\n \"y\": 407.926\n }\n }]\n }],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "######## Point ########\n",
+ "# Confidence score is not supported for VideoObjectAnnotation\n",
+ "# Python Annotation\n",
+ "point_prediction = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"point_video\",\n",
+ " keyframe=True,\n",
+ " frame=17,\n",
+ " value=lb_types.Point(x=660.134, y=407.926),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "point_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"point_video\",\n",
+ " \"confidence\":\n",
+ " 0.5,\n",
+ " \"segments\": [{\n",
+ " \"keyframes\": [{\n",
+ " \"frame\": 17,\n",
+ " \"point\": {\n",
+ " \"x\": 660.134,\n",
+ " \"y\": 407.926\n",
+ " }\n",
+ " }]\n",
+ " }],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "######## Polyline (frame specific) ########\n# confidence scores are not supported in polyline annotations\n\n# Python Annotation\npolyline_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=5,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=12,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=20,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=24,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=45,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n]\n\n# NDJSON\npolyline_prediction_ndjson = {\n \"name\":\n \"line_video_frame\",\n \"segments\": [\n {\n \"keyframes\": [\n {\n \"frame\":\n 5,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 100\n },\n {\n \"x\": 100,\n \"y\": 190\n },\n {\n \"x\": 190,\n \"y\": 220\n },\n ],\n },\n {\n \"frame\":\n 12,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 280\n },\n {\n \"x\": 300,\n \"y\": 380\n },\n {\n \"x\": 400,\n \"y\": 460\n },\n ],\n },\n {\n \"frame\":\n 20,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 180\n },\n {\n \"x\": 100,\n \"y\": 200\n },\n {\n \"x\": 200,\n \"y\": 260\n },\n ],\n },\n ]\n },\n {\n \"keyframes\": [\n {\n \"frame\": 24,\n \"line\": [{\n \"x\": 300,\n \"y\": 310\n }, {\n \"x\": 330,\n \"y\": 430\n }],\n },\n {\n \"frame\": 45,\n \"line\": [{\n \"x\": 600,\n \"y\": 810\n }, {\n \"x\": 900,\n \"y\": 930\n }],\n },\n ]\n },\n ],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "######## Polyline (frame specific) ########\n",
+ "# confidence scores are not supported in polyline annotations\n",
+ "\n",
+ "# Python Annotation\n",
+ "polyline_prediction = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=5,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=12,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=20,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=24,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=45,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "polyline_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"line_video_frame\",\n",
+ " \"segments\": [\n",
+ " {\n",
+ " \"keyframes\": [\n",
+ " {\n",
+ " \"frame\":\n",
+ " 5,\n",
+ " \"line\": [\n",
+ " {\n",
+ " \"x\": 680,\n",
+ " \"y\": 100\n",
+ " },\n",
+ " {\n",
+ " \"x\": 100,\n",
+ " \"y\": 190\n",
+ " },\n",
+ " {\n",
+ " \"x\": 190,\n",
+ " \"y\": 220\n",
+ " },\n",
+ " ],\n",
+ " },\n",
+ " {\n",
+ " \"frame\":\n",
+ " 12,\n",
+ " \"line\": [\n",
+ " {\n",
+ " \"x\": 680,\n",
+ " \"y\": 280\n",
+ " },\n",
+ " {\n",
+ " \"x\": 300,\n",
+ " \"y\": 380\n",
+ " },\n",
+ " {\n",
+ " \"x\": 400,\n",
+ " \"y\": 460\n",
+ " },\n",
+ " ],\n",
+ " },\n",
+ " {\n",
+ " \"frame\":\n",
+ " 20,\n",
+ " \"line\": [\n",
+ " {\n",
+ " \"x\": 680,\n",
+ " \"y\": 180\n",
+ " },\n",
+ " {\n",
+ " \"x\": 100,\n",
+ " \"y\": 200\n",
+ " },\n",
+ " {\n",
+ " \"x\": 200,\n",
+ " \"y\": 260\n",
+ " },\n",
+ " ],\n",
+ " },\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"keyframes\": [\n",
+ " {\n",
+ " \"frame\": 24,\n",
+ " \"line\": [{\n",
+ " \"x\": 300,\n",
+ " \"y\": 310\n",
+ " }, {\n",
+ " \"x\": 330,\n",
+ " \"y\": 430\n",
+ " }],\n",
+ " },\n",
+ " {\n",
+ " \"frame\": 45,\n",
+ " \"line\": [{\n",
+ " \"x\": 600,\n",
+ " \"y\": 810\n",
+ " }, {\n",
+ " \"x\": 900,\n",
+ " \"y\": 930\n",
+ " }],\n",
+ " },\n",
+ " ]\n",
+ " },\n",
+ " ],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "######## Frame base classifications ########\n\n# Python Annotation\nradio_prediction = [\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=9,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=15,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n ),\n]\n\nchecklist_prediction = [\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=29,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=35,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=39,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=45,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n]\n\n## NDJSON\nframe_radio_classification_prediction_ndjson = {\n \"name\": \"radio_class\",\n \"answer\": {\n \"name\": \"first_radio_answer\",\n \"frames\": [{\n \"start\": 9,\n \"end\": 15\n }],\n },\n}\n\n## frame specific\nframe_checklist_classification_prediction_ndjson = {\n \"name\":\n \"checklist_class\",\n \"answer\": [\n {\n \"name\": \"first_checklist_answer\",\n \"frames\": [{\n \"start\": 29,\n \"end\": 35\n }],\n },\n {\n \"name\": \"second_checklist_answer\",\n \"frames\": [{\n \"start\": 39,\n \"end\": 45\n }],\n },\n ],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "######## Frame base classifications ########\n",
+ "\n",
+ "# Python Annotation\n",
+ "radio_prediction = [\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"radio_class\",\n",
+ " frame=9,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\", confidence=0.5)),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"radio_class\",\n",
+ " frame=15,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\", confidence=0.5)),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "checklist_prediction = [\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=29,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
+ " confidence=0.5)\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=35,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
+ " confidence=0.5)\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=39,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
+ " confidence=0.5)\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=45,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
+ " confidence=0.5)\n",
+ " ]),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "## NDJSON\n",
+ "frame_radio_classification_prediction_ndjson = {\n",
+ " \"name\": \"radio_class\",\n",
+ " \"answer\": {\n",
+ " \"name\": \"first_radio_answer\",\n",
+ " \"frames\": [{\n",
+ " \"start\": 9,\n",
+ " \"end\": 15\n",
+ " }],\n",
+ " },\n",
+ "}\n",
+ "\n",
+ "## frame specific\n",
+ "frame_checklist_classification_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"checklist_class\",\n",
+ " \"answer\": [\n",
+ " {\n",
+ " \"name\": \"first_checklist_answer\",\n",
+ " \"frames\": [{\n",
+ " \"start\": 29,\n",
+ " \"end\": 35\n",
+ " }],\n",
+ " },\n",
+ " {\n",
+ " \"name\": \"second_checklist_answer\",\n",
+ " \"frames\": [{\n",
+ " \"start\": 39,\n",
+ " \"end\": 45\n",
+ " }],\n",
+ " },\n",
+ " ],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "####### Global Classifications #########\n\n# Python Annotation\n## For global classifications use ClassificationAnnotation\nglobal_radio_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"radio_class_global\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n )\n]\n\nglobal_checklist_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"checklist_class_global\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5),\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5),\n ]),\n )\n]\n\n# NDJSON\nglobal_radio_classification_ndjson = {\n \"name\": \"radio_class_global\",\n \"answer\": {\n \"name\": \"first_radio_answer\",\n \"confidence\": 0.5\n },\n}\n\nglobal_checklist_classification_ndjson = {\n \"name\":\n \"checklist_class_global\",\n \"answer\": [\n {\n \"name\": \"first_checklist_answer\",\n \"confidence\": 0.5\n },\n {\n \"name\": \"second_checklist_answer\",\n \"confidence\": 0.5\n },\n ],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "####### Global Classifications #########\n",
+ "\n",
+ "# Python Annotation\n",
+ "## For global classifications use ClassificationAnnotation\n",
+ "global_radio_prediction = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"radio_class_global\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\", confidence=0.5)),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "global_checklist_prediction = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class_global\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
+ " confidence=0.5),\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
+ " confidence=0.5),\n",
+ " ]),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "global_radio_classification_ndjson = {\n",
+ " \"name\": \"radio_class_global\",\n",
+ " \"answer\": {\n",
+ " \"name\": \"first_radio_answer\",\n",
+ " \"confidence\": 0.5\n",
+ " },\n",
+ "}\n",
+ "\n",
+ "global_checklist_classification_ndjson = {\n",
+ " \"name\":\n",
+ " \"checklist_class_global\",\n",
+ " \"answer\": [\n",
+ " {\n",
+ " \"name\": \"first_checklist_answer\",\n",
+ " \"confidence\": 0.5\n",
+ " },\n",
+ " {\n",
+ " \"name\": \"second_checklist_answer\",\n",
+ " \"confidence\": 0.5\n",
+ " },\n",
+ " ],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "########## Nested Global Classification ###########\n\n# Python Annotation\nnested_radio_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"nested_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\",\n confidence=0.5,\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_sub_radio_answer\", confidence=0.5)),\n )\n ],\n )),\n )\n]\n\n# NDJSON\nnested_radio_prediction_ndjson = {\n \"name\": \"nested_radio_question\",\n \"answer\": {\n \"name\":\n \"first_radio_answer\",\n \"confidence\":\n 0.5,\n \"classifications\": [{\n \"name\": \"sub_radio_question\",\n \"answer\": {\n \"name\": \"first_sub_radio_answer\",\n \"confidence\": 0.5\n },\n }],\n },\n}\n\n# Python Annotation\nnested_checklist_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"nested_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_checklist_answer\",\n confidence=0.5,\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_sub_checklist_answer\",\n confidence=0.5,\n )\n ]),\n )\n ],\n )\n ]),\n )\n]\n\n# NDJSON\nnested_checklist_prediction_ndjson = {\n \"name\":\n \"nested_checklist_question\",\n \"answer\": [{\n \"name\":\n \"first_checklist_answer\",\n \"confidence\":\n 0.5,\n \"classifications\": [{\n \"name\": \"sub_checklist_question\",\n \"answer\": {\n \"name\": \"first_sub_checklist_answer\",\n \"confidence\": 0.5,\n },\n }],\n }],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "########## Nested Global Classification ###########\n",
+ "\n",
+ "# Python Annotation\n",
+ "nested_radio_prediction = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"nested_radio_question\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\",\n",
+ " confidence=0.5,\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"sub_radio_question\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_sub_radio_answer\", confidence=0.5)),\n",
+ " )\n",
+ " ],\n",
+ " )),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "nested_radio_prediction_ndjson = {\n",
+ " \"name\": \"nested_radio_question\",\n",
+ " \"answer\": {\n",
+ " \"name\":\n",
+ " \"first_radio_answer\",\n",
+ " \"confidence\":\n",
+ " 0.5,\n",
+ " \"classifications\": [{\n",
+ " \"name\": \"sub_radio_question\",\n",
+ " \"answer\": {\n",
+ " \"name\": \"first_sub_radio_answer\",\n",
+ " \"confidence\": 0.5\n",
+ " },\n",
+ " }],\n",
+ " },\n",
+ "}\n",
+ "\n",
+ "# Python Annotation\n",
+ "nested_checklist_prediction = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"nested_checklist_question\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"first_checklist_answer\",\n",
+ " confidence=0.5,\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"sub_checklist_question\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"first_sub_checklist_answer\",\n",
+ " confidence=0.5,\n",
+ " )\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " )\n",
+ " ]),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "# NDJSON\n",
+ "nested_checklist_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"nested_checklist_question\",\n",
+ " \"answer\": [{\n",
+ " \"name\":\n",
+ " \"first_checklist_answer\",\n",
+ " \"confidence\":\n",
+ " 0.5,\n",
+ " \"classifications\": [{\n",
+ " \"name\": \"sub_checklist_question\",\n",
+ " \"answer\": {\n",
+ " \"name\": \"first_sub_checklist_answer\",\n",
+ " \"confidence\": 0.5,\n",
+ " },\n",
+ " }],\n",
+ " }],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "########## Classifications under frame base tools ##########\n# Confidence scores are not supported for frame specific bounding box annotations with sub-classifications\n\n# bounding box dimensions\nbbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n\n# Python Annotation\nframe_bbox_with_checklist_subclass_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=10,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=11,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ),\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n )\n ],\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ),\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"second_checklist_answer\", confidence=0.5)\n ]),\n )\n ],\n ),\n]\n\nframe_bbox_with_checklist_subclass_prediction_ndjson = {\n \"name\":\n \"bbox_class\",\n \"segments\": [{\n \"keyframes\": [\n {\n \"frame\": 10,\n \"bbox\": bbox_dm2\n },\n {\n \"frame\":\n 11,\n \"bbox\":\n bbox_dm2,\n \"classifications\": [{\n \"name\":\n \"bbox_radio\",\n \"answer\": [{\n \"name\": \"first_checklist_answer\",\n \"confidence\": 0.5,\n }],\n }],\n },\n {\n \"frame\":\n 13,\n \"bbox\":\n bbox_dm2,\n \"classifications\": [{\n \"name\":\n \"bbox_radio\",\n \"answer\": [{\n \"name\": \"second_checklist_answer\",\n \"confidence\": 0.5,\n }],\n }],\n },\n ]\n }],\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "########## Classifications under frame base tools ##########\n",
+ "# Confidence scores are not supported for frame specific bounding box annotations with sub-classifications\n",
+ "\n",
+ "# bounding box dimensions\n",
+ "bbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n",
+ "\n",
+ "# Python Annotation\n",
+ "frame_bbox_with_checklist_subclass_prediction = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=10,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
+ " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=11,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
+ " confidence=0.5)\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=13,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"second_checklist_answer\", confidence=0.5)\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "frame_bbox_with_checklist_subclass_prediction_ndjson = {\n",
+ " \"name\":\n",
+ " \"bbox_class\",\n",
+ " \"segments\": [{\n",
+ " \"keyframes\": [\n",
+ " {\n",
+ " \"frame\": 10,\n",
+ " \"bbox\": bbox_dm2\n",
+ " },\n",
+ " {\n",
+ " \"frame\":\n",
+ " 11,\n",
+ " \"bbox\":\n",
+ " bbox_dm2,\n",
+ " \"classifications\": [{\n",
+ " \"name\":\n",
+ " \"bbox_radio\",\n",
+ " \"answer\": [{\n",
+ " \"name\": \"first_checklist_answer\",\n",
+ " \"confidence\": 0.5,\n",
+ " }],\n",
+ " }],\n",
+ " },\n",
+ " {\n",
+ " \"frame\":\n",
+ " 13,\n",
+ " \"bbox\":\n",
+ " bbox_dm2,\n",
+ " \"classifications\": [{\n",
+ " \"name\":\n",
+ " \"bbox_radio\",\n",
+ " \"answer\": [{\n",
+ " \"name\": \"second_checklist_answer\",\n",
+ " \"confidence\": 0.5,\n",
+ " }],\n",
+ " }],\n",
+ " },\n",
+ " ]\n",
+ " }],\n",
+ "}"
+ ]
},
{
- "metadata": {},
- "source": "######### Free text classification ###########\ntext_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"free_text\", # must match your ontology feature's name\n value=lb_types.Text(answer=\"sample text\", confidence=0.5),\n )\n]\n\ntext_prediction_ndjson = {\n \"name\": \"free_text\",\n \"confidence\": 0.5,\n \"answer\": \"sample text\",\n}",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "######### Free text classification ###########\n",
+ "text_prediction = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"free_text\", # must match your ontology feature's name\n",
+ " value=lb_types.Text(answer=\"sample text\", confidence=0.5),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "text_prediction_ndjson = {\n",
+ " \"name\": \"free_text\",\n",
+ " \"confidence\": 0.5,\n",
+ " \"answer\": \"sample text\",\n",
+ "}"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 1: Import data rows into Catalog"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# send a sample image as batch to the project\nglobal_key = \"sample-video-2.mp4\" + str(uuid.uuid4())\ntest_img_url = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\",\n \"global_key\":\n global_key,\n}\ndataset = client.create_dataset(\n name=\"Video prediction demo\",\n iam_integration=\n None, # Removing this argument will default to the organziation's default iam integration\n)\ntask = dataset.create_data_rows([test_img_url])\ntask.wait_till_done()\nprint(\"Errors: \", task.errors)\nprint(\"Failed data rows: \", task.failed_data_rows)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# send a sample image as batch to the project\n",
+ "global_key = \"sample-video-2.mp4\" + str(uuid.uuid4())\n",
+ "test_img_url = {\n",
+ " \"row_data\":\n",
+ " \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\",\n",
+ " \"global_key\":\n",
+ " global_key,\n",
+ "}\n",
+ "dataset = client.create_dataset(\n",
+ " name=\"Video prediction demo\",\n",
+ " iam_integration=\n",
+ " None, # Removing this argument will default to the organziation's default iam integration\n",
+ ")\n",
+ "task = dataset.create_data_rows([test_img_url])\n",
+ "task.wait_till_done()\n",
+ "print(\"Errors: \", task.errors)\n",
+ "print(\"Failed data rows: \", task.failed_data_rows)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 2: Create/select an Ontology for your model predictions\n",
"Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the name/instructions fields in your annotations to ensure the correct feature schemas are matched.\n"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "ontology_builder = lb.OntologyBuilder(\n tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"bbox_video\"),\n lb.Tool(tool=lb.Tool.Type.POINT, name=\"point_video\"),\n lb.Tool(tool=lb.Tool.Type.LINE, name=\"line_video_frame\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"video_mask\"),\n lb.Tool(\n tool=lb.Tool.Type.BBOX,\n name=\"bbox_class\",\n classifications=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class\",\n scope=lb.Classification.Scope.\n INDEX, ## defined scope for frame classifications\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n )\n ],\n ),\n ],\n classifications=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class\",\n scope=lb.Classification.Scope.\n INDEX, ## defined scope for frame classifications\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"radio_class\",\n scope=lb.Classification.Scope.INDEX,\n options=[\n lb.Option(value=\"first_radio_answer\"),\n lb.Option(value=\"second_radio_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"nested_radio_question\",\n options=[\n lb.Option(\n \"first_radio_answer\",\n options=[\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"sub_radio_question\",\n options=[lb.Option(\"first_sub_radio_answer\")],\n )\n ],\n )\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"nested_checklist_question\",\n options=[\n lb.Option(\n \"first_checklist_answer\",\n options=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"sub_checklist_question\",\n options=[lb.Option(\"first_sub_checklist_answer\")],\n )\n ],\n )\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"radio_class_global\",\n options=[\n lb.Option(value=\"first_radio_answer\"),\n lb.Option(value=\"second_radio_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class_global\",\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n ),\n lb.Classification(class_type=lb.Classification.Type.TEXT,\n name=\"free_text\"),\n ],\n)\n\nontology = client.create_ontology(\n \"Ontology Video Annotations\",\n ontology_builder.asdict(),\n media_type=lb.MediaType.Video,\n)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "ontology_builder = lb.OntologyBuilder(\n",
+ " tools=[\n",
+ " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"bbox_video\"),\n",
+ " lb.Tool(tool=lb.Tool.Type.POINT, name=\"point_video\"),\n",
+ " lb.Tool(tool=lb.Tool.Type.LINE, name=\"line_video_frame\"),\n",
+ " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"video_mask\"),\n",
+ " lb.Tool(\n",
+ " tool=lb.Tool.Type.BBOX,\n",
+ " name=\"bbox_class\",\n",
+ " classifications=[\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.CHECKLIST,\n",
+ " name=\"checklist_class\",\n",
+ " scope=lb.Classification.Scope.\n",
+ " INDEX, ## defined scope for frame classifications\n",
+ " options=[\n",
+ " lb.Option(value=\"first_checklist_answer\"),\n",
+ " lb.Option(value=\"second_checklist_answer\"),\n",
+ " ],\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ " ],\n",
+ " classifications=[\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.CHECKLIST,\n",
+ " name=\"checklist_class\",\n",
+ " scope=lb.Classification.Scope.\n",
+ " INDEX, ## defined scope for frame classifications\n",
+ " options=[\n",
+ " lb.Option(value=\"first_checklist_answer\"),\n",
+ " lb.Option(value=\"second_checklist_answer\"),\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.RADIO,\n",
+ " name=\"radio_class\",\n",
+ " scope=lb.Classification.Scope.INDEX,\n",
+ " options=[\n",
+ " lb.Option(value=\"first_radio_answer\"),\n",
+ " lb.Option(value=\"second_radio_answer\"),\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.RADIO,\n",
+ " name=\"nested_radio_question\",\n",
+ " options=[\n",
+ " lb.Option(\n",
+ " \"first_radio_answer\",\n",
+ " options=[\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.RADIO,\n",
+ " name=\"sub_radio_question\",\n",
+ " options=[lb.Option(\"first_sub_radio_answer\")],\n",
+ " )\n",
+ " ],\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.CHECKLIST,\n",
+ " name=\"nested_checklist_question\",\n",
+ " options=[\n",
+ " lb.Option(\n",
+ " \"first_checklist_answer\",\n",
+ " options=[\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.CHECKLIST,\n",
+ " name=\"sub_checklist_question\",\n",
+ " options=[lb.Option(\"first_sub_checklist_answer\")],\n",
+ " )\n",
+ " ],\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.RADIO,\n",
+ " name=\"radio_class_global\",\n",
+ " options=[\n",
+ " lb.Option(value=\"first_radio_answer\"),\n",
+ " lb.Option(value=\"second_radio_answer\"),\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(\n",
+ " class_type=lb.Classification.Type.CHECKLIST,\n",
+ " name=\"checklist_class_global\",\n",
+ " options=[\n",
+ " lb.Option(value=\"first_checklist_answer\"),\n",
+ " lb.Option(value=\"second_checklist_answer\"),\n",
+ " ],\n",
+ " ),\n",
+ " lb.Classification(class_type=lb.Classification.Type.TEXT,\n",
+ " name=\"free_text\"),\n",
+ " ],\n",
+ ")\n",
+ "\n",
+ "ontology = client.create_ontology(\n",
+ " \"Ontology Video Annotations\",\n",
+ " ontology_builder.asdict(),\n",
+ " media_type=lb.MediaType.Video,\n",
+ ")"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 3: Create a Model and Model Run"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# create Model\nmodel = client.create_model(name=\"video_model_run_\" + str(uuid.uuid4()),\n ontology_id=ontology.uid)\n# create Model Run\nmodel_run = model.create_model_run(\"iteration 1\")",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# create Model\n",
+ "model = client.create_model(name=\"video_model_run_\" + str(uuid.uuid4()),\n",
+ " ontology_id=ontology.uid)\n",
+ "# create Model Run\n",
+ "model_run = model.create_model_run(\"iteration 1\")"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 4: Send data rows to the Model Run"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "model_run.upsert_data_rows(global_keys=[global_key])",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "model_run.upsert_data_rows(global_keys=[global_key])"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 5. Create the predictions payload\n",
"\n",
"Create the annotations payload using the snippets of [code here](https://docs.labelbox.com/reference/import-video-annotations).\n",
"\n"
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"#### Python Annotation Types"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "label_predictions = []\nannotations_list = [\n point_prediction,\n bbox_prediction,\n polyline_prediction,\n checklist_prediction,\n radio_prediction,\n nested_radio_prediction,\n nested_checklist_prediction,\n frame_bbox_with_checklist_subclass_prediction,\n global_radio_prediction,\n global_checklist_prediction,\n text_prediction,\n]\n\nflatten_list_annotations = [\n ann for ann_sublist in annotations_list for ann in ann_sublist\n]\n\nlabel_predictions.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=flatten_list_annotations))",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "label_predictions = []\n",
+ "annotations_list = [\n",
+ " point_prediction,\n",
+ " bbox_prediction,\n",
+ " polyline_prediction,\n",
+ " checklist_prediction,\n",
+ " radio_prediction,\n",
+ " nested_radio_prediction,\n",
+ " nested_checklist_prediction,\n",
+ " frame_bbox_with_checklist_subclass_prediction,\n",
+ " global_radio_prediction,\n",
+ " global_checklist_prediction,\n",
+ " text_prediction,\n",
+ "]\n",
+ "\n",
+ "flatten_list_annotations = [\n",
+ " ann for ann_sublist in annotations_list for ann in ann_sublist\n",
+ "]\n",
+ "\n",
+ "label_predictions.append(\n",
+ " lb_types.Label(data={\"global_key\": global_key},\n",
+ " annotations=flatten_list_annotations))"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"#### NDJSON annotations"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\nlabel_prediction_ndjson = []\n\nfor annotation in [\n point_prediction_ndjson,\n bbox_prediction_ndjson,\n polyline_prediction_ndjson,\n frame_checklist_classification_prediction_ndjson,\n frame_radio_classification_prediction_ndjson,\n nested_radio_prediction_ndjson,\n nested_checklist_prediction_ndjson,\n frame_bbox_with_checklist_subclass_prediction_ndjson,\n global_radio_classification_ndjson,\n global_checklist_classification_ndjson,\n text_prediction_ndjson,\n]:\n annotation.update({\"dataRow\": {\"globalKey\": global_key}})\n label_prediction_ndjson.append(annotation)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n",
+ "label_prediction_ndjson = []\n",
+ "\n",
+ "for annotation in [\n",
+ " point_prediction_ndjson,\n",
+ " bbox_prediction_ndjson,\n",
+ " polyline_prediction_ndjson,\n",
+ " frame_checklist_classification_prediction_ndjson,\n",
+ " frame_radio_classification_prediction_ndjson,\n",
+ " nested_radio_prediction_ndjson,\n",
+ " nested_checklist_prediction_ndjson,\n",
+ " frame_bbox_with_checklist_subclass_prediction_ndjson,\n",
+ " global_radio_classification_ndjson,\n",
+ " global_checklist_classification_ndjson,\n",
+ " text_prediction_ndjson,\n",
+ "]:\n",
+ " annotation.update({\"dataRow\": {\"globalKey\": global_key}})\n",
+ " label_prediction_ndjson.append(annotation)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 6. Upload the predictions payload to the Model Run "
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# Upload the prediction label to the Model Run\nupload_job_prediction = model_run.add_predictions(\n name=\"prediction_upload_job\" + str(uuid.uuid4()),\n predictions=label_predictions,\n)\n\n# Errors will appear for annotation uploads that failed.\nprint(\"Errors:\", upload_job_prediction.errors)\nprint(\"Status of uploads: \", upload_job_prediction.statuses)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# Upload the prediction label to the Model Run\n",
+ "upload_job_prediction = model_run.add_predictions(\n",
+ " name=\"prediction_upload_job\" + str(uuid.uuid4()),\n",
+ " predictions=label_predictions,\n",
+ ")\n",
+ "\n",
+ "# Errors will appear for annotation uploads that failed.\n",
+ "print(\"Errors:\", upload_job_prediction.errors)\n",
+ "print(\"Status of uploads: \", upload_job_prediction.statuses)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 7: Send annotations to the Model Run \n",
"To send annotations to a Model Run, we must first import them into a project, create a label payload and then send them to the Model Run."
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.1. Create a labelbox project"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# Create a Labelbox project\nproject = client.create_project(name=\"video_prediction_demo\",\n media_type=lb.MediaType.Video)\nproject.setup_editor(ontology)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# Create a Labelbox project\n",
+ "project = client.create_project(name=\"video_prediction_demo\",\n",
+ " media_type=lb.MediaType.Video)\n",
+ "project.setup_editor(ontology)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.2. Create a batch to send to the project "
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "project.create_batch(\n \"batch_video_prediction_demo\", # Each batch in a project must have a unique name\n global_keys=[global_key\n ], # A list of data rows, data row ids or global keys\n priority=5, # priority between 1(Highest) - 5(lowest)\n)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "project.create_batch(\n",
+ " \"batch_video_prediction_demo\", # Each batch in a project must have a unique name\n",
+ " global_keys=[global_key\n",
+ " ], # A list of data rows, data row ids or global keys\n",
+ " priority=5, # priority between 1(Highest) - 5(lowest)\n",
+ ")"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.3 Create the annotations payload"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# Python Annotation\npoint_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"point_video\",\n keyframe=True,\n frame=17,\n value=lb_types.Point(x=660.134, y=407.926),\n )\n]\n\n######## Polyline ########\n\n# Python Annotation\npolyline_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=5,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=12,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=20,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=24,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=45,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n]\n\nradio_annotation = [\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=9,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=15,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n ),\n]\n\nchecklist_annotation = [\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=29,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=35,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=39,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=45,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n ]),\n ),\n]\n\nglobal_radio_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"radio_class_global\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n )\n]\n\nglobal_checklist_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"checklist_class_global\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\"),\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\"),\n ]),\n )\n]\n\nnested_radio_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"nested_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_sub_radio_answer\")),\n )\n ],\n )),\n 
)\n]\n\nnested_checklist_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"nested_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_checklist_answer\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_sub_checklist_answer\")\n ]),\n )\n ],\n )\n ]),\n )\n]\n\nbbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\nframe_bbox_with_checklist_subclass = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=10,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=11,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n )\n ],\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"second_checklist_answer\")\n ]),\n )\n ],\n ),\n]\n\nbbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\nbbox_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"],\n y=bbox_dm[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=15,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=19,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n]\n\ntext_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"free_text\", # must match your ontology feature's name\n value=lb_types.Text(answer=\"sample text\"),\n )\n]",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# Python Annotation\n",
+ "point_annotation = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"point_video\",\n",
+ " keyframe=True,\n",
+ " frame=17,\n",
+ " value=lb_types.Point(x=660.134, y=407.926),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "######## Polyline ########\n",
+ "\n",
+ "# Python Annotation\n",
+ "polyline_annotation = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=5,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=12,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=20,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=24,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"line_video_frame\",\n",
+ " keyframe=True,\n",
+ " frame=45,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Line(\n",
+ " points=[lb_types.Point(x=680, y=100),\n",
+ " lb_types.Point(x=100, y=190)]),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "radio_annotation = [\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"radio_class\",\n",
+ " frame=9,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\")),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"radio_class\",\n",
+ " frame=15,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\")),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "checklist_annotation = [\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=29,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=35,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=39,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n",
+ " ]),\n",
+ " ),\n",
+ " lb_types.VideoClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " frame=45,\n",
+ " segment_index=1,\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n",
+ " ]),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "global_radio_annotation = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"radio_class_global\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\")),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "global_checklist_annotation = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class_global\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\"),\n",
+ " lb_types.ClassificationAnswer(name=\"second_checklist_answer\"),\n",
+ " ]),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "nested_radio_annotation = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"nested_radio_question\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_radio_answer\",\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"sub_radio_question\",\n",
+ " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
+ " name=\"first_sub_radio_answer\")),\n",
+ " )\n",
+ " ],\n",
+ " )),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "nested_checklist_annotation = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"nested_checklist_question\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"first_checklist_answer\",\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"sub_checklist_question\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"first_sub_checklist_answer\")\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " )\n",
+ " ]),\n",
+ " )\n",
+ "]\n",
+ "\n",
+ "bbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n",
+ "frame_bbox_with_checklist_subclass = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=10,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
+ " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=11,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
+ " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_class\",\n",
+ " keyframe=True,\n",
+ " frame=13,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
+ " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
+ " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " classifications=[\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"checklist_class\",\n",
+ " value=lb_types.Checklist(answer=[\n",
+ " lb_types.ClassificationAnswer(\n",
+ " name=\"second_checklist_answer\")\n",
+ " ]),\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "bbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n",
+ "bbox_annotation = [\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=13,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"],\n",
+ " y=bbox_dm[\"top\"]), # x = left, y = top\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ), # x= left + width , y = top + height\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=15,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " ),\n",
+ " lb_types.VideoObjectAnnotation(\n",
+ " name=\"bbox_video\",\n",
+ " keyframe=True,\n",
+ " frame=19,\n",
+ " segment_index=0,\n",
+ " value=lb_types.Rectangle(\n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
+ " end=lb_types.Point(\n",
+ " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
+ " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
+ " ),\n",
+ " ),\n",
+ " ),\n",
+ "]\n",
+ "\n",
+ "text_annotation = [\n",
+ " lb_types.ClassificationAnnotation(\n",
+ " name=\"free_text\", # must match your ontology feature's name\n",
+ " value=lb_types.Text(answer=\"sample text\"),\n",
+ " )\n",
+ "]"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.4. Create the label object"
- ],
- "cell_type": "markdown"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
- "source": [],
- "cell_type": "markdown"
+ "source": []
},
{
- "metadata": {},
- "source": "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n\nlabels = []\nannotations_list = [\n checklist_annotation,\n radio_annotation,\n bbox_annotation,\n frame_bbox_with_checklist_subclass,\n point_annotation,\n polyline_annotation,\n global_checklist_annotation,\n global_radio_annotation,\n nested_checklist_annotation,\n nested_radio_annotation,\n text_annotation,\n]\n\nflatten_list_annotations = [\n ann for ann_sublist in annotations_list for ann in ann_sublist\n]\n\nlabels.append(\n lb_types.Label(\n data=lb_types.VideoData(global_key=global_key),\n annotations=flatten_list_annotations,\n ))",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n",
+ "\n",
+ "labels = []\n",
+ "annotations_list = [\n",
+ " checklist_annotation,\n",
+ " radio_annotation,\n",
+ " bbox_annotation,\n",
+ " frame_bbox_with_checklist_subclass,\n",
+ " point_annotation,\n",
+ " polyline_annotation,\n",
+ " global_checklist_annotation,\n",
+ " global_radio_annotation,\n",
+ " nested_checklist_annotation,\n",
+ " nested_radio_annotation,\n",
+ " text_annotation,\n",
+ "]\n",
+ "\n",
+ "flatten_list_annotations = [\n",
+ " ann for ann_sublist in annotations_list for ann in ann_sublist\n",
+ "]\n",
+ "\n",
+ "labels.append(\n",
+ " lb_types.Label(\n",
+ " data={\"global_key\":global_key},\n",
+ " annotations=flatten_list_annotations,\n",
+ " ))"
+ ]
},
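The `data={"global_key": global_key}` form above is the point of this patch: it replaces the `lb_types.VideoData(global_key=...)` wrapper used on the removed line of this hunk. A minimal side-by-side sketch of the two spellings, assuming the SDK still accepts both while the migration is underway:

```python
import labelbox.types as lb_types

global_key = "sample-video-2.mp4"  # placeholder; the notebook appends a uuid

# Old spelling (removed by this patch): wrap the key in a VideoData object.
# label = lb_types.Label(data=lb_types.VideoData(global_key=global_key), ...)

# New spelling (added by this patch): a plain dict identifying the data row.
label = lb_types.Label(
    data={"global_key": global_key},
    annotations=[],  # the notebook passes flatten_list_annotations here
)
```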
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.5. Upload annotations to the project using Label Import"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "upload_job_annotation = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"video_annotations_import_\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nupload_job_annotation.wait_until_done()\n# Errors will appear for annotation uploads that failed.\nprint(\"Errors:\", upload_job_annotation.errors)\nprint(\"Status of uploads: \", upload_job_annotation.statuses)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "upload_job_annotation = lb.LabelImport.create_from_objects(\n",
+ " client=client,\n",
+ " project_id=project.uid,\n",
+ " name=\"video_annotations_import_\" + str(uuid.uuid4()),\n",
+ " labels=labels,\n",
+ ")\n",
+ "\n",
+ "upload_job_annotation.wait_until_done()\n",
+ "# Errors will appear for annotation uploads that failed.\n",
+ "print(\"Errors:\", upload_job_annotation.errors)\n",
+ "print(\"Status of uploads: \", upload_job_annotation.statuses)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.6. Send the annotations to the Model Run"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# get the labels id from the project\nmodel_run.upsert_labels(project_id=project.uid)",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# get the labels id from the project\n",
+ "model_run.upsert_labels(project_id=project.uid)"
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {},
"source": [
"## Optional deletions for cleanup \n"
- ],
- "cell_type": "markdown"
+ ]
},
{
- "metadata": {},
- "source": "# project.delete()\n# dataset.delete()",
"cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
- "execution_count": null
+ "source": [
+ "# project.delete()\n",
+ "# dataset.delete()"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
}
- ]
-}
\ No newline at end of file
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
From 641061189f606f3475cb1904afba90a8e2f281a3 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Tue, 16 Jul 2024 21:35:30 +0000
Subject: [PATCH 2/3] :art: Cleaned
---
.../prediction_upload/video_predictions.ipynb | 1376 ++---------------
1 file changed, 128 insertions(+), 1248 deletions(-)
diff --git a/examples/prediction_upload/video_predictions.ipynb b/examples/prediction_upload/video_predictions.ipynb
index 5b8ed4330..1157f4441 100644
--- a/examples/prediction_upload/video_predictions.ipynb
+++ b/examples/prediction_upload/video_predictions.ipynb
@@ -1,16 +1,18 @@
{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {},
"cells": [
{
- "cell_type": "markdown",
"metadata": {},
"source": [
- "\n",
- " \n",
+ " | ",
+ " ",
" | \n"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"\n",
@@ -22,10 +24,10 @@
" \n",
" | "
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"# Video Prediction Import \n",
@@ -47,1450 +49,328 @@
"- Raster segmentation masks [not supported in model]\n",
"- Vector segmentation masks [not supported in video editor]\n",
"\n"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Setup"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "%pip install -q \"labelbox[data]\"",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "%pip install -q \"labelbox[data]\""
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "import labelbox as lb\n",
- "import labelbox.types as lb_types\n",
- "import uuid"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Replace with your API Key \n",
"Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "API_KEY = \"\"\nclient = lb.Client(API_KEY)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "API_KEY = \"\"\n",
- "client = lb.Client(API_KEY)"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Supported Predictions\n",
"- Confidence scores are currently not supported for segment or frame annotations, which are required for bounding box, point, and line for video assets. For this tutorial, only the radio and checklist annotations will have confidence scores."
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "####### Bounding box (frame specific) ###########\n\n# Confidence scores are not supported for frame specific bounding box annotations and VideoObjectAnnotation\n\n# bbox dimensions\nbbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n\n# Python Annotation\nbbox_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"],\n y=bbox_dm[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=15,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=19,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n]\n\n# NDJSON\nbbox_prediction_ndjson = {\n \"name\":\n \"bbox_video\",\n \"segments\": [{\n \"keyframes\": [\n {\n \"frame\": 13,\n \"bbox\": bbox_dm\n },\n {\n \"frame\": 15,\n \"bbox\": bbox_dm\n },\n {\n \"frame\": 19,\n \"bbox\": bbox_dm\n },\n ]\n }],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "####### Bounding box (frame specific) ###########\n",
- "\n",
- "# Confidence scores are not supported for frame specific bounding box annotations and VideoObjectAnnotation\n",
- "\n",
- "# bbox dimensions\n",
- "bbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n",
- "\n",
- "# Python Annotation\n",
- "bbox_prediction = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=13,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"],\n",
- " y=bbox_dm[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=15,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ),\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=19,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ),\n",
- " ),\n",
- " ),\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "bbox_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"bbox_video\",\n",
- " \"segments\": [{\n",
- " \"keyframes\": [\n",
- " {\n",
- " \"frame\": 13,\n",
- " \"bbox\": bbox_dm\n",
- " },\n",
- " {\n",
- " \"frame\": 15,\n",
- " \"bbox\": bbox_dm\n",
- " },\n",
- " {\n",
- " \"frame\": 19,\n",
- " \"bbox\": bbox_dm\n",
- " },\n",
- " ]\n",
- " }],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "######## Point ########\n# Confidence score is not supported for VideoObjectAnnotation\n# Python Annotation\npoint_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"point_video\",\n keyframe=True,\n frame=17,\n value=lb_types.Point(x=660.134, y=407.926),\n )\n]\n\n# NDJSON\npoint_prediction_ndjson = {\n \"name\":\n \"point_video\",\n \"confidence\":\n 0.5,\n \"segments\": [{\n \"keyframes\": [{\n \"frame\": 17,\n \"point\": {\n \"x\": 660.134,\n \"y\": 407.926\n }\n }]\n }],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "######## Point ########\n",
- "# Confidence score is not supported for VideoObjectAnnotation\n",
- "# Python Annotation\n",
- "point_prediction = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"point_video\",\n",
- " keyframe=True,\n",
- " frame=17,\n",
- " value=lb_types.Point(x=660.134, y=407.926),\n",
- " )\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "point_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"point_video\",\n",
- " \"confidence\":\n",
- " 0.5,\n",
- " \"segments\": [{\n",
- " \"keyframes\": [{\n",
- " \"frame\": 17,\n",
- " \"point\": {\n",
- " \"x\": 660.134,\n",
- " \"y\": 407.926\n",
- " }\n",
- " }]\n",
- " }],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "######## Polyline (frame specific) ########\n# confidence scores are not supported in polyline annotations\n\n# Python Annotation\npolyline_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=5,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=12,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=20,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=24,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=45,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n]\n\n# NDJSON\npolyline_prediction_ndjson = {\n \"name\":\n \"line_video_frame\",\n \"segments\": [\n {\n \"keyframes\": [\n {\n \"frame\":\n 5,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 100\n },\n {\n \"x\": 100,\n \"y\": 190\n },\n {\n \"x\": 190,\n \"y\": 220\n },\n ],\n },\n {\n \"frame\":\n 12,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 280\n },\n {\n \"x\": 300,\n \"y\": 380\n },\n {\n \"x\": 400,\n \"y\": 460\n },\n ],\n },\n {\n \"frame\":\n 20,\n \"line\": [\n {\n \"x\": 680,\n \"y\": 180\n },\n {\n \"x\": 100,\n \"y\": 200\n },\n {\n \"x\": 200,\n \"y\": 260\n },\n ],\n },\n ]\n },\n {\n \"keyframes\": [\n {\n \"frame\": 24,\n \"line\": [{\n \"x\": 300,\n \"y\": 310\n }, {\n \"x\": 330,\n \"y\": 430\n }],\n },\n {\n \"frame\": 45,\n \"line\": [{\n \"x\": 600,\n \"y\": 810\n }, {\n \"x\": 900,\n \"y\": 930\n }],\n },\n ]\n },\n ],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "######## Polyline (frame specific) ########\n",
- "# confidence scores are not supported in polyline annotations\n",
- "\n",
- "# Python Annotation\n",
- "polyline_prediction = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=5,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=12,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=20,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=24,\n",
- " segment_index=1,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=45,\n",
- " segment_index=1,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "polyline_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"line_video_frame\",\n",
- " \"segments\": [\n",
- " {\n",
- " \"keyframes\": [\n",
- " {\n",
- " \"frame\":\n",
- " 5,\n",
- " \"line\": [\n",
- " {\n",
- " \"x\": 680,\n",
- " \"y\": 100\n",
- " },\n",
- " {\n",
- " \"x\": 100,\n",
- " \"y\": 190\n",
- " },\n",
- " {\n",
- " \"x\": 190,\n",
- " \"y\": 220\n",
- " },\n",
- " ],\n",
- " },\n",
- " {\n",
- " \"frame\":\n",
- " 12,\n",
- " \"line\": [\n",
- " {\n",
- " \"x\": 680,\n",
- " \"y\": 280\n",
- " },\n",
- " {\n",
- " \"x\": 300,\n",
- " \"y\": 380\n",
- " },\n",
- " {\n",
- " \"x\": 400,\n",
- " \"y\": 460\n",
- " },\n",
- " ],\n",
- " },\n",
- " {\n",
- " \"frame\":\n",
- " 20,\n",
- " \"line\": [\n",
- " {\n",
- " \"x\": 680,\n",
- " \"y\": 180\n",
- " },\n",
- " {\n",
- " \"x\": 100,\n",
- " \"y\": 200\n",
- " },\n",
- " {\n",
- " \"x\": 200,\n",
- " \"y\": 260\n",
- " },\n",
- " ],\n",
- " },\n",
- " ]\n",
- " },\n",
- " {\n",
- " \"keyframes\": [\n",
- " {\n",
- " \"frame\": 24,\n",
- " \"line\": [{\n",
- " \"x\": 300,\n",
- " \"y\": 310\n",
- " }, {\n",
- " \"x\": 330,\n",
- " \"y\": 430\n",
- " }],\n",
- " },\n",
- " {\n",
- " \"frame\": 45,\n",
- " \"line\": [{\n",
- " \"x\": 600,\n",
- " \"y\": 810\n",
- " }, {\n",
- " \"x\": 900,\n",
- " \"y\": 930\n",
- " }],\n",
- " },\n",
- " ]\n",
- " },\n",
- " ],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "######## Frame base classifications ########\n\n# Python Annotation\nradio_prediction = [\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=9,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=15,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n ),\n]\n\nchecklist_prediction = [\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=29,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=35,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=39,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=45,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5)\n ]),\n ),\n]\n\n## NDJSON\nframe_radio_classification_prediction_ndjson = {\n \"name\": \"radio_class\",\n \"answer\": {\n \"name\": \"first_radio_answer\",\n \"frames\": [{\n \"start\": 9,\n \"end\": 15\n }],\n },\n}\n\n## frame specific\nframe_checklist_classification_prediction_ndjson = {\n \"name\":\n \"checklist_class\",\n \"answer\": [\n {\n \"name\": \"first_checklist_answer\",\n \"frames\": [{\n \"start\": 29,\n \"end\": 35\n }],\n },\n {\n \"name\": \"second_checklist_answer\",\n \"frames\": [{\n \"start\": 39,\n \"end\": 45\n }],\n },\n ],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "######## Frame base classifications ########\n",
- "\n",
- "# Python Annotation\n",
- "radio_prediction = [\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"radio_class\",\n",
- " frame=9,\n",
- " segment_index=0,\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\", confidence=0.5)),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"radio_class\",\n",
- " frame=15,\n",
- " segment_index=0,\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\", confidence=0.5)),\n",
- " ),\n",
- "]\n",
- "\n",
- "checklist_prediction = [\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=29,\n",
- " segment_index=0,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
- " confidence=0.5)\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=35,\n",
- " segment_index=0,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
- " confidence=0.5)\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=39,\n",
- " segment_index=1,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
- " confidence=0.5)\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=45,\n",
- " segment_index=1,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
- " confidence=0.5)\n",
- " ]),\n",
- " ),\n",
- "]\n",
- "\n",
- "## NDJSON\n",
- "frame_radio_classification_prediction_ndjson = {\n",
- " \"name\": \"radio_class\",\n",
- " \"answer\": {\n",
- " \"name\": \"first_radio_answer\",\n",
- " \"frames\": [{\n",
- " \"start\": 9,\n",
- " \"end\": 15\n",
- " }],\n",
- " },\n",
- "}\n",
- "\n",
- "## frame specific\n",
- "frame_checklist_classification_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"checklist_class\",\n",
- " \"answer\": [\n",
- " {\n",
- " \"name\": \"first_checklist_answer\",\n",
- " \"frames\": [{\n",
- " \"start\": 29,\n",
- " \"end\": 35\n",
- " }],\n",
- " },\n",
- " {\n",
- " \"name\": \"second_checklist_answer\",\n",
- " \"frames\": [{\n",
- " \"start\": 39,\n",
- " \"end\": 45\n",
- " }],\n",
- " },\n",
- " ],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "####### Global Classifications #########\n\n# Python Annotation\n## For global classifications use ClassificationAnnotation\nglobal_radio_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"radio_class_global\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\", confidence=0.5)),\n )\n]\n\nglobal_checklist_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"checklist_class_global\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5),\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n confidence=0.5),\n ]),\n )\n]\n\n# NDJSON\nglobal_radio_classification_ndjson = {\n \"name\": \"radio_class_global\",\n \"answer\": {\n \"name\": \"first_radio_answer\",\n \"confidence\": 0.5\n },\n}\n\nglobal_checklist_classification_ndjson = {\n \"name\":\n \"checklist_class_global\",\n \"answer\": [\n {\n \"name\": \"first_checklist_answer\",\n \"confidence\": 0.5\n },\n {\n \"name\": \"second_checklist_answer\",\n \"confidence\": 0.5\n },\n ],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "####### Global Classifications #########\n",
- "\n",
- "# Python Annotation\n",
- "## For global classifications use ClassificationAnnotation\n",
- "global_radio_prediction = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"radio_class_global\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\", confidence=0.5)),\n",
- " )\n",
- "]\n",
- "\n",
- "global_checklist_prediction = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class_global\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
- " confidence=0.5),\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\",\n",
- " confidence=0.5),\n",
- " ]),\n",
- " )\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "global_radio_classification_ndjson = {\n",
- " \"name\": \"radio_class_global\",\n",
- " \"answer\": {\n",
- " \"name\": \"first_radio_answer\",\n",
- " \"confidence\": 0.5\n",
- " },\n",
- "}\n",
- "\n",
- "global_checklist_classification_ndjson = {\n",
- " \"name\":\n",
- " \"checklist_class_global\",\n",
- " \"answer\": [\n",
- " {\n",
- " \"name\": \"first_checklist_answer\",\n",
- " \"confidence\": 0.5\n",
- " },\n",
- " {\n",
- " \"name\": \"second_checklist_answer\",\n",
- " \"confidence\": 0.5\n",
- " },\n",
- " ],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "########## Nested Global Classification ###########\n\n# Python Annotation\nnested_radio_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"nested_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\",\n confidence=0.5,\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_sub_radio_answer\", confidence=0.5)),\n )\n ],\n )),\n )\n]\n\n# NDJSON\nnested_radio_prediction_ndjson = {\n \"name\": \"nested_radio_question\",\n \"answer\": {\n \"name\":\n \"first_radio_answer\",\n \"confidence\":\n 0.5,\n \"classifications\": [{\n \"name\": \"sub_radio_question\",\n \"answer\": {\n \"name\": \"first_sub_radio_answer\",\n \"confidence\": 0.5\n },\n }],\n },\n}\n\n# Python Annotation\nnested_checklist_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"nested_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_checklist_answer\",\n confidence=0.5,\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_sub_checklist_answer\",\n confidence=0.5,\n )\n ]),\n )\n ],\n )\n ]),\n )\n]\n\n# NDJSON\nnested_checklist_prediction_ndjson = {\n \"name\":\n \"nested_checklist_question\",\n \"answer\": [{\n \"name\":\n \"first_checklist_answer\",\n \"confidence\":\n 0.5,\n \"classifications\": [{\n \"name\": \"sub_checklist_question\",\n \"answer\": {\n \"name\": \"first_sub_checklist_answer\",\n \"confidence\": 0.5,\n },\n }],\n }],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "########## Nested Global Classification ###########\n",
- "\n",
- "# Python Annotation\n",
- "nested_radio_prediction = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"nested_radio_question\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\",\n",
- " confidence=0.5,\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"sub_radio_question\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_sub_radio_answer\", confidence=0.5)),\n",
- " )\n",
- " ],\n",
- " )),\n",
- " )\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "nested_radio_prediction_ndjson = {\n",
- " \"name\": \"nested_radio_question\",\n",
- " \"answer\": {\n",
- " \"name\":\n",
- " \"first_radio_answer\",\n",
- " \"confidence\":\n",
- " 0.5,\n",
- " \"classifications\": [{\n",
- " \"name\": \"sub_radio_question\",\n",
- " \"answer\": {\n",
- " \"name\": \"first_sub_radio_answer\",\n",
- " \"confidence\": 0.5\n",
- " },\n",
- " }],\n",
- " },\n",
- "}\n",
- "\n",
- "# Python Annotation\n",
- "nested_checklist_prediction = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"nested_checklist_question\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"first_checklist_answer\",\n",
- " confidence=0.5,\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"sub_checklist_question\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"first_sub_checklist_answer\",\n",
- " confidence=0.5,\n",
- " )\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " )\n",
- " ]),\n",
- " )\n",
- "]\n",
- "\n",
- "# NDJSON\n",
- "nested_checklist_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"nested_checklist_question\",\n",
- " \"answer\": [{\n",
- " \"name\":\n",
- " \"first_checklist_answer\",\n",
- " \"confidence\":\n",
- " 0.5,\n",
- " \"classifications\": [{\n",
- " \"name\": \"sub_checklist_question\",\n",
- " \"answer\": {\n",
- " \"name\": \"first_sub_checklist_answer\",\n",
- " \"confidence\": 0.5,\n",
- " },\n",
- " }],\n",
- " }],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "########## Classifications under frame base tools ##########\n# Confidence scores are not supported for frame specific bounding box annotations with sub-classifications\n\n# bounding box dimensions\nbbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n\n# Python Annotation\nframe_bbox_with_checklist_subclass_prediction = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=10,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=11,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ),\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n confidence=0.5)\n ]),\n )\n ],\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ),\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"second_checklist_answer\", confidence=0.5)\n ]),\n )\n ],\n ),\n]\n\nframe_bbox_with_checklist_subclass_prediction_ndjson = {\n \"name\":\n \"bbox_class\",\n \"segments\": [{\n \"keyframes\": [\n {\n \"frame\": 10,\n \"bbox\": bbox_dm2\n },\n {\n \"frame\":\n 11,\n \"bbox\":\n bbox_dm2,\n \"classifications\": [{\n \"name\":\n \"bbox_radio\",\n \"answer\": [{\n \"name\": \"first_checklist_answer\",\n \"confidence\": 0.5,\n }],\n }],\n },\n {\n \"frame\":\n 13,\n \"bbox\":\n bbox_dm2,\n \"classifications\": [{\n \"name\":\n \"bbox_radio\",\n \"answer\": [{\n \"name\": \"second_checklist_answer\",\n \"confidence\": 0.5,\n }],\n }],\n },\n ]\n }],\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "########## Classifications under frame base tools ##########\n",
- "# Confidence scores are not supported for frame specific bounding box annotations with sub-classifications\n",
- "\n",
- "# bounding box dimensions\n",
- "bbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n",
- "\n",
- "# Python Annotation\n",
- "frame_bbox_with_checklist_subclass_prediction = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=10,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
- " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=11,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ),\n",
- " ),\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\",\n",
- " confidence=0.5)\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=13,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ),\n",
- " ),\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"second_checklist_answer\", confidence=0.5)\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " ),\n",
- "]\n",
- "\n",
- "frame_bbox_with_checklist_subclass_prediction_ndjson = {\n",
- " \"name\":\n",
- " \"bbox_class\",\n",
- " \"segments\": [{\n",
- " \"keyframes\": [\n",
- " {\n",
- " \"frame\": 10,\n",
- " \"bbox\": bbox_dm2\n",
- " },\n",
- " {\n",
- " \"frame\":\n",
- " 11,\n",
- " \"bbox\":\n",
- " bbox_dm2,\n",
- " \"classifications\": [{\n",
- " \"name\":\n",
- " \"bbox_radio\",\n",
- " \"answer\": [{\n",
- " \"name\": \"first_checklist_answer\",\n",
- " \"confidence\": 0.5,\n",
- " }],\n",
- " }],\n",
- " },\n",
- " {\n",
- " \"frame\":\n",
- " 13,\n",
- " \"bbox\":\n",
- " bbox_dm2,\n",
- " \"classifications\": [{\n",
- " \"name\":\n",
- " \"bbox_radio\",\n",
- " \"answer\": [{\n",
- " \"name\": \"second_checklist_answer\",\n",
- " \"confidence\": 0.5,\n",
- " }],\n",
- " }],\n",
- " },\n",
- " ]\n",
- " }],\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "######### Free text classification ###########\ntext_prediction = [\n lb_types.ClassificationAnnotation(\n name=\"free_text\", # must match your ontology feature's name\n value=lb_types.Text(answer=\"sample text\", confidence=0.5),\n )\n]\n\ntext_prediction_ndjson = {\n \"name\": \"free_text\",\n \"confidence\": 0.5,\n \"answer\": \"sample text\",\n}",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "######### Free text classification ###########\n",
- "text_prediction = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"free_text\", # must match your ontology feature's name\n",
- " value=lb_types.Text(answer=\"sample text\", confidence=0.5),\n",
- " )\n",
- "]\n",
- "\n",
- "text_prediction_ndjson = {\n",
- " \"name\": \"free_text\",\n",
- " \"confidence\": 0.5,\n",
- " \"answer\": \"sample text\",\n",
- "}"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 1: Import data rows into Catalog"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# send a sample image as batch to the project\nglobal_key = \"sample-video-2.mp4\" + str(uuid.uuid4())\ntest_img_url = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\",\n \"global_key\":\n global_key,\n}\ndataset = client.create_dataset(\n name=\"Video prediction demo\",\n iam_integration=\n None, # Removing this argument will default to the organziation's default iam integration\n)\ntask = dataset.create_data_rows([test_img_url])\ntask.wait_till_done()\nprint(\"Errors: \", task.errors)\nprint(\"Failed data rows: \", task.failed_data_rows)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# send a sample image as batch to the project\n",
- "global_key = \"sample-video-2.mp4\" + str(uuid.uuid4())\n",
- "test_img_url = {\n",
- " \"row_data\":\n",
- " \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\",\n",
- " \"global_key\":\n",
- " global_key,\n",
- "}\n",
- "dataset = client.create_dataset(\n",
- " name=\"Video prediction demo\",\n",
- " iam_integration=\n",
- " None, # Removing this argument will default to the organziation's default iam integration\n",
- ")\n",
- "task = dataset.create_data_rows([test_img_url])\n",
- "task.wait_till_done()\n",
- "print(\"Errors: \", task.errors)\n",
- "print(\"Failed data rows: \", task.failed_data_rows)"
- ]
+ "execution_count": null
},
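`task.errors` and `task.failed_data_rows` are printed above but otherwise ignored. A minimal guard, using only the objects this cell already defines, stops the notebook before later steps reference a data row that never reached Catalog:

```python
# Fail fast on a broken import; failed_data_rows lists the rejected payloads.
if task.errors:
    raise RuntimeError(f"Data row import failed: {task.errors}")
if task.failed_data_rows:
    print("Rejected payloads:", task.failed_data_rows)
```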
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 2: Create/select an Ontology for your model predictions\n",
"Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the name/instructions fields in your annotations to ensure the correct feature schemas are matched.\n"
- ]
+ ],
+ "cell_type": "markdown"
},
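Name matching is the only link between a payload and its feature schema, so a mismatch typically surfaces as an upload error rather than a Python exception. A small self-contained sketch of the rule, reusing names from the cells in this notebook (the rectangle coordinates are arbitrary):

```python
import labelbox as lb
import labelbox.types as lb_types

# The tool declared in the ontology...
tool = lb.Tool(tool=lb.Tool.Type.BBOX, name="bbox_video")

# ...must share its exact name with every prediction that targets it.
prediction = lb_types.VideoObjectAnnotation(
    name="bbox_video",  # identical string to the tool name above
    keyframe=True,
    frame=13,
    segment_index=0,
    value=lb_types.Rectangle(start=lb_types.Point(x=0, y=0),
                             end=lb_types.Point(x=10, y=10)),
)
assert tool.name == prediction.name
```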
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "ontology_builder = lb.OntologyBuilder(\n tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"bbox_video\"),\n lb.Tool(tool=lb.Tool.Type.POINT, name=\"point_video\"),\n lb.Tool(tool=lb.Tool.Type.LINE, name=\"line_video_frame\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"video_mask\"),\n lb.Tool(\n tool=lb.Tool.Type.BBOX,\n name=\"bbox_class\",\n classifications=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class\",\n scope=lb.Classification.Scope.\n INDEX, ## defined scope for frame classifications\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n )\n ],\n ),\n ],\n classifications=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class\",\n scope=lb.Classification.Scope.\n INDEX, ## defined scope for frame classifications\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"radio_class\",\n scope=lb.Classification.Scope.INDEX,\n options=[\n lb.Option(value=\"first_radio_answer\"),\n lb.Option(value=\"second_radio_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"nested_radio_question\",\n options=[\n lb.Option(\n \"first_radio_answer\",\n options=[\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"sub_radio_question\",\n options=[lb.Option(\"first_sub_radio_answer\")],\n )\n ],\n )\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"nested_checklist_question\",\n options=[\n lb.Option(\n \"first_checklist_answer\",\n options=[\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"sub_checklist_question\",\n options=[lb.Option(\"first_sub_checklist_answer\")],\n )\n ],\n )\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.RADIO,\n name=\"radio_class_global\",\n options=[\n lb.Option(value=\"first_radio_answer\"),\n lb.Option(value=\"second_radio_answer\"),\n ],\n ),\n lb.Classification(\n class_type=lb.Classification.Type.CHECKLIST,\n name=\"checklist_class_global\",\n options=[\n lb.Option(value=\"first_checklist_answer\"),\n lb.Option(value=\"second_checklist_answer\"),\n ],\n ),\n lb.Classification(class_type=lb.Classification.Type.TEXT,\n name=\"free_text\"),\n ],\n)\n\nontology = client.create_ontology(\n \"Ontology Video Annotations\",\n ontology_builder.asdict(),\n media_type=lb.MediaType.Video,\n)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "ontology_builder = lb.OntologyBuilder(\n",
- " tools=[\n",
- " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"bbox_video\"),\n",
- " lb.Tool(tool=lb.Tool.Type.POINT, name=\"point_video\"),\n",
- " lb.Tool(tool=lb.Tool.Type.LINE, name=\"line_video_frame\"),\n",
- " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"video_mask\"),\n",
- " lb.Tool(\n",
- " tool=lb.Tool.Type.BBOX,\n",
- " name=\"bbox_class\",\n",
- " classifications=[\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.CHECKLIST,\n",
- " name=\"checklist_class\",\n",
- " scope=lb.Classification.Scope.\n",
- " INDEX, ## defined scope for frame classifications\n",
- " options=[\n",
- " lb.Option(value=\"first_checklist_answer\"),\n",
- " lb.Option(value=\"second_checklist_answer\"),\n",
- " ],\n",
- " )\n",
- " ],\n",
- " ),\n",
- " ],\n",
- " classifications=[\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.CHECKLIST,\n",
- " name=\"checklist_class\",\n",
- " scope=lb.Classification.Scope.\n",
- " INDEX, ## defined scope for frame classifications\n",
- " options=[\n",
- " lb.Option(value=\"first_checklist_answer\"),\n",
- " lb.Option(value=\"second_checklist_answer\"),\n",
- " ],\n",
- " ),\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.RADIO,\n",
- " name=\"radio_class\",\n",
- " scope=lb.Classification.Scope.INDEX,\n",
- " options=[\n",
- " lb.Option(value=\"first_radio_answer\"),\n",
- " lb.Option(value=\"second_radio_answer\"),\n",
- " ],\n",
- " ),\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.RADIO,\n",
- " name=\"nested_radio_question\",\n",
- " options=[\n",
- " lb.Option(\n",
- " \"first_radio_answer\",\n",
- " options=[\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.RADIO,\n",
- " name=\"sub_radio_question\",\n",
- " options=[lb.Option(\"first_sub_radio_answer\")],\n",
- " )\n",
- " ],\n",
- " )\n",
- " ],\n",
- " ),\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.CHECKLIST,\n",
- " name=\"nested_checklist_question\",\n",
- " options=[\n",
- " lb.Option(\n",
- " \"first_checklist_answer\",\n",
- " options=[\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.CHECKLIST,\n",
- " name=\"sub_checklist_question\",\n",
- " options=[lb.Option(\"first_sub_checklist_answer\")],\n",
- " )\n",
- " ],\n",
- " )\n",
- " ],\n",
- " ),\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.RADIO,\n",
- " name=\"radio_class_global\",\n",
- " options=[\n",
- " lb.Option(value=\"first_radio_answer\"),\n",
- " lb.Option(value=\"second_radio_answer\"),\n",
- " ],\n",
- " ),\n",
- " lb.Classification(\n",
- " class_type=lb.Classification.Type.CHECKLIST,\n",
- " name=\"checklist_class_global\",\n",
- " options=[\n",
- " lb.Option(value=\"first_checklist_answer\"),\n",
- " lb.Option(value=\"second_checklist_answer\"),\n",
- " ],\n",
- " ),\n",
- " lb.Classification(class_type=lb.Classification.Type.TEXT,\n",
- " name=\"free_text\"),\n",
- " ],\n",
- ")\n",
- "\n",
- "ontology = client.create_ontology(\n",
- " \"Ontology Video Annotations\",\n",
- " ontology_builder.asdict(),\n",
- " media_type=lb.MediaType.Video,\n",
- ")"
- ]
+ "execution_count": null
},
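The heading above says create *or select*: to reuse an ontology across runs instead of recreating it each time, the client can fetch one by ID. A sketch, assuming `client.get_ontology` is available in your SDK version; `"<ONTOLOGY_ID>"` is a placeholder for your own ontology's ID:

```python
# Reuse an existing ontology instead of creating a new one on every run.
ontology = client.get_ontology("<ONTOLOGY_ID>")  # hypothetical placeholder ID
```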
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 3: Create a Model and Model Run"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# create Model\nmodel = client.create_model(name=\"video_model_run_\" + str(uuid.uuid4()),\n ontology_id=ontology.uid)\n# create Model Run\nmodel_run = model.create_model_run(\"iteration 1\")",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# create Model\n",
- "model = client.create_model(name=\"video_model_run_\" + str(uuid.uuid4()),\n",
- " ontology_id=ontology.uid)\n",
- "# create Model Run\n",
- "model_run = model.create_model_run(\"iteration 1\")"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 4: Send data rows to the Model Run"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "model_run.upsert_data_rows(global_keys=[global_key])",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "model_run.upsert_data_rows(global_keys=[global_key])"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 5. Create the predictions payload\n",
"\n",
"Create the annotations payload using the snippets of [code here](https://docs.labelbox.com/reference/import-video-annotations).\n",
"\n"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"#### Python Annotation Types"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "label_predictions = []\nannotations_list = [\n point_prediction,\n bbox_prediction,\n polyline_prediction,\n checklist_prediction,\n radio_prediction,\n nested_radio_prediction,\n nested_checklist_prediction,\n frame_bbox_with_checklist_subclass_prediction,\n global_radio_prediction,\n global_checklist_prediction,\n text_prediction,\n]\n\nflatten_list_annotations = [\n ann for ann_sublist in annotations_list for ann in ann_sublist\n]\n\nlabel_predictions.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=flatten_list_annotations))",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "label_predictions = []\n",
- "annotations_list = [\n",
- " point_prediction,\n",
- " bbox_prediction,\n",
- " polyline_prediction,\n",
- " checklist_prediction,\n",
- " radio_prediction,\n",
- " nested_radio_prediction,\n",
- " nested_checklist_prediction,\n",
- " frame_bbox_with_checklist_subclass_prediction,\n",
- " global_radio_prediction,\n",
- " global_checklist_prediction,\n",
- " text_prediction,\n",
- "]\n",
- "\n",
- "flatten_list_annotations = [\n",
- " ann for ann_sublist in annotations_list for ann in ann_sublist\n",
- "]\n",
- "\n",
- "label_predictions.append(\n",
- " lb_types.Label(data={\"global_key\": global_key},\n",
- " annotations=flatten_list_annotations))"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"#### NDJSON annotations"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\nlabel_prediction_ndjson = []\n\nfor annotation in [\n point_prediction_ndjson,\n bbox_prediction_ndjson,\n polyline_prediction_ndjson,\n frame_checklist_classification_prediction_ndjson,\n frame_radio_classification_prediction_ndjson,\n nested_radio_prediction_ndjson,\n nested_checklist_prediction_ndjson,\n frame_bbox_with_checklist_subclass_prediction_ndjson,\n global_radio_classification_ndjson,\n global_checklist_classification_ndjson,\n text_prediction_ndjson,\n]:\n annotation.update({\"dataRow\": {\"globalKey\": global_key}})\n label_prediction_ndjson.append(annotation)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n",
- "label_prediction_ndjson = []\n",
- "\n",
- "for annotation in [\n",
- " point_prediction_ndjson,\n",
- " bbox_prediction_ndjson,\n",
- " polyline_prediction_ndjson,\n",
- " frame_checklist_classification_prediction_ndjson,\n",
- " frame_radio_classification_prediction_ndjson,\n",
- " nested_radio_prediction_ndjson,\n",
- " nested_checklist_prediction_ndjson,\n",
- " frame_bbox_with_checklist_subclass_prediction_ndjson,\n",
- " global_radio_classification_ndjson,\n",
- " global_checklist_classification_ndjson,\n",
- " text_prediction_ndjson,\n",
- "]:\n",
- " annotation.update({\"dataRow\": {\"globalKey\": global_key}})\n",
- " label_prediction_ndjson.append(annotation)"
- ]
+ "execution_count": null
},
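After the loop, every record carries its data-row pointer alongside the prediction fields. Printing the last entry makes the final shape concrete; the expected output below is inferred from the dicts defined earlier, not captured from a run:

```python
print(label_prediction_ndjson[-1])
# Expected shape (text_prediction_ndjson is appended last):
# {'name': 'free_text', 'confidence': 0.5, 'answer': 'sample text',
#  'dataRow': {'globalKey': '<your global key>'}}
```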
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 6. Upload the predictions payload to the Model Run "
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# Upload the prediction label to the Model Run\nupload_job_prediction = model_run.add_predictions(\n name=\"prediction_upload_job\" + str(uuid.uuid4()),\n predictions=label_predictions,\n)\n\n# Errors will appear for annotation uploads that failed.\nprint(\"Errors:\", upload_job_prediction.errors)\nprint(\"Status of uploads: \", upload_job_prediction.statuses)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# Upload the prediction label to the Model Run\n",
- "upload_job_prediction = model_run.add_predictions(\n",
- " name=\"prediction_upload_job\" + str(uuid.uuid4()),\n",
- " predictions=label_predictions,\n",
- ")\n",
- "\n",
- "# Errors will appear for annotation uploads that failed.\n",
- "print(\"Errors:\", upload_job_prediction.errors)\n",
- "print(\"Status of uploads: \", upload_job_prediction.statuses)"
- ]
+ "execution_count": null
},
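Unlike the LabelImport job in Step 7, this cell reads `errors` and `statuses` without an explicit wait. If the job returned by `add_predictions` exposes the same `wait_until_done()` as the import jobs used later in this notebook (an assumption, not verified here), blocking first makes the prints deterministic:

```python
# Block until the prediction import finishes before inspecting results
# (assumes the job object has wait_until_done(), matching LabelImport below).
upload_job_prediction.wait_until_done()
print("Errors:", upload_job_prediction.errors)
print("Status of uploads:", upload_job_prediction.statuses)
```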
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Step 7: Send annotations to the Model Run \n",
"To send annotations to a Model Run, we must first import them into a project, create a label payload and then send them to the Model Run."
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.1. Create a labelbox project"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# Create a Labelbox project\nproject = client.create_project(name=\"video_prediction_demo\",\n media_type=lb.MediaType.Video)\nproject.setup_editor(ontology)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# Create a Labelbox project\n",
- "project = client.create_project(name=\"video_prediction_demo\",\n",
- " media_type=lb.MediaType.Video)\n",
- "project.setup_editor(ontology)"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.2. Create a batch to send to the project "
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "project.create_batch(\n \"batch_video_prediction_demo\", # Each batch in a project must have a unique name\n global_keys=[global_key\n ], # A list of data rows, data row ids or global keys\n priority=5, # priority between 1(Highest) - 5(lowest)\n)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "project.create_batch(\n",
- " \"batch_video_prediction_demo\", # Each batch in a project must have a unique name\n",
- " global_keys=[global_key\n",
- " ], # A list of data rows, data row ids or global keys\n",
- " priority=5, # priority between 1(Highest) - 5(lowest)\n",
- ")"
- ]
+ "execution_count": null
},
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.3 Create the annotations payload"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# Python Annotation\npoint_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"point_video\",\n keyframe=True,\n frame=17,\n value=lb_types.Point(x=660.134, y=407.926),\n )\n]\n\n######## Polyline ########\n\n# Python Annotation\npolyline_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=5,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=12,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=20,\n segment_index=0,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=24,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"line_video_frame\",\n keyframe=True,\n frame=45,\n segment_index=1,\n value=lb_types.Line(\n points=[lb_types.Point(x=680, y=100),\n lb_types.Point(x=100, y=190)]),\n ),\n]\n\nradio_annotation = [\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=9,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"radio_class\",\n frame=15,\n segment_index=0,\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n ),\n]\n\nchecklist_annotation = [\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=29,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=35,\n segment_index=0,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=39,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n ]),\n ),\n lb_types.VideoClassificationAnnotation(\n name=\"checklist_class\",\n frame=45,\n segment_index=1,\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n ]),\n ),\n]\n\nglobal_radio_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"radio_class_global\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\")),\n )\n]\n\nglobal_checklist_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"checklist_class_global\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\"),\n lb_types.ClassificationAnswer(name=\"second_checklist_answer\"),\n ]),\n )\n]\n\nnested_radio_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"nested_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_radio_answer\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_radio_question\",\n value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n name=\"first_sub_radio_answer\")),\n )\n ],\n )),\n 
)\n]\n\nnested_checklist_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"nested_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_checklist_answer\",\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"sub_checklist_question\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"first_sub_checklist_answer\")\n ]),\n )\n ],\n )\n ]),\n )\n]\n\nbbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\nframe_bbox_with_checklist_subclass = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=10,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=11,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n ]),\n )\n ],\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_class\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm2[\"left\"],\n y=bbox_dm2[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n ), # x= left + width , y = top + height\n ),\n classifications=[\n lb_types.ClassificationAnnotation(\n name=\"checklist_class\",\n value=lb_types.Checklist(answer=[\n lb_types.ClassificationAnswer(\n name=\"second_checklist_answer\")\n ]),\n )\n ],\n ),\n]\n\nbbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\nbbox_annotation = [\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"],\n y=bbox_dm[\"top\"]), # x = left, y = top\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ), # x= left + width , y = top + height\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=15,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n lb_types.VideoObjectAnnotation(\n name=\"bbox_video\",\n keyframe=True,\n frame=19,\n segment_index=0,\n value=lb_types.Rectangle(\n start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n end=lb_types.Point(\n x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n ),\n ),\n ),\n]\n\ntext_annotation = [\n lb_types.ClassificationAnnotation(\n name=\"free_text\", # must match your ontology feature's name\n value=lb_types.Text(answer=\"sample text\"),\n )\n]",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# Python Annotation\n",
- "point_annotation = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"point_video\",\n",
- " keyframe=True,\n",
- " frame=17,\n",
- " value=lb_types.Point(x=660.134, y=407.926),\n",
- " )\n",
- "]\n",
- "\n",
- "######## Polyline ########\n",
- "\n",
- "# Python Annotation\n",
- "polyline_annotation = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=5,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=12,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=20,\n",
- " segment_index=0,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=24,\n",
- " segment_index=1,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"line_video_frame\",\n",
- " keyframe=True,\n",
- " frame=45,\n",
- " segment_index=1,\n",
- " value=lb_types.Line(\n",
- " points=[lb_types.Point(x=680, y=100),\n",
- " lb_types.Point(x=100, y=190)]),\n",
- " ),\n",
- "]\n",
- "\n",
- "radio_annotation = [\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"radio_class\",\n",
- " frame=9,\n",
- " segment_index=0,\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\")),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"radio_class\",\n",
- " frame=15,\n",
- " segment_index=0,\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\")),\n",
- " ),\n",
- "]\n",
- "\n",
- "checklist_annotation = [\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=29,\n",
- " segment_index=0,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=35,\n",
- " segment_index=0,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=39,\n",
- " segment_index=1,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n",
- " ]),\n",
- " ),\n",
- " lb_types.VideoClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " frame=45,\n",
- " segment_index=1,\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\")\n",
- " ]),\n",
- " ),\n",
- "]\n",
- "\n",
- "global_radio_annotation = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"radio_class_global\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\")),\n",
- " )\n",
- "]\n",
- "\n",
- "global_checklist_annotation = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class_global\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\"),\n",
- " lb_types.ClassificationAnswer(name=\"second_checklist_answer\"),\n",
- " ]),\n",
- " )\n",
- "]\n",
- "\n",
- "nested_radio_annotation = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"nested_radio_question\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_radio_answer\",\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"sub_radio_question\",\n",
- " value=lb_types.Radio(answer=lb_types.ClassificationAnswer(\n",
- " name=\"first_sub_radio_answer\")),\n",
- " )\n",
- " ],\n",
- " )),\n",
- " )\n",
- "]\n",
- "\n",
- "nested_checklist_annotation = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"nested_checklist_question\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"first_checklist_answer\",\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"sub_checklist_question\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"first_sub_checklist_answer\")\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " )\n",
- " ]),\n",
- " )\n",
- "]\n",
- "\n",
- "bbox_dm2 = {\"top\": 146.0, \"left\": 98.0, \"height\": 382.0, \"width\": 341.0}\n",
- "frame_bbox_with_checklist_subclass = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=10,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
- " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=11,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
- " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(name=\"first_checklist_answer\")\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_class\",\n",
- " keyframe=True,\n",
- " frame=13,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm2[\"left\"],\n",
- " y=bbox_dm2[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"],\n",
- " y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " classifications=[\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"checklist_class\",\n",
- " value=lb_types.Checklist(answer=[\n",
- " lb_types.ClassificationAnswer(\n",
- " name=\"second_checklist_answer\")\n",
- " ]),\n",
- " )\n",
- " ],\n",
- " ),\n",
- "]\n",
- "\n",
- "bbox_dm = {\"top\": 617, \"left\": 1371, \"height\": 419, \"width\": 505}\n",
- "bbox_annotation = [\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=13,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"],\n",
- " y=bbox_dm[\"top\"]), # x = left, y = top\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ), # x= left + width , y = top + height\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=15,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ),\n",
- " ),\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name=\"bbox_video\",\n",
- " keyframe=True,\n",
- " frame=19,\n",
- " segment_index=0,\n",
- " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
- " end=lb_types.Point(\n",
- " x=bbox_dm[\"left\"] + bbox_dm[\"width\"],\n",
- " y=bbox_dm[\"top\"] + bbox_dm[\"height\"],\n",
- " ),\n",
- " ),\n",
- " ),\n",
- "]\n",
- "\n",
- "text_annotation = [\n",
- " lb_types.ClassificationAnnotation(\n",
- " name=\"free_text\", # must match your ontology feature's name\n",
- " value=lb_types.Text(answer=\"sample text\"),\n",
- " )\n",
- "]"
- ]
+ "execution_count": null
},
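Two details in the cell above are easy to miss: keyframes that share a `segment_index` belong to the same segment, and the editor interpolates between a segment's keyframes, so only the keyframes themselves need to be listed. Because every keyframe repeats the same `Rectangle` boilerplate, a small helper keeps these lists readable. A minimal sketch, not part of the patch; the helper name `make_bbox_keyframes` is hypothetical and only reuses the `lb_types` constructs already shown:

```python
import labelbox.types as lb_types

def make_bbox_keyframes(name, bbox, frames, segment_index):
    """Build one VideoObjectAnnotation per keyframe of a single segment."""
    return [
        lb_types.VideoObjectAnnotation(
            name=name,
            keyframe=True,
            frame=frame,
            segment_index=segment_index,
            value=lb_types.Rectangle(
                # x = left, y = top
                start=lb_types.Point(x=bbox["left"], y=bbox["top"]),
                # x = left + width, y = top + height
                end=lb_types.Point(x=bbox["left"] + bbox["width"],
                                   y=bbox["top"] + bbox["height"]),
            ),
        ) for frame in frames
    ]

# Equivalent to the three-keyframe bbox_annotation list above.
bbox_dm = {"top": 617, "left": 1371, "height": 419, "width": 505}
bbox_annotation = make_bbox_keyframes("bbox_video", bbox_dm, [13, 15, 19], 0)
```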
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.4. Create the label object"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "markdown",
"metadata": {},
- "source": []
+ "source": [],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n\nlabels = []\nannotations_list = [\n checklist_annotation,\n radio_annotation,\n bbox_annotation,\n frame_bbox_with_checklist_subclass,\n point_annotation,\n polyline_annotation,\n global_checklist_annotation,\n global_radio_annotation,\n nested_checklist_annotation,\n nested_radio_annotation,\n text_annotation,\n]\n\nflatten_list_annotations = [\n ann for ann_sublist in annotations_list for ann in ann_sublist\n]\n\nlabels.append(\n lb_types.Label(\n data={\"global_key\": global_key},\n annotations=flatten_list_annotations,\n ))",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n",
- "\n",
- "labels = []\n",
- "annotations_list = [\n",
- " checklist_annotation,\n",
- " radio_annotation,\n",
- " bbox_annotation,\n",
- " frame_bbox_with_checklist_subclass,\n",
- " point_annotation,\n",
- " polyline_annotation,\n",
- " global_checklist_annotation,\n",
- " global_radio_annotation,\n",
- " nested_checklist_annotation,\n",
- " nested_radio_annotation,\n",
- " text_annotation,\n",
- "]\n",
- "\n",
- "flatten_list_annotations = [\n",
- " ann for ann_sublist in annotations_list for ann in ann_sublist\n",
- "]\n",
- "\n",
- "labels.append(\n",
- " lb_types.Label(\n",
- " data={\"global_key\":global_key},\n",
- " annotations=flatten_list_annotations,\n",
- " ))"
- ]
+ "execution_count": null
},
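The nested comprehension flattens the list of annotation lists before the `Label` is built, and `data={"global_key": global_key}` assumes `global_key` was assigned when the data row was created earlier in the notebook. For comparison, `itertools.chain.from_iterable` is an equivalent standard-library idiom for the flattening step:

```python
from itertools import chain

# Same result as the nested list comprehension in the cell above.
flatten_list_annotations = list(chain.from_iterable(annotations_list))
```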
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.5. Upload annotations to the project using Label Import"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "upload_job_annotation = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"video_annotations_import_\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nupload_job_annotation.wait_until_done()\n# Errors will appear for annotation uploads that failed.\nprint(\"Errors:\", upload_job_annotation.errors)\nprint(\"Status of uploads: \", upload_job_annotation.statuses)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "upload_job_annotation = lb.LabelImport.create_from_objects(\n",
- " client=client,\n",
- " project_id=project.uid,\n",
- " name=\"video_annotations_import_\" + str(uuid.uuid4()),\n",
- " labels=labels,\n",
- ")\n",
- "\n",
- "upload_job_annotation.wait_until_done()\n",
- "# Errors will appear for annotation uploads that failed.\n",
- "print(\"Errors:\", upload_job_annotation.errors)\n",
- "print(\"Status of uploads: \", upload_job_annotation.statuses)"
- ]
+ "execution_count": null
},
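Printing `errors` and `statuses` is enough for a tutorial, but a pipeline should usually fail loudly instead. A small sketch, assuming (as the print above implies) that `errors` is an empty list when every label imported cleanly:

```python
# Fail fast if any annotation was rejected during the import.
if upload_job_annotation.errors:
    raise RuntimeError(
        f"{len(upload_job_annotation.errors)} label(s) failed to import: "
        f"{upload_job_annotation.errors}")
```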
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"##### 7.6. Send the annotations to the Model Run"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# get the labels id from the project\nmodel_run.upsert_labels(project_id=project.uid)",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# get the labels id from the project\n",
- "model_run.upsert_labels(project_id=project.uid)"
- ]
+ "execution_count": null
},
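`upsert_labels(project_id=project.uid)` attaches every submitted label in the project to the Model Run as ground truth. To attach only a subset, `upsert_labels` can instead be given explicit label IDs; treat the parameter name as an assumption to verify against your SDK version, and note the IDs below are placeholders:

```python
# Sketch: attach only specific labels to the Model Run.
# Collect real label IDs from a label export; these are placeholders.
label_ids = ["<label-id-1>", "<label-id-2>"]
model_run.upsert_labels(label_ids=label_ids)
```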
{
- "cell_type": "markdown",
"metadata": {},
"source": [
"## Optional deletions for cleanup \n"
- ]
+ ],
+ "cell_type": "markdown"
},
{
- "cell_type": "code",
- "execution_count": null,
"metadata": {},
+ "source": "# project.delete()\n# dataset.delete()",
+ "cell_type": "code",
"outputs": [],
- "source": [
- "# project.delete()\n",
- "# dataset.delete()"
- ]
- }
- ],
- "metadata": {
- "language_info": {
- "name": "python"
+ "execution_count": null
}
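The delete calls above are commented out so the tutorial assets survive by default. If cleanup needs to be scriptable, a guarded variant avoids accidental deletion; the `CLEANUP` flag is a hypothetical convention, not part of the notebook:

```python
# Guarded cleanup: nothing is deleted unless explicitly opted in.
CLEANUP = False  # set to True to remove the tutorial project and dataset

if CLEANUP:
    project.delete()
    dataset.delete()
```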
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
+ ]
+}
\ No newline at end of file
From cd768a4f1c1f45c6e7c0bb83682e890374dd89ef Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Tue, 16 Jul 2024 21:36:14 +0000
Subject: [PATCH 3/3] :memo: README updated
---
examples/README.md | 148 ++++++++++++++++++++++-----------------------
1 file changed, 74 insertions(+), 74 deletions(-)
diff --git a/examples/README.md b/examples/README.md
index 38452d9d5..387840707 100644
--- a/examples/README.md
+++ b/examples/README.md
[README table hunks not recoverable verbatim: the badge-link cells of this examples/README.md diff were stripped during extraction, leaving only the row labels. The surviving labels show the patch re-sorts the README tables as follows:
- Basics table: rows reordered to Ontologies, Basics, Quick Start, Data Row Metadata, Projects, Data Rows, User Management.
- Exports table: Export Data moved below Composite Mask Export.
- Project configuration table: Project Setup moved above Queue Management.
- Annotation import table: PDF moved to the top; Tiled moved above Image; Conversational LLM Data Generation and Conversational LLM moved below Image; Conversational, Video, and Audio rows unchanged.
- Integrations table: Langchain and Import YOLOv8 Annotations swapped around Meta SAM.
- Model experiments table: Custom Metrics Demo and Custom Metrics Basics swapped around Model Slices.
- Prediction upload table: rows reordered to Geospatial Predictions, HTML Predictions, Conversational LLM Predictions, Image Predictions, Conversational Predictions, Text Predictions, Video Predictions, PDF Predictions.]