From f80f1c00027dd939469df8a9467d38780c09e61b Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:18:27 -0500 Subject: [PATCH 01/23] started notebook --- .../yolo/import_yolo_annotations.ipynb | 189 ++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 examples/integrations/yolo/import_yolo_annotations.ipynb diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb new file mode 100644 index 000000000..59ea6f0d8 --- /dev/null +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -0,0 +1,189 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Import YOLOv8 annotations\n", + "This notebook will provides examples of setting up a Project with annotations generated with YOLOv8. We will be using the [Ultralytics](https://docs.ultralytics.com/) library to generate our annotations. In this guide we will be:\n", + "* Importing a demo image data rows that will be labeled\n", + "* Setting up our ontology that matches our YOLOv8 annotations\n", + "* Importing our data rows and attaching our ontology to a project\n", + "* Running our images through Ultralytics then importing the annotations generated\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set up" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem with your settings or a recent ultralytics package update. \n", + "View settings with 'yolo settings' or at '/Users/gabrielunderwood/Library/Application Support/Ultralytics/settings.yaml'\n", + "Update settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'. 
For help see https://docs.ultralytics.com/quickstart/#ultralytics-settings.\n" + ] + } + ], + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "import uuid\n", + "import ultralytics\n", + "from ultralytics.engine.model import Model\n", + "from ultralytics.engine.results import Results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API key and client\n", + "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHhhZnpkcWgwMmZqMDd5MTdramllZW8xIiwic2VjcmV0IjoiZjkxMmZhMzMwOTRmOGMxYzFkN2QzNTY2NTMxMzEyNDMiLCJpYXQiOjE3MTgxMTI3NjksImV4cCI6MjM0OTI2NDc2OX0.4wY66g9LaP5xFHoSeleAgTK75CX7wxKMsaIbX0nI_qc\"\n", + "client = lb.Client(api_key=API_KEY)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: Import YOLOv8 Annotations\n", + "\n", + "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Importing an image data row\n", + "\n", + "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Errors: None\n" + ] + } + ], + "source": [ + "# create data row\n", + "data_row = {\n", + " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\": str(uuid.uuid4()),\n", + " \"media_type\": \"IMAGE\"\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setting up an ontology and a project\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 3edfd76b9775ac2cca4bb6f34e84b3aa746549e1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 12 Jun 2024 16:20:21 +0000 Subject: [PATCH 02/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 154 +++++------------- 1 file changed, 38 insertions(+), 116 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 59ea6f0d8..bd69832c1 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -34,156 +36,76 @@ "* Setting up our ontology that matches our YOLOv8 annotations\n", "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics then importing the annotations generated\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", - "Note: you may need to restart the kernel to use updated packages.\n", - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", - 
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": 2, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem with your settings or a recent ultralytics package update. \n", - "View settings with 'yolo settings' or at '/Users/gabrielunderwood/Library/Application Support/Ultralytics/settings.yaml'\n", - "Update settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'. For help see https://docs.ultralytics.com/quickstart/#ultralytics-settings.\n" - ] - } - ], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "import uuid\n", - "import ultralytics\n", - "from ultralytics.engine.model import Model\n", - "from ultralytics.engine.results import Results" - ] + "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results", + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": 3, "metadata": {}, + "source": "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHhhZnpkcWgwMmZqMDd5MTdramllZW8xIiwic2VjcmV0IjoiZjkxMmZhMzMwOTRmOGMxYzFkN2QzNTY2NTMxMzEyNDMiLCJpYXQiOjE3MTgxMTI3NjksImV4cCI6MjM0OTI2NDc2OX0.4wY66g9LaP5xFHoSeleAgTK75CX7wxKMsaIbX0nI_qc\"\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHhhZnpkcWgwMmZqMDd5MTdramllZW8xIiwic2VjcmV0IjoiZjkxMmZhMzMwOTRmOGMxYzFkN2QzNTY2NTMxMzEyNDMiLCJpYXQiOjE3MTgxMTI3NjksImV4cCI6MjM0OTI2NDc2OX0.4wY66g9LaP5xFHoSeleAgTK75CX7wxKMsaIbX0nI_qc\"\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": 7, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n" - ] - } - ], - "source": [ - "# create data row\n", - "data_row = {\n", - " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\": str(uuid.uuid4()),\n", - " \"media_type\": \"IMAGE\"\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "source": "# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n str(uuid.uuid4()),\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.13" + ], + "cell_type": "markdown" } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From d11f5e108e5d510c654ffc047cd049c6ae7ba9b2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 12 Jun 2024 16:21:05 +0000 Subject: [PATCH 03/23] :memo: README updated --- examples/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/examples/README.md b/examples/README.md index faf0b39a2..a36d043d0 100644 --- a/examples/README.md +++ b/examples/README.md @@ -227,6 +227,11 @@ Open In Github Open In Colab + + Import Yolo Annotations + Open In Github + Open In Colab + From 408f470cea7c6e3bd92861a27c66874efdc01717 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:22:01 -0500 Subject: [PATCH 04/23] removed key --- .../yolo/import_yolo_annotations.ipynb | 104 ++++++++++++------ 1 file changed, 69 insertions(+), 35 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index bd69832c1..743506606 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,18 +1,16 @@ { - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, "cells": [ { + "cell_type": "markdown", "metadata": {}, "source": 
[ - "", - " ", + "\n", + " \n", "\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -24,10 +22,10 @@ "\n", "" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -36,76 +34,112 @@ "* Setting up our ontology that matches our YOLOv8 annotations\n", "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics then importing the annotations generated\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics" + ] }, { - "metadata": {}, - "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "import uuid\n", + "import ultralytics\n", + "from ultralytics.engine.model import Model\n", + "from ultralytics.engine.results import Results" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHhhZnpkcWgwMmZqMDd5MTdramllZW8xIiwic2VjcmV0IjoiZjkxMmZhMzMwOTRmOGMxYzFkN2QzNTY2NTMxMzEyNDMiLCJpYXQiOjE3MTgxMTI3NjksImV4cCI6MjM0OTI2NDc2OX0.4wY66g9LaP5xFHoSeleAgTK75CX7wxKMsaIbX0nI_qc\"\nclient = lb.Client(api_key=API_KEY)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "API_KEY = None\n", + "client = lb.Client(api_key=API_KEY)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." 
- ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n str(uuid.uuid4()),\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# create data row\n", + "data_row = {\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\":\n", + " str(uuid.uuid4()),\n", + " \"media_type\":\n", + " \"IMAGE\",\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n" - ], - "cell_type": "markdown" + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 3de2087739e115cb6cbf133c949ab0b1cf6702bb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 12 Jun 2024 16:22:58 +0000 Subject: [PATCH 05/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 104 ++++++------------ 1 file changed, 35 insertions(+), 69 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 743506606..cefd369cf 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -34,112 +36,76 @@ "* Setting up our ontology that matches our YOLOv8 annotations\n", "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics then importing the annotations generated\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", "outputs": [], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results", + "cell_type": "code", "outputs": [], - "source": [ - "import 
labelbox as lb\n", - "import labelbox.types as lb_types\n", - "import uuid\n", - "import ultralytics\n", - "from ultralytics.engine.model import Model\n", - "from ultralytics.engine.results import Results" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n str(uuid.uuid4()),\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " str(uuid.uuid4()),\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n" - ] + ], + "cell_type": "markdown" } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From 2dd689ae590e9e45a04bfa6c1336c6ce4eeb0b3c Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 10:48:39 -0500 Subject: [PATCH 06/23] finsihed up notebook --- .../yolo/import_yolo_annotations.ipynb | 578 ++++++++++++++++-- 1 file changed, 541 insertions(+), 37 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb 
b/examples/integrations/yolo/import_yolo_annotations.ipynb index cefd369cf..65590b9e9 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,18 +1,16 @@ { - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, "cells": [ { + "cell_type": "markdown", "metadata": {}, "source": [ - "", - " ", + "\n", + " \n", "\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -24,10 +22,10 @@ "\n", "" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -35,77 +33,583 @@ "* Importing a demo image data rows that will be labeled\n", "* Setting up our ontology that matches our YOLOv8 annotations\n", "* Importing our data rows and attaching our ontology to a project\n", - "* Running our images through Ultralytics then importing the annotations generated\n" - ], - "cell_type": "markdown" + "* Running our images through Ultralytics\n", + "* Importing the annotations generated\n" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", "cell_type": "code", - "outputs": [], - "execution_count": null + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3 install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3 install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics" + ] }, { - "metadata": {}, - "source": "import labelbox as lb\nimport labelbox.types as lb_types\nimport uuid\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results", "cell_type": "code", - "outputs": [], - "execution_count": null + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'dateutil.utils'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[10], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m 
\u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtypes\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb_types\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01multralytics\u001b[39;00m\n", + "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/__init__.py:5\u001b[0m\n\u001b[1;32m 1\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabelbox\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m __version__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m3.72.2\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mclient\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Client\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mproject\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Project\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodel\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Model\n", + "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/client.py:20\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexceptions\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m __version__ \u001b[38;5;28;01mas\u001b[39;00m SDK_VERSION\n\u001b[0;32m---> 20\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m utils\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01madv_client\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AdvClient\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01morm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m query\n", + "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/utils.py:6\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtz\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tzoffset\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparser\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m isoparse \u001b[38;5;28;01mas\u001b[39;00m dateutil_parse\n\u001b[0;32m----> 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m default_tzinfo\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m 
\u001b[38;5;21;01murllib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparse\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m urlparse\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pydantic_compat\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'dateutil.utils'" + ] + } + ], + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "\n", + "import ultralytics\n", + "from ultralytics.engine.model import Model\n", + "from ultralytics.engine.results import Results\n", + "\n", + "import uuid\n", + "import io" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ], - "cell_type": "markdown" + ] }, { + "cell_type": "code", + "execution_count": null, "metadata": {}, - "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "outputs": [], + "source": [ + "API_KEY = None\n", + "client = lb.Client(api_key=API_KEY)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set up a YOLOv8 model\n", + "Below we will be initializing our model to be used for on our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. " + ] + }, + { "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." 
- ], - "cell_type": "markdown" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "global_key = str(uuid.uuid4())\n", + "\n", + "# create data row\n", + "data_row = {\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\":\n", + " global_key,\n", + " \"media_type\":\n", + " \"IMAGE\",\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, - "source": "# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n str(uuid.uuid4()),\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "source": [ + "### Setting up an ontology and a project\n", + "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8.. We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Create an ontology" + ] + }, + { "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "ontology_builder = lb.OntologyBuilder(tools=[\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle\"),\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person\"),\n", + "])\n", + "\n", + "ontology = client.create_ontology(\n", + " name=\"YOLOv8 Demo Ontology\",\n", + " normalized=ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image,\n", + ")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "### Setting up an ontology and a project\n" - ], - "cell_type": "markdown" + "#### Create and set up a project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", + "\n", + "project.setup_editor(ontology)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Export our data rows and getting our predictions\n", + "Now that we have imported our image data row and set up our project and ontology we can now get our predictions. 
In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Export data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "export_task = project.export()\n", + "export_task.wait_till_done()\n", + "\n", + "# prediction list we will be populating\n", + "url_list = []\n", + "global_keys = []\n", + "\n", + "\n", + "# callback that is ran on each data row\n", + "def export_callback(output: lb.BufferedJsonConverterOutput):\n", + "\n", + " data_row = output.json\n", + "\n", + " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", + "\n", + " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", + "\n", + "\n", + "# check if export has errors\n", + "if export_task.has_errors():\n", + " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", + "\n", + "if export_task.has_result():\n", + " export_task.get_buffered_stream().start(stream_handler=export_callback)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Make YOLOv8 predictions\n", + "Below we are passing our list of image URLs to our YOLOv8 model. If your `row_data` URL are behind a private cloud bucket integration you will need to either download your images locally or give your IDE permission to access your images." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results: list[Results] = model(url_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import YOLOv8 annotations to a project\n", + "Now that you have finished your initial set up and have gotten your predictions from YOLOv8 we can import our annotations towards are project. We will be doing the following in this step:\n", + "1. Defining our import functions\n", + "2. Creating our labels\n", + "3. Importing our labels as either ground truths or MAL labels (pre-labels)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Defining our import functions\n", + "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class_mapping = {\"person\": \"Person\", \"bus\": \"Vehicle\", \"truck\": \"Vehicle\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Bounding box" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_yolo_bbox_annotation_predictions(\n", + " yolo_result: Results, model: Model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", + " \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for bbox in yolo_result.boxes:\n", + " class_name = model.names[int(bbox.cls)]\n", + "\n", + " # ignore bboxes that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get bbox coordinates\n", + " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", + "\n", + " bbox_source = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Rectangle(\n", + " start=lb_types.Point(x=start_x, y=start_y),\n", + " end=lb_types.Point(x=end_x, y=end_y),\n", + " ),\n", + " )\n", + "\n", + " annotations.append(bbox_source)\n", + "\n", + " return annotations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Segment Mask" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_yolo_segment_annotation_predictions(\n", + " yolo_result: Results, model: Model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", + " \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for i, mask in enumerate(yolo_result.masks.data):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore segment masks that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get binary numpy array to byte array. 
You must resize mask to match image.\n", + " mask = (mask.numpy() * 255).astype(\"uint8\")\n", + " img = Image.fromarray(mask, \"L\")\n", + " img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", + " img_byte_arr = io.BytesIO()\n", + " img.save(img_byte_arr, format=\"PNG\")\n", + " encoded_image_bytes = img_byte_arr.getvalue()\n", + "\n", + " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", + " mask_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", + " )\n", + " annotations.append(mask_annotation)\n", + "\n", + " return annotations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Polygon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_yolo_polygon_annotation_predictions(\n", + " yolo_result: Results, model: Model,\n", + " ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", + " \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n", + "\n", + " Args:\n", + " yolo_result (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for i, coordinates in enumerate(yolo_result.masks.xy):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore polygons that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " polygon_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Polygon(points=[\n", + " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", + " for coordinate in coordinates\n", + " ]),\n", + " )\n", + " annotations.append(polygon_annotation)\n", + "\n", + " return annotations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Creating our labels\n", + "Now that we have defined our functions to create our Labelbox annotations we can run each result from our YOLOv8 prediction list with our global keys to create our labels. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# label list that will be populated\n", + "labels = []\n", + "\n", + "for i, global_key in enumerate(global_keys):\n", + " annotations = []\n", + "\n", + " # run result through each function and adding them to our annotation list\n", + " annotations += get_yolo_bbox_annotation_predictions(results[i], model,\n", + " class_mapping)\n", + " annotations += get_yolo_polygon_annotation_predictions(\n", + " results[i], model, class_mapping)\n", + " annotations += get_yolo_segment_annotation_predictions(\n", + " results[i], model, class_mapping)\n", + "\n", + " labels.append(\n", + " lb_types.Label(data={\"global_key\": global_key},\n", + " annotations=annotations))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Import annotations to Labelbox\n", + "We have now created our labels and can import them towards our project. For more information on importing annotations visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Option A: Upload to a labeling project as pre-labels (MAL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# upload MAL labels for this data row in project\n", + "upload_job = lb.MALPredictionImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"mal_job\" + str(uuid.uuid4()),\n", + " predictions=labels,\n", + ")\n", + "upload_job.wait_until_done()\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Option B: Upload to a labeling project using ground truth" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# upload label for this data row in project\n", + "upload_job = lb.LabelImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"label_import_job\" + str(uuid.uuid4()),\n", + " labels=labels,\n", + ")\n", + "upload_job.wait_until_done\n", + "\n", + "print(\"Errors:\", upload_job.errors)\n", + "print(\"Status of uploads: \", upload_job.statuses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Clean up\n", + "Uncomment and run the cell below to optionally delete Labelbox objects created" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# batch.delete()\n", + "# project.delete()\n", + "# dataset.delete()\\" + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 031a24526e8ea981ba8eddd049b6294263bb2167 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:42:22 -0500 Subject: [PATCH 07/23] modified yolo notebook slightly --- .../yolo/import_yolo_annotations.ipynb | 54 +++++++++---------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 65590b9e9..39bf86f89 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -66,12 +66,12 @@ ], "source": [ "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" + "%pip install -q --upgrade ultralytics==8.0.20" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -81,7 +81,7 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[10], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m 
\u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtypes\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb_types\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01multralytics\u001b[39;00m\n", + "Cell \u001b[0;32mIn[13], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtypes\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb_types\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01multralytics\u001b[39;00m\n", "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/__init__.py:5\u001b[0m\n\u001b[1;32m 1\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabelbox\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m __version__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m3.72.2\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mclient\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Client\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mproject\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Project\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodel\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Model\n", "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/client.py:20\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexceptions\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m __version__ \u001b[38;5;28;01mas\u001b[39;00m SDK_VERSION\n\u001b[0;32m---> 20\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m utils\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01madv_client\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AdvClient\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01morm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m query\n", "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/utils.py:6\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtz\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tzoffset\n\u001b[1;32m 5\u001b[0m 
\u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparser\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m isoparse \u001b[38;5;28;01mas\u001b[39;00m dateutil_parse\n\u001b[0;32m----> 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m default_tzinfo\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01murllib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparse\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m urlparse\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pydantic_compat\n", @@ -129,9 +129,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'ultralytics' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43multralytics\u001b[49m\u001b[38;5;241m.\u001b[39mYOLO(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myolov8n-seg.pt\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'ultralytics' is not defined" + ] + } + ], "source": [ "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" ] @@ -243,7 +255,7 @@ "metadata": {}, "source": [ "### Export our data rows and getting our predictions\n", - "Now that we have imported our image data row and set up our project and ontology we can now get our predictions. In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." + "In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." ] }, { @@ -285,29 +297,12 @@ " export_task.get_buffered_stream().start(stream_handler=export_callback)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Make YOLOv8 predictions\n", - "Below we are passing our list of image URLs to our YOLOv8 model. If your `row_data` URL are behind a private cloud bucket integration you will need to either download your images locally or give your IDE permission to access your images." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "results: list[Results] = model(url_list)" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", - "Now that you have finished your initial set up and have gotten your predictions from YOLOv8 we can import our annotations towards are project. We will be doing the following in this step:\n", + "Now that you have finished your initial set up we create our predictions from YOLOv8 and import our annotations towards are project. We will be doing the following in this step:\n", "1. Defining our import functions\n", "2. Creating our labels\n", "3. 
Importing our labels as either ground truths or MAL labels (pre-labels)" @@ -483,7 +478,7 @@ "metadata": {}, "source": [ "#### Creating our labels\n", - "Now that we have defined our functions to create our Labelbox annotations we can run each result from our YOLOv8 prediction list with our global keys to create our labels. " + "Now that we have defined our functions to create our Labelbox annotations we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. " ] }, { @@ -498,13 +493,16 @@ "for i, global_key in enumerate(global_keys):\n", " annotations = []\n", "\n", + " # make YOLOv8 predictions\n", + " result = model.predict(url_list[i])\n", + " \n", " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(results[i], model,\n", + " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", " class_mapping)\n", " annotations += get_yolo_polygon_annotation_predictions(\n", - " results[i], model, class_mapping)\n", + " result, model, class_mapping)\n", " annotations += get_yolo_segment_annotation_predictions(\n", - " results[i], model, class_mapping)\n", + " result, model, class_mapping)\n", "\n", " labels.append(\n", " lb_types.Label(data={\"global_key\": global_key},\n", From 9dc0272e1e4512e8d9fb22169fe0f384941eb142 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 13 Jun 2024 16:44:06 +0000 Subject: [PATCH 08/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 509 ++++-------------- 1 file changed, 104 insertions(+), 405 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 39bf86f89..17e8006e7 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -35,270 +37,144 @@ "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics\n", "* Importing the annotations generated\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ] + ], + "cell_type": "markdown" }, { + "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics==8.0.20", "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3 install --upgrade pip\u001b[0m\n", - "Note: you may need to restart the kernel to use updated packages.\n", - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of 
pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3 install --upgrade pip\u001b[0m\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics==8.0.20" - ] + "outputs": [], + "execution_count": null }, { + "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results\n\nimport uuid\nimport io", "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'dateutil.utils'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[13], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtypes\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mlb_types\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01multralytics\u001b[39;00m\n", - "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/__init__.py:5\u001b[0m\n\u001b[1;32m 1\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabelbox\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m __version__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m3.72.2\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mclient\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Client\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mproject\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Project\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodel\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Model\n", - "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/client.py:20\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexceptions\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m __version__ \u001b[38;5;28;01mas\u001b[39;00m SDK_VERSION\n\u001b[0;32m---> 20\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m 
\u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m utils\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01madv_client\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AdvClient\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01morm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m query\n", - "File \u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/labelbox/utils.py:6\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtz\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tzoffset\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparser\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m isoparse \u001b[38;5;28;01mas\u001b[39;00m dateutil_parse\n\u001b[0;32m----> 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdateutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m default_tzinfo\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01murllib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mparse\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m urlparse\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlabelbox\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pydantic_compat\n", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'dateutil.utils'" - ] - } - ], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "\n", - "import ultralytics\n", - "from ultralytics.engine.model import Model\n", - "from ultralytics.engine.results import Results\n", - "\n", - "import uuid\n", - "import io" - ] + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up a YOLOv8 model\n", "Below we will be initializing our model to be used for on our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. 
" - ] + ], + "cell_type": "markdown" }, { + "metadata": {}, + "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'ultralytics' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43multralytics\u001b[49m\u001b[38;5;241m.\u001b[39mYOLO(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myolov8n-seg.pt\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "\u001b[0;31mNameError\u001b[0m: name 'ultralytics' is not defined" - ] - } - ], - "source": [ - "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" - ] + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "global_key = str(uuid.uuid4())\n", - "\n", - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " global_key,\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n", "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8.. We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. 
In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8.\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an ontology" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", + "cell_type": "code", "outputs": [], - "source": [ - "ontology_builder = lb.OntologyBuilder(tools=[\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle\"),\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person\"),\n", - "])\n", - "\n", - "ontology = client.create_ontology(\n", - " name=\"YOLOv8 Demo Ontology\",\n", - " normalized=ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image,\n", - ")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and set up a project" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", + "cell_type": "code", "outputs": [], - "source": [ - "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", - "\n", - "project.setup_editor(ontology)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Export our data rows and getting our predictions\n", "In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", + "cell_type": "code", "outputs": [], - "source": [ - "export_task = project.export()\n", - "export_task.wait_till_done()\n", - "\n", - "# prediction list we will be populating\n", - "url_list = []\n", - "global_keys = []\n", - "\n", - "\n", - "# callback that is ran on each data row\n", - "def export_callback(output: lb.BufferedJsonConverterOutput):\n", - "\n", - " data_row = output.json\n", - "\n", - " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", - "\n", - " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", - "\n", - "\n", - "# check if export has errors\n", - "if export_task.has_errors():\n", - " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", - "\n", - "if export_task.has_result():\n", - " export_task.get_buffered_stream().start(stream_handler=export_callback)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", @@ -306,308 +182,131 @@ "1. Defining our import functions\n", "2. Creating our labels\n", "3. Importing our labels as either ground truths or MAL labels (pre-labels)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Defining our import functions\n", "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "class_mapping = {\"person\": \"Person\", \"bus\": \"Vehicle\", \"truck\": \"Vehicle\"}", + "cell_type": "code", "outputs": [], - "source": [ - "class_mapping = {\"person\": \"Person\", \"bus\": \"Vehicle\", \"truck\": \"Vehicle\"}" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Bounding box" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_bbox_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_bbox_annotation_predictions(\n", - " yolo_result: Results, model: Model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", - " \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for bbox in yolo_result.boxes:\n", - " class_name = model.names[int(bbox.cls)]\n", - "\n", - " # ignore bboxes that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get bbox coordinates\n", - " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", - "\n", - " bbox_source = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Rectangle(\n", - " start=lb_types.Point(x=start_x, y=start_y),\n", - " end=lb_types.Point(x=end_x, y=end_y),\n", - " ),\n", - " )\n", - "\n", - " annotations.append(bbox_source)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_segment_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping 
between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_segment_annotation_predictions(\n", - " yolo_result: Results, model: Model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", - " \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for i, mask in enumerate(yolo_result.masks.data):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore segment masks that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get binary numpy array to byte array. 
You must resize mask to match image.\n", - " mask = (mask.numpy() * 255).astype(\"uint8\")\n", - " img = Image.fromarray(mask, \"L\")\n", - " img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", - " img_byte_arr = io.BytesIO()\n", - " img.save(img_byte_arr, format=\"PNG\")\n", - " encoded_image_bytes = img_byte_arr.getvalue()\n", - "\n", - " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", - " mask_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", - " )\n", - " annotations.append(mask_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_polygon_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_polygon_annotation_predictions(\n", - " yolo_result: Results, model: Model,\n", - " ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", - " \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n", - "\n", - " Args:\n", - " yolo_result (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for i, coordinates in enumerate(yolo_result.masks.xy):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore polygons that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " polygon_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Polygon(points=[\n", - " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", - " for coordinate in coordinates\n", - " ]),\n", - " )\n", - " annotations.append(polygon_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our labels\n", "Now that we have defined our functions to create our Labelbox annotations we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", + "cell_type": "code", "outputs": [], - "source": [ - "# label list that will be populated\n", - "labels = []\n", - "\n", - "for i, global_key in enumerate(global_keys):\n", - " annotations = []\n", - "\n", - " # make YOLOv8 predictions\n", - " result = model.predict(url_list[i])\n", - " \n", - " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", - " class_mapping)\n", - " annotations += get_yolo_polygon_annotation_predictions(\n", - " result, model, class_mapping)\n", - " annotations += get_yolo_segment_annotation_predictions(\n", - " result, model, class_mapping)\n", - "\n", - " labels.append(\n", - " lb_types.Label(data={\"global_key\": global_key},\n", - " annotations=annotations))" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Import annotations to Labelbox\n", "We have now created our labels and can import them towards our project. For more information on importing annotations visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload to a labeling project as pre-labels (MAL)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload MAL labels for this data row in project\nupload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\nupload_job.wait_until_done()\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "# upload MAL labels for this data row in project\n", - "upload_job = lb.MALPredictionImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"mal_job\" + str(uuid.uuid4()),\n", - " predictions=labels,\n", - ")\n", - "upload_job.wait_until_done()\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option B: Upload to a labeling project using ground truth" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload label for this data row in project\nupload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\nupload_job.wait_until_done\n\nprint(\"Errors:\", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", + "cell_type": "code", "outputs": [], - "source": [ - "# upload label for this data row in project\n", - "upload_job = lb.LabelImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"label_import_job\" + str(uuid.uuid4()),\n", - " labels=labels,\n", - ")\n", - "upload_job.wait_until_done\n", - "\n", - "print(\"Errors:\", upload_job.errors)\n", - "print(\"Status of uploads: \", upload_job.statuses)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Clean up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# batch.delete()\n# project.delete()\n# dataset.delete()\\", + "cell_type": "code", "outputs": [], - "source": [ - "# batch.delete()\n", - "# project.delete()\n", - "# dataset.delete()\\" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" + "execution_count": null } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From 48976d08a31809993f07711e7d5e837939f37570 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 12:50:49 -0500 Subject: [PATCH 09/23] redit mapping --- .../yolo/import_yolo_annotations.ipynb | 454 ++++++++++++++---- 1 file changed, 351 insertions(+), 103 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb 
b/examples/integrations/yolo/import_yolo_annotations.ipynb index 17e8006e7..1641d3cbc 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,18 +1,16 @@ { - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, "cells": [ { + "cell_type": "markdown", "metadata": {}, "source": [ - "", - " ", + "\n", + " \n", "\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -24,10 +22,10 @@ "\n", "" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -37,144 +35,227 @@ "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics\n", "* Importing the annotations generated\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics==8.0.20", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics==8.0.20" + ] }, { - "metadata": {}, - "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom ultralytics.engine.model import Model\nfrom ultralytics.engine.results import Results\n\nimport uuid\nimport io", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "\n", + "import ultralytics\n", + "from PIL import Image\n", + "\n", + "import uuid\n", + "import io" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "API_KEY = None\n", + "client = lb.Client(api_key=API_KEY)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set up a YOLOv8 model\n", "Below we will be initializing our model to be used for on our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. " - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." 
- ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "global_key = str(uuid.uuid4())\n", + "\n", + "# create data row\n", + "data_row = {\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\":\n", + " global_key,\n", + " \"media_type\":\n", + " \"IMAGE\",\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n", - "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8.. We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8.\n" - ], - "cell_type": "markdown" + "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8. We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8. 
Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an ontology" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "ontology_builder = lb.OntologyBuilder(tools=[\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", + "])\n", + "\n", + "ontology = client.create_ontology(\n", + " name=\"YOLOv8 Demo Ontology\",\n", + " normalized=ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image,\n", + ")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and set up a project" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", + "\n", + "project.setup_editor(ontology)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Export our data rows and getting our predictions\n", "In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." 
- ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "export_task = project.export()\n", + "export_task.wait_till_done()\n", + "\n", + "# prediction list we will be populating\n", + "url_list = []\n", + "global_keys = []\n", + "\n", + "\n", + "# callback that is ran on each data row\n", + "def export_callback(output: lb.BufferedJsonConverterOutput):\n", + "\n", + " data_row = output.json\n", + "\n", + " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", + "\n", + " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", + "\n", + "\n", + "# check if export has errors\n", + "if export_task.has_errors():\n", + " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", + "\n", + "if export_task.has_result():\n", + " export_task.get_buffered_stream().start(stream_handler=export_callback)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", @@ -182,131 +263,298 @@ "1. Defining our import functions\n", "2. Creating our labels\n", "3. Importing our labels as either ground truths or MAL labels (pre-labels)" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Defining our import functions\n", - "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`." - ], - "cell_type": "markdown" + "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. 
The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." + ] }, { - "metadata": {}, - "source": "class_mapping = {\"person\": \"Person\", \"bus\": \"Vehicle\", \"truck\": \"Vehicle\"}", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "bbox_class_mapping = {\"person\": \"Person_bbox\", \"bus\": \"Vehicle_bbox\", \"truck\": \"Vehicle_bbox\"}\n", + "mask_class_mapping = {\"person\": \"Person_mask\", \"bus\": \"Vehicle_mask\", \"truck\": \"Vehicle_mask\"}\n", + "polygon_class_mapping = {\"person\": \"Person_polygon\", \"bus\": \"Vehicle_polygon\", \"truck\": \"Vehicle_polygon\"}" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Bounding box" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_bbox_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_bbox_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", + " \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for bbox in yolo_result.boxes:\n", + " class_name = model.names[int(bbox.cls)]\n", + "\n", + " # ignore bboxes that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get bbox coordinates\n", + " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", + "\n", + " bbox_source = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Rectangle(\n", + " start=lb_types.Point(x=start_x, y=start_y),\n", + " end=lb_types.Point(x=end_x, 
y=end_y),\n", + " ),\n", + " )\n", + "\n", + " annotations.append(bbox_source)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_segment_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_segment_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", + " \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for i, mask in enumerate(yolo_result.masks.data):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore segment masks that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get binary numpy array to byte array. 
You must resize mask to match image.\n", + " mask = (mask.numpy() * 255).astype(\"uint8\")\n", + " img = Image.fromarray(mask, \"L\")\n", + " img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", + " img_byte_arr = io.BytesIO()\n", + " img.save(img_byte_arr, format=\"PNG\")\n", + " encoded_image_bytes = img_byte_arr.getvalue()\n", + "\n", + " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", + " mask_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", + " )\n", + " annotations.append(mask_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_polygon_annotation_predictions(\n yolo_result: Results, model: Model,\n ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_polygon_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", + " \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n", + "\n", + " Args:\n", + " yolo_result (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + " for yolo_result in yolo_results:\n", + " for i, coordinates in enumerate(yolo_result.masks.xy):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore polygons that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " polygon_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Polygon(points=[\n", + " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", + " for coordinate in coordinates\n", + " ]),\n", + " )\n", + " annotations.append(polygon_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our labels\n", "Now that we have defined our functions to create our Labelbox annotations we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# label list that will be populated\n", + "labels = []\n", + "\n", + "for i, global_key in enumerate(global_keys):\n", + " annotations = []\n", + "\n", + " # make YOLOv8 predictions\n", + " result = model.predict(url_list[i])\n", + "\n", + " # run result through each function and adding them to our annotation list\n", + " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", + " bbox_class_mapping)\n", + " annotations += get_yolo_polygon_annotation_predictions(\n", + " result, model, polygon_class_mapping)\n", + " annotations += get_yolo_segment_annotation_predictions(\n", + " result, model, mask_class_mapping)\n", + "\n", + " labels.append(\n", + " lb_types.Label(data={\"global_key\": global_key},\n", + " annotations=annotations))" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Import annotations to Labelbox\n", "We have now created our labels and can import them towards our project. For more information on importing annotations visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." 
- ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload to a labeling project as pre-labels (MAL)" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# upload MAL labels for this data row in project\nupload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\nupload_job.wait_until_done()\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# upload MAL labels for this data row in project\n", + "upload_job = lb.MALPredictionImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"mal_job\" + str(uuid.uuid4()),\n", + " predictions=labels,\n", + ")\n", + "upload_job.wait_until_done()\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Option B: Upload to a labeling project using ground truth" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# upload label for this data row in project\nupload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\nupload_job.wait_until_done\n\nprint(\"Errors:\", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# upload label for this data row in project\n", + "upload_job = lb.LabelImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"label_import_job\" + str(uuid.uuid4()),\n", + " labels=labels,\n", + ")\n", + "upload_job.wait_until_done\n", + "\n", + "print(\"Errors:\", upload_job.errors)\n", + "print(\"Status of uploads: \", upload_job.statuses)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Clean up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# batch.delete()\n# project.delete()\n# dataset.delete()\\", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# batch.delete()\n", + "# project.delete()\n", + "# dataset.delete()\\" + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 9f6ccecd607886784345e254004988d740ba1854 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 13:38:16 -0500 Subject: [PATCH 10/23] finished notebook --- examples/integrations/yolo/import_yolo_annotations.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 1641d3cbc..eaf548673 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -51,7 +51,7 @@ "outputs": [], "source": [ "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics==8.0.20" + 
"%pip install -q --upgrade ultralytics" ] }, { @@ -546,7 +546,7 @@ "source": [ "# batch.delete()\n", "# project.delete()\n", - "# dataset.delete()\\" + "# dataset.delete()" ] } ], From ebf9025fb0a0daa6bdd5e3a9e72868a46beeddc0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 13 Jun 2024 18:39:12 +0000 Subject: [PATCH 11/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 450 ++++-------------- 1 file changed, 101 insertions(+), 349 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index eaf548673..af5166ed5 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -35,227 +37,144 @@ "* Importing our data rows and attaching our ontology to a project\n", "* Running our images through Ultralytics\n", "* Importing the annotations generated\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", "outputs": [], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", + "cell_type": "code", "outputs": [], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "\n", - "import ultralytics\n", - "from PIL import Image\n", - "\n", - "import uuid\n", - "import io" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up a YOLOv8 model\n", "Below we will be initializing our model to be used for on our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", + "cell_type": "code", "outputs": [], - "source": [ - "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "global_key = str(uuid.uuid4())\n", - "\n", - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " global_key,\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n", "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8. We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8. 
Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an ontology" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", + "cell_type": "code", "outputs": [], - "source": [ - "ontology_builder = lb.OntologyBuilder(tools=[\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", - "])\n", - "\n", - "ontology = client.create_ontology(\n", - " name=\"YOLOv8 Demo Ontology\",\n", - " normalized=ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image,\n", - ")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and set up a project" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", + "cell_type": "code", "outputs": [], - "source": [ - "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", - "\n", - "project.setup_editor(ontology)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Export our data rows and getting our predictions\n", "In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", + "cell_type": "code", "outputs": [], - "source": [ - "export_task = project.export()\n", - "export_task.wait_till_done()\n", - "\n", - "# prediction list we will be populating\n", - "url_list = []\n", - "global_keys = []\n", - "\n", - "\n", - "# callback that is ran on each data row\n", - "def export_callback(output: lb.BufferedJsonConverterOutput):\n", - "\n", - " data_row = output.json\n", - "\n", - " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", - "\n", - " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", - "\n", - "\n", - "# check if export has errors\n", - "if export_task.has_errors():\n", - " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", - "\n", - "if export_task.has_result():\n", - " export_task.get_buffered_stream().start(stream_handler=export_callback)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", @@ -263,298 +182,131 @@ "1. Defining our import functions\n", "2. Creating our labels\n", "3. Importing our labels as either ground truths or MAL labels (pre-labels)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Defining our import functions\n", "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", + "cell_type": "code", "outputs": [], - "source": [ - "bbox_class_mapping = {\"person\": \"Person_bbox\", \"bus\": \"Vehicle_bbox\", \"truck\": \"Vehicle_bbox\"}\n", - "mask_class_mapping = {\"person\": \"Person_mask\", \"bus\": \"Vehicle_mask\", \"truck\": \"Vehicle_mask\"}\n", - "polygon_class_mapping = {\"person\": \"Person_polygon\", \"bus\": \"Vehicle_polygon\", \"truck\": \"Vehicle_polygon\"}" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Bounding box" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_bbox_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", - " \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for bbox in yolo_result.boxes:\n", - " class_name = model.names[int(bbox.cls)]\n", - "\n", - " # ignore bboxes that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get bbox coordinates\n", - " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", - "\n", - " bbox_source = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Rectangle(\n", - " start=lb_types.Point(x=start_x, y=start_y),\n", - " end=lb_types.Point(x=end_x, y=end_y),\n", - " ),\n", - " )\n", - "\n", - " annotations.append(bbox_source)\n", - "\n", - " return annotations" - ] + 
"execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_segment_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", - " \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for i, mask in enumerate(yolo_result.masks.data):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore segment masks that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get binary numpy array to byte array. 
You must resize mask to match image.\n", - " mask = (mask.numpy() * 255).astype(\"uint8\")\n", - " img = Image.fromarray(mask, \"L\")\n", - " img = img.resize((yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", - " img_byte_arr = io.BytesIO()\n", - " img.save(img_byte_arr, format=\"PNG\")\n", - " encoded_image_bytes = img_byte_arr.getvalue()\n", - "\n", - " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", - " mask_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", - " )\n", - " annotations.append(mask_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_polygon_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", - " \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n", - "\n", - " Args:\n", - " yolo_result (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - " for yolo_result in yolo_results:\n", - " for i, coordinates in enumerate(yolo_result.masks.xy):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore polygons that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " polygon_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Polygon(points=[\n", - " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", - " for coordinate in coordinates\n", - " ]),\n", - " )\n", - " annotations.append(polygon_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our labels\n", "Now that we have defined our functions to create our Labelbox annotations we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", + "cell_type": "code", "outputs": [], - "source": [ - "# label list that will be populated\n", - "labels = []\n", - "\n", - "for i, global_key in enumerate(global_keys):\n", - " annotations = []\n", - "\n", - " # make YOLOv8 predictions\n", - " result = model.predict(url_list[i])\n", - "\n", - " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", - " bbox_class_mapping)\n", - " annotations += get_yolo_polygon_annotation_predictions(\n", - " result, model, polygon_class_mapping)\n", - " annotations += get_yolo_segment_annotation_predictions(\n", - " result, model, mask_class_mapping)\n", - "\n", - " labels.append(\n", - " lb_types.Label(data={\"global_key\": global_key},\n", - " annotations=annotations))" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Import annotations to Labelbox\n", "We have now created our labels and can import them towards our project. For more information on importing annotations visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload to a labeling project as pre-labels (MAL)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload MAL labels for this data row in project\nupload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\nupload_job.wait_until_done()\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "# upload MAL labels for this data row in project\n", - "upload_job = lb.MALPredictionImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"mal_job\" + str(uuid.uuid4()),\n", - " predictions=labels,\n", - ")\n", - "upload_job.wait_until_done()\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option B: Upload to a labeling project using ground truth" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload label for this data row in project\nupload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\nupload_job.wait_until_done\n\nprint(\"Errors:\", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", + "cell_type": "code", "outputs": [], - "source": [ - "# upload label for this data row in project\n", - "upload_job = lb.LabelImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"label_import_job\" + str(uuid.uuid4()),\n", - " labels=labels,\n", - ")\n", - "upload_job.wait_until_done\n", - "\n", - "print(\"Errors:\", upload_job.errors)\n", - "print(\"Status of uploads: \", upload_job.statuses)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Clean up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", + "cell_type": "code", "outputs": [], - "source": [ - "# batch.delete()\n", - "# project.delete()\n", - "# dataset.delete()" - ] + "execution_count": null } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From 44d0d868cfee3902e2780da601bc5841c1019cf6 Mon Sep 17 00:00:00 2001 From: Gabe <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 13:48:16 -0500 Subject: [PATCH 12/23] Update import_yolo_annotations.ipynb --- .../yolo/import_yolo_annotations.ipynb | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index af5166ed5..af403d62c 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -31,12 +31,12 @@ "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", - "This notebook will provides examples 
of setting up a Project with annotations generated with YOLOv8. We will be using the [Ultralytics](https://docs.ultralytics.com/) library to generate our annotations. In this guide we will be:\n", - "* Importing a demo image data rows that will be labeled\n", - "* Setting up our ontology that matches our YOLOv8 annotations\n", - "* Importing our data rows and attaching our ontology to a project\n", - "* Running our images through Ultralytics\n", - "* Importing the annotations generated\n" + "This notebook will provide examples of setting up a Project with annotations generated with YOLOv8. We will use the [Ultralytics](https://docs.ultralytics.com/) library to generate annotations. In this guide, we will be:\n", + "1. Importing a demo image data rows that will be labeled\n", + "2. Setting up our ontology that matches our YOLOv8 annotations\n", + "3. Importing our data rows and attaching our ontology to a project\n", + "4. Running our images through Ultralytics\n", + "5. Importing the annotations generated\n" ], "cell_type": "markdown" }, @@ -65,7 +65,7 @@ "metadata": {}, "source": [ "## API key and client\n", - "Provide a valid API key below in order to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." + "Provide a valid API key below to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." ], "cell_type": "markdown" }, @@ -80,7 +80,7 @@ "metadata": {}, "source": [ "## Set up a YOLOv8 model\n", - "Below we will be initializing our model to be used for on our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. " + "Below, we will initialize our model for our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. " ], "cell_type": "markdown" }, @@ -96,7 +96,7 @@ "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", - "The first few steps of this guide will demonstrating a basic workflow of creating data rows and setting up a project. For a quick complete overview of this process visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." + "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." ], "cell_type": "markdown" }, @@ -105,7 +105,7 @@ "source": [ "### Importing an image data row\n", "\n", - "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide we will go into more detail on the exact annotations." + "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide, we will go into more detail on the exact annotations." ], "cell_type": "markdown" }, @@ -120,7 +120,7 @@ "metadata": {}, "source": [ "### Setting up an ontology and a project\n", - "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that your are wanting to derive from YOLOv8. 
We will be introduce and explain a class mapping later in this guide so feel free to name your ontology features anything you want. In our example, we will be including a combination of bounding boxes, segment mask, and polygon tools to demonstrate converting each of those type of annotations from YOLOv8. Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" + "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that you want to derive from YOLOv8. We will introduce and explain class mapping later in this guide, so feel free to name your ontology features anything you want. In our example, we will include a combination of bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" ], "cell_type": "markdown" }, @@ -155,8 +155,8 @@ { "metadata": {}, "source": [ - "### Export our data rows and getting our predictions\n", - "In the step below, we are exporting our data row from our project and then adding the `row_data` and `global_key` to a list to then be used to make our predictions." + "### Export our data rows and get our predictions\n", + "In the step below, we export our data row from our project and then add the `row_data` and `global_key` to a list to make our predictions." ], "cell_type": "markdown" }, @@ -178,7 +178,7 @@ "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", - "Now that you have finished your initial set up we create our predictions from YOLOv8 and import our annotations towards are project. We will be doing the following in this step:\n", + "Now that you have finished your initial set up we create our predictions from YOLOv8 and import our annotations towards our project. We will be doing the following in this step:\n", "1. Defining our import functions\n", "2. Creating our labels\n", "3. Importing our labels as either ground truths or MAL labels (pre-labels)" @@ -189,7 +189,7 @@ "metadata": {}, "source": [ "#### Defining our import functions\n", - "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons and segment masks which matches our ontology. Below our the functions used for each type. These functions follow the same similar style, essentially, navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. The reason we have this mapping is to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." + "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons, and segment masks that match our ontology. Below are the functions used for each type. These functions follow the same style, essentially navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. 
All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. We have this mapping to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." ], "cell_type": "markdown" }, @@ -246,7 +246,7 @@ "metadata": {}, "source": [ "#### Creating our labels\n", - "Now that we have defined our functions to create our Labelbox annotations we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. " + "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. " ], "cell_type": "markdown" }, @@ -261,7 +261,7 @@ "metadata": {}, "source": [ "#### Import annotations to Labelbox\n", - "We have now created our labels and can import them towards our project. For more information on importing annotations visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." + "We have created our labels and can import them to our project. For more information on importing annotations, visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." ], "cell_type": "markdown" }, @@ -309,4 +309,4 @@ "execution_count": null } ] -} \ No newline at end of file +} From 9ca239a0192fa2d9c2740808888301909f7f2d8b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 13 Jun 2024 18:49:07 +0000 Subject: [PATCH 13/23] :art: Cleaned --- examples/integrations/yolo/import_yolo_annotations.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index af403d62c..aefe5f6c2 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -309,4 +309,4 @@ "execution_count": null } ] -} +} \ No newline at end of file From 404bd220af99379e14fa6ccec7bc04236d8c30c0 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 14:56:48 -0500 Subject: [PATCH 14/23] small typos --- examples/integrations/yolo/import_yolo_annotations.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index eaf548673..d2622de11 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -259,7 +259,7 @@ "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", - "Now that you have finished your initial set up we create our predictions from YOLOv8 and import our annotations towards are project. We will be doing the following in this step:\n", + "Now that you have finished your initial set up we can create our predictions from YOLOv8 and import our annotations towards are project. We will be doing the following in this step:\n", "1. Defining our import functions\n", "2. 
Creating our labels\n",
     "3. Importing our labels as either ground truths or MAL labels (pre-labels)"
@@ -500,8 +500,8 @@
     ")\n",
     "upload_job.wait_until_done()\n",
     "\n",
-    "print(f\"Errors: {upload_job.errors}\")\n",
-    "print(f\"Status of uploads: {upload_job.statuses}\")"
+    "print(\"Errors \", upload_job.errors)\n",
+    "print(\"Status of uploads: \", upload_job.statuses)"
    ]
   },
   {
From 6c1f867145ebb03f705f15af20ab2f6192f789e6 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 13 Jun 2024 15:28:04 -0500
Subject: [PATCH 15/23] typos

---
 .../integrations/yolo/import_yolo_annotations.ipynb | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb
index ffa920a90..2292e0487 100644
--- a/examples/integrations/yolo/import_yolo_annotations.ipynb
+++ b/examples/integrations/yolo/import_yolo_annotations.ipynb
@@ -120,7 +120,7 @@
    "source": [
     "### Importing an image data row\n",
     "\n",
-    "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. Which has a lot of objects that can be picked up by YOLOv8. Later in this guide, we will go into more detail on the exact annotations."
+    "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. This image has a lot of objects that can be picked up by YOLOv8. Later in this guide, we will go into more detail on the exact annotations."
    ]
   },
   {
@@ -154,7 +154,7 @@
    "metadata": {},
    "source": [
     "### Setting up an ontology and a project\n",
-    "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that you want to derive from YOLOv8. We will introduce and explain class mapping later in this guide, so feel free to name your ontology features anything you want. In our example, we will include a combination of bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n"
+    "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that you want to derive from YOLOv8. We will introduce and explain class mappings later in this guide, so feel free to name your ontology features anything you want. In our example, we will include a combination of bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. Labelbox does not support ontologies where the same feature name is present at the first level, so each of our feature names needs to be unique.\n"
    ]
   },
   {
@@ -259,7 +259,7 @@
    "metadata": {},
    "source": [
     "### Import YOLOv8 annotations to a project\n",
-    "Now that you have finished your initial set up we create our predictions from YOLOv8 and import our annotations towards our project. We will be doing the following in this step:\n",
+    "Now that you have finished your initial set up we can create our predictions from YOLOv8 and import our annotations towards our project. We will be doing the following in this step:\n",
     "1. Defining our import functions\n",
     "2. Creating our labels\n",
     "3. 
Importing our labels as either ground truths or MAL labels (pre-labels)" @@ -351,7 +351,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Segment Mask" + "##### Segment mask" ] }, { @@ -512,8 +512,8 @@ ")\n", "upload_job.wait_until_done()\n", "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" + "print(\"Errors: \", upload_job.errors)\n", + "print(\"Status of uploads: \", upload_job.statuses)" ] }, { From 7cf5dfeef5b77c6dcfb5a9ee9f08aac466aaa372 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 13 Jun 2024 20:29:01 +0000 Subject: [PATCH 16/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 466 ++++-------------- 1 file changed, 103 insertions(+), 363 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 2292e0487..a39c45bc0 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 annotations\n", @@ -35,227 +37,144 @@ "3. Importing our data rows and attaching our ontology to a project\n", "4. Running our images through Ultralytics\n", "5. Importing the annotations generated\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", "outputs": [], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", + "cell_type": "code", "outputs": [], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "\n", - "import ultralytics\n", - "from PIL import Image\n", - "\n", - "import uuid\n", - "import io" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API key and client\n", "Provide a valid API key below to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set up a YOLOv8 model\n", "Below, we will initialize our model for our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", + "cell_type": "code", "outputs": [], - "source": [ - "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Importing an image data row\n", "\n", "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. This image has a lot of objects that can be picked up by YOLOv8. Later in this guide, we will go into more detail on the exact annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "global_key = str(uuid.uuid4())\n", - "\n", - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " global_key,\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Setting up an ontology and a project\n", "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that you want to derive from YOLOv8. We will introduce and explain class mappings later in this guide, so feel free to name your ontology features anything you want. In our example, we will include a combination of bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. 
Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an ontology" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", + "cell_type": "code", "outputs": [], - "source": [ - "ontology_builder = lb.OntologyBuilder(tools=[\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", - "])\n", - "\n", - "ontology = client.create_ontology(\n", - " name=\"YOLOv8 Demo Ontology\",\n", - " normalized=ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image,\n", - ")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and set up a project" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", + "cell_type": "code", "outputs": [], - "source": [ - "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", - "\n", - "project.setup_editor(ontology)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Export our data rows and get our predictions\n", "In the step below, we export our data row from our project and then add the `row_data` and `global_key` to a list to make our predictions." 
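The callback in the export cell only reads two fields from each streamed data row. As orientation, an abbreviated, illustrative sketch of the shape it expects (the real export payload carries many more keys than shown here) is:

```python
# Illustrative only: an abbreviated sketch of the fields the export callback reads.
# The actual Labelbox export payload contains many additional keys.
exported_data_row = {
    "data_row": {
        "row_data": "https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg",
        "global_key": "example-global-key",  # hypothetical value for illustration
    }
}

# The callback appends these two values to the lists used for predictions.
url_list = [exported_data_row["data_row"]["row_data"]]
global_keys = [exported_data_row["data_row"]["global_key"]]
```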
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", + "cell_type": "code", "outputs": [], - "source": [ - "export_task = project.export()\n", - "export_task.wait_till_done()\n", - "\n", - "# prediction list we will be populating\n", - "url_list = []\n", - "global_keys = []\n", - "\n", - "\n", - "# callback that is ran on each data row\n", - "def export_callback(output: lb.BufferedJsonConverterOutput):\n", - "\n", - " data_row = output.json\n", - "\n", - " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", - "\n", - " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", - "\n", - "\n", - "# check if export has errors\n", - "if export_task.has_errors():\n", - " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", - "\n", - "if export_task.has_result():\n", - " export_task.get_buffered_stream().start(stream_handler=export_callback)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 annotations to a project\n", @@ -263,310 +182,131 @@ "1. Defining our import functions\n", "2. Creating our labels\n", "3. Importing our labels as either ground truths or MAL labels (pre-labels)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Defining our import functions\n", "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons, and segment masks that match our ontology. Below are the functions used for each type. These functions follow the same style, essentially navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. We have this mapping to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." 
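Because annotations whose feature names do not match the ontology will be rejected at import time, it can be worth sanity-checking the mapping values against the tool names defined earlier. A small sketch of that idea, assuming the `ontology_builder` created above and the three mapping dictionaries defined just below, might look like:

```python
# Sketch only: cross-check that every Labelbox feature name used in the class
# mappings matches a tool name defined in the ontology builder above.
tool_names = {tool["name"] for tool in ontology_builder.asdict()["tools"]}

for mapping in (bbox_class_mapping, mask_class_mapping, polygon_class_mapping):
    for yolo_name, labelbox_name in mapping.items():
        if labelbox_name not in tool_names:
            print(f"'{yolo_name}' maps to '{labelbox_name}', which is not in the ontology")
```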
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", + "cell_type": "code", "outputs": [], - "source": [ - "bbox_class_mapping = {\n", - " \"person\": \"Person_bbox\",\n", - " \"bus\": \"Vehicle_bbox\",\n", - " \"truck\": \"Vehicle_bbox\",\n", - "}\n", - "mask_class_mapping = {\n", - " \"person\": \"Person_mask\",\n", - " \"bus\": \"Vehicle_mask\",\n", - " \"truck\": \"Vehicle_mask\",\n", - "}\n", - "polygon_class_mapping = {\n", - " \"person\": \"Person_polygon\",\n", - " \"bus\": \"Vehicle_polygon\",\n", - " \"truck\": \"Vehicle_polygon\",\n", - "}" - ] - }, - { - "cell_type": "markdown", + "execution_count": null + }, + { "metadata": {}, "source": [ "##### Bounding box" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_bbox_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", - " \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for bbox in yolo_result.boxes:\n", - " class_name = model.names[int(bbox.cls)]\n", - "\n", - " # ignore bboxes that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get bbox coordinates\n", - " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", - "\n", - " bbox_source = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Rectangle(\n", - " start=lb_types.Point(x=start_x, y=start_y),\n", - " end=lb_types.Point(x=end_x, y=end_y),\n", - " 
),\n", - " )\n", - "\n", - " annotations.append(bbox_source)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment mask" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_segment_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", - " \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for i, mask in enumerate(yolo_result.masks.data):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore segment masks that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get binary numpy array to byte array. 
You must resize mask to match image.\n", - " mask = (mask.numpy() * 255).astype(\"uint8\")\n", - " img = Image.fromarray(mask, \"L\")\n", - " img = img.resize(\n", - " (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", - " img_byte_arr = io.BytesIO()\n", - " img.save(img_byte_arr, format=\"PNG\")\n", - " encoded_image_bytes = img_byte_arr.getvalue()\n", - "\n", - " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", - " mask_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", - " )\n", - " annotations.append(mask_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_polygon_annotation_predictions(\n", - " yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", - " \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n", - "\n", - " Args:\n", - " yolo_result (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - " for yolo_result in yolo_results:\n", - " for i, coordinates in enumerate(yolo_result.masks.xy):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore polygons that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " polygon_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Polygon(points=[\n", - " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", - " for coordinate in coordinates\n", - " ]),\n", - " )\n", - " annotations.append(polygon_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our labels\n", "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", + "cell_type": "code", "outputs": [], - "source": [ - "# label list that will be populated\n", - "labels = []\n", - "\n", - "for i, global_key in enumerate(global_keys):\n", - " annotations = []\n", - "\n", - " # make YOLOv8 predictions\n", - " result = model.predict(url_list[i])\n", - "\n", - " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", - " bbox_class_mapping)\n", - " annotations += get_yolo_polygon_annotation_predictions(\n", - " result, model, polygon_class_mapping)\n", - " annotations += get_yolo_segment_annotation_predictions(\n", - " result, model, mask_class_mapping)\n", - "\n", - " labels.append(\n", - " lb_types.Label(data={\"global_key\": global_key},\n", - " annotations=annotations))" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Import annotations to Labelbox\n", "We have created our labels and can import them to our project. For more information on importing annotations, visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload to a labeling project as pre-labels (MAL)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload MAL labels for this data row in project\nupload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\nupload_job.wait_until_done()\n\nprint(\"Errors: \", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", + "cell_type": "code", "outputs": [], - "source": [ - "# upload MAL labels for this data row in project\n", - "upload_job = lb.MALPredictionImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"mal_job\" + str(uuid.uuid4()),\n", - " predictions=labels,\n", - ")\n", - "upload_job.wait_until_done()\n", - "\n", - "print(\"Errors: \", upload_job.errors)\n", - "print(\"Status of uploads: \", upload_job.statuses)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option B: Upload to a labeling project using ground truth" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# upload label for this data row in project\nupload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\nupload_job.wait_until_done\n\nprint(\"Errors:\", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", + "cell_type": "code", "outputs": [], - "source": [ - "# upload label for this data row in project\n", - "upload_job = lb.LabelImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"label_import_job\" + str(uuid.uuid4()),\n", - " labels=labels,\n", - ")\n", - "upload_job.wait_until_done\n", - "\n", - "print(\"Errors:\", upload_job.errors)\n", - "print(\"Status of uploads: \", upload_job.statuses)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Clean up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", + "cell_type": "code", "outputs": [], - "source": [ - "# batch.delete()\n", - "# project.delete()\n", - "# dataset.delete()" - ] + "execution_count": null } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From a20bf9c9f72aefcf9a4f479a9c19aa5446570dc5 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 20:38:27 -0500 Subject: [PATCH 17/23] review changes --- .../yolo/import_yolo_annotations.ipynb | 557 +++++++++++++----- 1 file changed, 416 insertions(+), 141 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index a39c45bc0..28f4de9cd 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,18 +1,16 @@ { - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, "cells": [ { + "cell_type": "markdown", "metadata": {}, 
"source": [ - "", - " ", + "\n", + " \n", "\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -24,289 +22,566 @@ "\n", "" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "# Import YOLOv8 annotations\n", - "This notebook will provide examples of setting up a Project with annotations generated with YOLOv8. We will use the [Ultralytics](https://docs.ultralytics.com/) library to generate annotations. In this guide, we will be:\n", - "1. Importing a demo image data rows that will be labeled\n", - "2. Setting up our ontology that matches our YOLOv8 annotations\n", - "3. Importing our data rows and attaching our ontology to a project\n", - "4. Running our images through Ultralytics\n", - "5. Importing the annotations generated\n" - ], - "cell_type": "markdown" + "# Import YOLOv8 Annotations\n", + "This notebook provides examples of setting up an Annotate Project using annotations generated by the [Ultralytics](https://docs.ultralytics.com/) library of YOLOv8. In this guide, we will show you how to:\n", + "\n", + "1. Import image data rows for labeling\n", + "\n", + "2. Set up an ontology that matches the YOLOv8 annotations\n", + "\n", + "3. Import data rows and attach the ontology to a project\n", + "\n", + "4. Process images using Ultralytics\n", + "\n", + "5. Import the annotations generated" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "## Set up" - ], - "cell_type": "markdown" + "## Set Up" + ] }, { - "metadata": {}, - "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics" + ] }, { - "metadata": {}, - "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "\n", + "import ultralytics\n", + "from PIL import Image\n", + "\n", + "import uuid\n", + "import io" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "## API key and client\n", - "Provide a valid API key below to properly connect to the Labelbox client. Please review [Create API key guide](https://docs.labelbox.com/reference/create-api-key) for more information." - ], - "cell_type": "markdown" + "## API Key and Client\n", + "Replace the value of `API_KEY` with a valid [API key](https://docs.labelbox.com/reference/create-api-key) to connect to the Labelbox client." + ] }, { - "metadata": {}, - "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "API_KEY = None\n", + "client = lb.Client(api_key=API_KEY)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "## Set up a YOLOv8 model\n", - "Below, we will initialize our model for our image data rows. We are using `yolov8n-seg.pt` since it supports segmentation masks. " - ], - "cell_type": "markdown" + "## Set Up a YOLOv8 model\n", + "Initialize our model for image data rows using `yolov8n-seg.pt`, which supports segmentation masks." 
+ ] }, { - "metadata": {}, - "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", - "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, visit our [Quick start](https://docs.labelbox.com/reference/quick-start) guide." - ], - "cell_type": "markdown" + "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, see [Quick start](https://docs.labelbox.com/reference/quick-start)." + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "### Importing an image data row\n", - "\n", - "We will be using this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg) to be annotated with YOLOv8. This image has a lot of objects that can be picked up by YOLOv8. Later in this guide, we will go into more detail on the exact annotations." - ], - "cell_type": "markdown" + "### Import an Image Data Row\n", + "In this example, we use YOLOv8 to annotate this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg), which contains many objects that YOLOv8 can detect. Later in this guide, we will provide more details on the specific annotations." + ] }, { - "metadata": {}, - "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "global_key = str(uuid.uuid4())\n", + "\n", + "# create data row\n", + "data_row = {\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\":\n", + " global_key,\n", + " \"media_type\":\n", + " \"IMAGE\",\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "### Setting up an ontology and a project\n", - "You must create a matching ontology and project with the data rows you are trying to label. The ontology should include the annotations that you want to derive from YOLOv8. We will introduce and explain class mappings later in this guide, so feel free to name your ontology features anything you want. In our example, we will include a combination of bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. 
Labelbox does not support ontologies were the same feature name is present at the first level so each of our feature names need to be unique.\n" - ], - "cell_type": "markdown" + "### Set Up an Ontology and Project\n", + "\n", + "You need to create an ontology and project that match the data rows you are labeling. The ontology needs to include the annotations you want to derive from YOLOv8. Each feature name must be unique because Labelbox does not support ontologies with duplicate feature names at the first level.\n", + "\n", + "We will include bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. We will also explain class mapping later in this guide.\n" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "#### Create an ontology" - ], - "cell_type": "markdown" + "#### Create an Ontology" + ] }, { - "metadata": {}, - "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "ontology_builder = lb.OntologyBuilder(tools=[\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", + "])\n", + "\n", + "ontology = client.create_ontology(\n", + " name=\"YOLOv8 Demo Ontology\",\n", + " normalized=ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image,\n", + ")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "#### Create and set up a project" - ], - "cell_type": "markdown" + "#### Create and Set Up a Project" + ] }, { - "metadata": {}, - "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", + "\n", + "project.setup_editor(ontology)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "### Export our data rows and get our predictions\n", - "In the step below, we export our data row from our project and then add the `row_data` and `global_key` to a list to make our predictions." - ], - "cell_type": "markdown" + "### Export Data Rows and Get Predictions\n", + "\n", + "Now we can export the data row from our project. Then add the row_data and global_key to a list to make our predictions." 
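The export callback in the next cell only reads two fields from each exported record. As a rough sketch of the shape it relies on (the URL and key below are placeholders, and a real export row carries many more keys than shown), the extraction boils down to:

```python
# Abbreviated stand-in for one exported record; a real Labelbox export row
# contains many more keys than the two the callback reads.
example_record = {
    "data_row": {
        "row_data": "https://example.com/image.jpeg",  # illustrative URL
        "global_key": "example-global-key",            # illustrative key
    }
}

url = example_record["data_row"]["row_data"]
global_key = example_record["data_row"]["global_key"]
print(url, global_key)
```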
+ ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "export_task = project.export()\n", + "export_task.wait_till_done()\n", + "\n", + "# prediction list we will be populating\n", + "url_list = []\n", + "global_keys = []\n", + "\n", + "\n", + "# callback that is ran on each data row\n", + "def export_callback(output: lb.BufferedJsonConverterOutput):\n", + "\n", + " data_row = output.json\n", + "\n", + " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", + "\n", + " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", + "\n", + "\n", + "# check if export has errors\n", + "if export_task.has_errors():\n", + " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", + "\n", + "if export_task.has_result():\n", + " export_task.get_buffered_stream().start(stream_handler=export_callback)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "### Import YOLOv8 annotations to a project\n", - "Now that you have finished your initial set up we can create our predictions from YOLOv8 and import our annotations towards our project. We will be doing the following in this step:\n", - "1. Defining our import functions\n", - "2. Creating our labels\n", - "3. Importing our labels as either ground truths or MAL labels (pre-labels)" - ], - "cell_type": "markdown" + "### Import YOLOv8 Annotations to a Project\n", + "\n", + "Now that you have finished your initial setup, we can create predictions using YOLOv8 and import the annotations into our project. In this step, we will:\n", + "\n", + "1. Define our import functions\n", + "\n", + "2. Create our labels\n", + "\n", + "3. Import our labels as either ground truths or MAL labels (pre-labels)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "#### Defining our import functions\n", - "YOLOv8 supports a wide range of annotations. This guide shows importing bounding boxes, polygons, and segment masks that match our ontology. Below are the functions used for each type. These functions follow the same style, essentially navigating through our result payload from YOLOv8 and converting it to the Labelbox annotation format. All of our functions support a class mapping which maps YOLOv8 annotation names to Labelbox feature names. We have this mapping to support having different names for Labelbox features compared to YOLOv8 annotation names. It also allows us to map common YOLOv8 names to the same Labelbox feature attached to our ontology. We will define this mapping first. In our case, we are mapping `bus` and `truck` to our Labelbox feature name `Vehicle` and `person` to our Labelbox feature name `Person`. We will create a mapping per tool type." 
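As an optional aside, the three per-tool mappings defined in the next cell can also be derived from a single base mapping so they never drift apart. This is just a convenience sketch; the notebook keeps the explicit dictionaries, and the feature names match the ontology created earlier (`Person_bbox`, `Vehicle_mask`, and so on).

```python
# Optional convenience (not used by the notebook): build the three per-tool mappings
# from one base mapping so they cannot drift apart.
base_mapping = {"person": "Person", "bus": "Vehicle", "truck": "Vehicle"}

bbox_class_mapping = {k: f"{v}_bbox" for k, v in base_mapping.items()}
mask_class_mapping = {k: f"{v}_mask" for k, v in base_mapping.items()}
polygon_class_mapping = {k: f"{v}_polygon" for k, v in base_mapping.items()}

print(bbox_class_mapping)
```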
- ], - "cell_type": "markdown" + "#### Define Import Functions\n", + "\n", + "YOLOv8 supports a wide range of annotations. In this guide, we only import bounding boxes, polygons, and segment masks that match the ontology we created earlier. The following functions handle each annotation type by navigating through the YOLOv8 result payload and converting it to the Labelbox annotation format.\n", + "\n", + "All these functions support class mapping, which aligns YOLOv8 annotation names with Labelbox feature names. This mapping allows for different names in Labelbox and YOLOv8 and enables common YOLOv8 names to correspond to the same Labelbox feature in our ontology. We will define this mapping first. In our example, we map bus and truck to the Labelbox feature name Vehicle and person to Person. We will create a mapping for each tool type." + ] }, { - "metadata": {}, - "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null - }, - { + "source": [ + "bbox_class_mapping = {\n", + " \"person\": \"Person_bbox\",\n", + " \"bus\": \"Vehicle_bbox\",\n", + " \"truck\": \"Vehicle_bbox\",\n", + "}\n", + "mask_class_mapping = {\n", + " \"person\": \"Person_mask\",\n", + " \"bus\": \"Vehicle_mask\",\n", + " \"truck\": \"Vehicle_mask\",\n", + "}\n", + "polygon_class_mapping = {\n", + " \"person\": \"Person_polygon\",\n", + " \"bus\": \"Vehicle_polygon\",\n", + " \"truck\": \"Vehicle_polygon\",\n", + "}" + ] + }, + { + "cell_type": "markdown", "metadata": {}, "source": [ - "##### Bounding box" - ], - "cell_type": "markdown" + "##### Bounding Box" + ] }, { - "metadata": {}, - "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_bbox_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", + " \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " 
model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for bbox in yolo_result.boxes:\n", + " class_name = model.names[int(bbox.cls)]\n", + "\n", + " # ignore bboxes that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get bbox coordinates\n", + " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", + "\n", + " bbox_source = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Rectangle(\n", + " start=lb_types.Point(x=start_x, y=start_y),\n", + " end=lb_types.Point(x=end_x, y=end_y),\n", + " ),\n", + " )\n", + "\n", + " annotations.append(bbox_source)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "##### Segment mask" - ], - "cell_type": "markdown" + "##### Segment Mask" + ] }, { - "metadata": {}, - "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. 
You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_segment_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", + " \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for i, mask in enumerate(yolo_result.masks.data):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore segment masks that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get binary numpy array to byte array. You must resize mask to match image.\n", + " mask = (mask.numpy() * 255).astype(\"uint8\")\n", + " img = Image.fromarray(mask, \"L\")\n", + " img = img.resize(\n", + " (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", + " img_byte_arr = io.BytesIO()\n", + " img.save(img_byte_arr, format=\"PNG\")\n", + " encoded_image_bytes = img_byte_arr.getvalue()\n", + "\n", + " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", + " mask_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", + " )\n", + " annotations.append(mask_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to labelbox polygon annotations format\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", "cell_type": "code", + 
"execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_polygon_annotation_predictions(\n", + " yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", + " \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n", + "\n", + " Args:\n", + " yolo_result (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + " for yolo_result in yolo_results:\n", + " for i, coordinates in enumerate(yolo_result.masks.xy):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore polygons that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " polygon_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Polygon(points=[\n", + " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", + " for coordinate in coordinates\n", + " ]),\n", + " )\n", + " annotations.append(polygon_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "#### Creating our labels\n", + "#### Creating our Labels\n", "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. " - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# label list that will be populated\n", + "labels = []\n", + "\n", + "for i, global_key in enumerate(global_keys):\n", + " annotations = []\n", + "\n", + " # make YOLOv8 predictions\n", + " result = model.predict(url_list[i])\n", + "\n", + " # run result through each function and adding them to our annotation list\n", + " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", + " bbox_class_mapping)\n", + " annotations += get_yolo_polygon_annotation_predictions(\n", + " result, model, polygon_class_mapping)\n", + " annotations += get_yolo_segment_annotation_predictions(\n", + " result, model, mask_class_mapping)\n", + "\n", + " labels.append(\n", + " lb_types.Label(data={\"global_key\": global_key},\n", + " annotations=annotations))" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "#### Import annotations to Labelbox\n", - "We have created our labels and can import them to our project. 
For more information on importing annotations, visit our [import image annotations](https://docs.labelbox.com/reference/import-image-annotations) guide." - ], - "cell_type": "markdown" + "#### Import Annotations to Labelbox\n", + "We have created our labels and can import them to our project. For more information on importing annotations, see [import image annotations](https://docs.labelbox.com/reference/import-image-annotations)." + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "##### Option A: Upload to a labeling project as pre-labels (MAL)" - ], - "cell_type": "markdown" + "##### Option A: Upload as [Pre-labels (Model Assisted Labeling)](https://docs.labelbox.com/docs/model-assisted-labeling)\n", + "\n", + "This option is helpful for speeding up the initial labeling process and reducing the manual labeling workload for high-volume datasets." + ] }, { - "metadata": {}, - "source": "# upload MAL labels for this data row in project\nupload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\nupload_job.wait_until_done()\n\nprint(\"Errors: \", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "upload_job = lb.MALPredictionImport.create_from_objects(\n", + " client = client, \n", + " project_id = project.uid, \n", + " name=\"mal_job\"+str(uuid.uuid4()), \n", + " predictions=labels\n", + ")\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "##### Option B: Upload to a labeling project using ground truth" - ], - "cell_type": "markdown" + "#### Option B: Upload to a Labeling Project as [Ground Truths](https://docs.labelbox.com/docs/import-ground-truth)\n", + "\n", + "This option is helpful for loading high-confidence labels from another platform or previous projects that just need review rather than manual labeling effort." + ] }, { - "metadata": {}, - "source": "# upload label for this data row in project\nupload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\nupload_job.wait_until_done\n\nprint(\"Errors:\", upload_job.errors)\nprint(\"Status of uploads: \", upload_job.statuses)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "upload_job = lb.LabelImport.create_from_objects(\n", + " client = client, \n", + " project_id = project.uid, \n", + " name=\"label_import_job\"+str(uuid.uuid4()), \n", + " labels=labels\n", + ")\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ - "## Clean up\n", - "Uncomment and run the cell below to optionally delete Labelbox objects created" - ], - "cell_type": "markdown" + "## Clean Up\n", + "Uncomment and run the cell below to optionally delete Labelbox objects created." 
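Because the upload jobs above run asynchronously, you may want to confirm the last one finished and reported no errors before deleting anything. A small optional check, assuming `upload_job` is still the object returned by Option A or Option B and using the same `wait_until_done()` / `errors` pattern the earlier cells use:

```python
# Optional: confirm the most recent upload job finished and reported no errors
# before running the clean-up cell below.
upload_job.wait_until_done()

if upload_job.errors:
    print("Upload reported errors:", upload_job.errors)
else:
    print("Upload finished cleanly; safe to clean up.")
```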
+ ] }, { - "metadata": {}, - "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# batch.delete()\n", + "# project.delete()\n", + "# dataset.delete()" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" } - ] -} \ No newline at end of file + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 19e718dafe271d8a816f5649b0fcc4b01c30d9ba Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 14 Jun 2024 01:39:24 +0000 Subject: [PATCH 18/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 462 ++++-------------- 1 file changed, 103 insertions(+), 359 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 28f4de9cd..728294bac 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 Annotations\n", @@ -40,121 +42,85 @@ "4. Process images using Ultralytics\n", "\n", "5. Import the annotations generated" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", "outputs": [], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", + "cell_type": "code", "outputs": [], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "\n", - "import ultralytics\n", - "from PIL import Image\n", - "\n", - "import uuid\n", - "import io" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API Key and Client\n", "Replace the value of `API_KEY` with a valid [API key](https://docs.labelbox.com/reference/create-api-key) to connect to the Labelbox client." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up a YOLOv8 model\n", "Initialize our model for image data rows using `yolov8n-seg.pt`, which supports segmentation masks." 
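As an optional check, you can confirm the checkpoint really is a segmentation model and peek at the class names it predicts; the class mapping later in this guide is keyed on those names. This sketch loads the same checkpoint itself so it stays self-contained:

```python
import ultralytics

# Optional check on the checkpoint: a "-seg" model should report the "segment" task,
# and model.names holds the class names the later mapping is keyed on.
model = ultralytics.YOLO("yolov8n-seg.pt")

print(model.task)                       # expected: "segment"
print(list(model.names.values())[:10])  # first few class names, e.g. "person", "bus", ...
```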
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", + "cell_type": "code", "outputs": [], - "source": [ - "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, see [Quick start](https://docs.labelbox.com/reference/quick-start)." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import an Image Data Row\n", "In this example, we use YOLOv8 to annotate this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg), which contains many objects that YOLOv8 can detect. Later in this guide, we will provide more details on the specific annotations." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "global_key = str(uuid.uuid4())\n", - "\n", - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " global_key,\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Set Up an Ontology and Project\n", @@ -162,108 +128,61 @@ "You need to create an ontology and project that match the data rows you are labeling. The ontology needs to include the annotations you want to derive from YOLOv8. Each feature name must be unique because Labelbox does not support ontologies with duplicate feature names at the first level.\n", "\n", "We will include bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. 
We will also explain class mapping later in this guide.\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an Ontology" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", + "cell_type": "code", "outputs": [], - "source": [ - "ontology_builder = lb.OntologyBuilder(tools=[\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", - "])\n", - "\n", - "ontology = client.create_ontology(\n", - " name=\"YOLOv8 Demo Ontology\",\n", - " normalized=ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image,\n", - ")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and Set Up a Project" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", + "cell_type": "code", "outputs": [], - "source": [ - "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", - "\n", - "project.setup_editor(ontology)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Export Data Rows and Get Predictions\n", "\n", "Now we can export the data row from our project. Then add the row_data and global_key to a list to make our predictions." 
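After the export cell below finishes, `url_list` and `global_keys` are filled in lockstep by the callback, and the prediction loop later assumes they stay index-aligned. An optional sanity check you could run afterwards (not part of the original walkthrough):

```python
# Optional sanity check to run after the export cell has finished:
# the prediction loop later indexes url_list and global_keys in lockstep,
# so they should always have the same length.
assert len(url_list) == len(global_keys)

for url, key in zip(url_list, global_keys):
    print(key, "->", url[:80])
```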
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", + "cell_type": "code", "outputs": [], - "source": [ - "export_task = project.export()\n", - "export_task.wait_till_done()\n", - "\n", - "# prediction list we will be populating\n", - "url_list = []\n", - "global_keys = []\n", - "\n", - "\n", - "# callback that is ran on each data row\n", - "def export_callback(output: lb.BufferedJsonConverterOutput):\n", - "\n", - " data_row = output.json\n", - "\n", - " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", - "\n", - " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", - "\n", - "\n", - "# check if export has errors\n", - "if export_task.has_errors():\n", - " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", - "\n", - "if export_task.has_result():\n", - " export_task.get_buffered_stream().start(stream_handler=export_callback)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 Annotations to a Project\n", @@ -275,10 +194,10 @@ "2. Create our labels\n", "\n", "3. Import our labels as either ground truths or MAL labels (pre-labels)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Define Import Functions\n", @@ -286,302 +205,127 @@ "YOLOv8 supports a wide range of annotations. In this guide, we only import bounding boxes, polygons, and segment masks that match the ontology we created earlier. The following functions handle each annotation type by navigating through the YOLOv8 result payload and converting it to the Labelbox annotation format.\n", "\n", "All these functions support class mapping, which aligns YOLOv8 annotation names with Labelbox feature names. This mapping allows for different names in Labelbox and YOLOv8 and enables common YOLOv8 names to correspond to the same Labelbox feature in our ontology. We will define this mapping first. In our example, we map bus and truck to the Labelbox feature name Vehicle and person to Person. We will create a mapping for each tool type." 
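The mapping is also where you would widen coverage later. For example, to have YOLOv8 `car` detections land on the same `Vehicle` features, only the dictionaries need an extra entry; `car` is shown here purely as an illustration and is not used elsewhere in this notebook.

```python
# Illustrative only: widening the mapping so YOLOv8 "car" detections also become
# Vehicle features. The converter functions and ontology tools stay the same;
# unmapped classes are still skipped.
bbox_class_mapping = {
    "person": "Person_bbox",
    "bus": "Vehicle_bbox",
    "truck": "Vehicle_bbox",
    "car": "Vehicle_bbox",  # extra entry, not part of the original walkthrough
}
```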
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", + "cell_type": "code", "outputs": [], - "source": [ - "bbox_class_mapping = {\n", - " \"person\": \"Person_bbox\",\n", - " \"bus\": \"Vehicle_bbox\",\n", - " \"truck\": \"Vehicle_bbox\",\n", - "}\n", - "mask_class_mapping = {\n", - " \"person\": \"Person_mask\",\n", - " \"bus\": \"Vehicle_mask\",\n", - " \"truck\": \"Vehicle_mask\",\n", - "}\n", - "polygon_class_mapping = {\n", - " \"person\": \"Person_polygon\",\n", - " \"bus\": \"Vehicle_polygon\",\n", - " \"truck\": \"Vehicle_polygon\",\n", - "}" - ] - }, - { - "cell_type": "markdown", + "execution_count": null + }, + { "metadata": {}, "source": [ "##### Bounding Box" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_bbox_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", - " \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for bbox in yolo_result.boxes:\n", - " class_name = model.names[int(bbox.cls)]\n", - "\n", - " # ignore bboxes that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get bbox coordinates\n", - " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", - "\n", - " bbox_source = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Rectangle(\n", - " start=lb_types.Point(x=start_x, y=start_y),\n", - " end=lb_types.Point(x=end_x, y=end_y),\n", - " 
),\n", - " )\n", - "\n", - " annotations.append(bbox_source)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_segment_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", - " \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for i, mask in enumerate(yolo_result.masks.data):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore segment masks that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get binary numpy array to byte array. 
You must resize mask to match image.\n", - " mask = (mask.numpy() * 255).astype(\"uint8\")\n", - " img = Image.fromarray(mask, \"L\")\n", - " img = img.resize(\n", - " (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", - " img_byte_arr = io.BytesIO()\n", - " img.save(img_byte_arr, format=\"PNG\")\n", - " encoded_image_bytes = img_byte_arr.getvalue()\n", - "\n", - " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", - " mask_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", - " )\n", - " annotations.append(mask_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_polygon_annotation_predictions(\n", - " yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", - " \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n", - "\n", - " Args:\n", - " yolo_result (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - " for yolo_result in yolo_results:\n", - " for i, coordinates in enumerate(yolo_result.masks.xy):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore polygons that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " polygon_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Polygon(points=[\n", - " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", - " for coordinate in coordinates\n", - " ]),\n", - " )\n", - " annotations.append(polygon_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our Labels\n", "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", + "cell_type": "code", "outputs": [], - "source": [ - "# label list that will be populated\n", - "labels = []\n", - "\n", - "for i, global_key in enumerate(global_keys):\n", - " annotations = []\n", - "\n", - " # make YOLOv8 predictions\n", - " result = model.predict(url_list[i])\n", - "\n", - " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", - " bbox_class_mapping)\n", - " annotations += get_yolo_polygon_annotation_predictions(\n", - " result, model, polygon_class_mapping)\n", - " annotations += get_yolo_segment_annotation_predictions(\n", - " result, model, mask_class_mapping)\n", - "\n", - " labels.append(\n", - " lb_types.Label(data={\"global_key\": global_key},\n", - " annotations=annotations))" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Import Annotations to Labelbox\n", "We have created our labels and can import them to our project. For more information on importing annotations, see [import image annotations](https://docs.labelbox.com/reference/import-image-annotations)." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload as [Pre-labels (Model Assisted Labeling)](https://docs.labelbox.com/docs/model-assisted-labeling)\n", "\n", "This option is helpful for speeding up the initial labeling process and reducing the manual labeling workload for high-volume datasets." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "upload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "upload_job = lb.MALPredictionImport.create_from_objects(\n", - " client = client, \n", - " project_id = project.uid, \n", - " name=\"mal_job\"+str(uuid.uuid4()), \n", - " predictions=labels\n", - ")\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Option B: Upload to a Labeling Project as [Ground Truths](https://docs.labelbox.com/docs/import-ground-truth)\n", "\n", "This option is helpful for loading high-confidence labels from another platform or previous projects that just need review rather than manual labeling effort." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "upload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "upload_job = lb.LabelImport.create_from_objects(\n", - " client = client, \n", - " project_id = project.uid, \n", - " name=\"label_import_job\"+str(uuid.uuid4()), \n", - " labels=labels\n", - ")\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Clean Up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", + "cell_type": "code", "outputs": [], - "source": [ - "# batch.delete()\n", - "# project.delete()\n", - "# dataset.delete()" - ] + "execution_count": null } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From 902be1079d0664a9c0db8e6d29d6794deab12e4a Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 20:43:06 -0500 Subject: [PATCH 19/23] added some inline code --- .../yolo/import_yolo_annotations.ipynb | 464 ++++++++++++++---- 1 file changed, 360 insertions(+), 104 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 728294bac..875f92780 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,18 +1,16 @@ { - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, "cells": [ { + "cell_type": "markdown", "metadata": {}, "source": [ - "", - " ", + "\n", + " \n", "\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -24,10 +22,10 @@ "\n", "" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 Annotations\n", @@ -42,85 +40,121 @@ "4. Process images using Ultralytics\n", "\n", "5. 
Import the annotations generated" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "%pip install -q --upgrade \"labelbox[data]\"\n", + "%pip install -q --upgrade ultralytics" + ] }, { - "metadata": {}, - "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "import labelbox as lb\n", + "import labelbox.types as lb_types\n", + "\n", + "import ultralytics\n", + "from PIL import Image\n", + "\n", + "import uuid\n", + "import io" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## API Key and Client\n", "Replace the value of `API_KEY` with a valid [API key](https://docs.labelbox.com/reference/create-api-key) to connect to the Labelbox client." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "API_KEY = None\n", + "client = lb.Client(api_key=API_KEY)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up a YOLOv8 model\n", "Initialize our model for image data rows using `yolov8n-seg.pt`, which supports segmentation masks." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, see [Quick start](https://docs.labelbox.com/reference/quick-start)." - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Import an Image Data Row\n", "In this example, we use YOLOv8 to annotate this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg), which contains many objects that YOLOv8 can detect. Later in this guide, we will provide more details on the specific annotations." 
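Note on the data-row cell that follows: it creates a single image, but the same `create_data_rows` call accepts a list, so the demo extends naturally to several images. A sketch of that variant is below; it assumes the `dataset` and the imports from the surrounding cells, and the extra URLs are placeholders.

```python
# Hypothetical multi-image variant of the data-row cell below: one dict per
# image, each with its own global key, imported in a single call.
image_urls = [
    "https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg",
    # add further image URLs here
]
data_rows = [{
    "row_data": url,
    "global_key": str(uuid.uuid4()),
    "media_type": "IMAGE",
} for url in image_urls]

task = dataset.create_data_rows(data_rows)
task.wait_till_done()
print(f"Errors: {task.errors}")
```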
- ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "global_key = str(uuid.uuid4())\n", + "\n", + "# create data row\n", + "data_row = {\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\":\n", + " global_key,\n", + " \"media_type\":\n", + " \"IMAGE\",\n", + "}\n", + "\n", + "# create dataset and import data row\n", + "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", + "task = dataset.create_data_rows([data_row])\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Set Up an Ontology and Project\n", @@ -128,61 +162,108 @@ "You need to create an ontology and project that match the data rows you are labeling. The ontology needs to include the annotations you want to derive from YOLOv8. Each feature name must be unique because Labelbox does not support ontologies with duplicate feature names at the first level.\n", "\n", "We will include bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. 
We will also explain class mapping later in this guide.\n" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an Ontology" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "ontology_builder = lb.OntologyBuilder(tools=[\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", + " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", + "])\n", + "\n", + "ontology = client.create_ontology(\n", + " name=\"YOLOv8 Demo Ontology\",\n", + " normalized=ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image,\n", + ")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and Set Up a Project" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", + "\n", + "project.setup_editor(ontology)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Export Data Rows and Get Predictions\n", "\n", "Now we can export the data row from our project. Then add the row_data and global_key to a list to make our predictions." 
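Since this walkthrough labels a single data row created moments earlier, the export round-trip in the next cells mainly demonstrates the general pattern. If the variables from the data-row cell are still in scope, the two prediction lists can be filled directly; this shortcut is not part of the notebook.

```python
# Equivalent result for the single-image demo (assumes `data_row` and
# `global_key` from the earlier cell are still defined in the session).
url_list = [data_row["row_data"]]
global_keys = [global_key]
```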
- ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "export_task = project.export()\n", + "export_task.wait_till_done()\n", + "\n", + "# prediction list we will be populating\n", + "url_list = []\n", + "global_keys = []\n", + "\n", + "\n", + "# callback that is ran on each data row\n", + "def export_callback(output: lb.BufferedJsonConverterOutput):\n", + "\n", + " data_row = output.json\n", + "\n", + " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", + "\n", + " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", + "\n", + "\n", + "# check if export has errors\n", + "if export_task.has_errors():\n", + " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", + "\n", + "if export_task.has_result():\n", + " export_task.get_buffered_stream().start(stream_handler=export_callback)" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 Annotations to a Project\n", @@ -194,138 +275,313 @@ "2. Create our labels\n", "\n", "3. Import our labels as either ground truths or MAL labels (pre-labels)" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Define Import Functions\n", "\n", "YOLOv8 supports a wide range of annotations. In this guide, we only import bounding boxes, polygons, and segment masks that match the ontology we created earlier. The following functions handle each annotation type by navigating through the YOLOv8 result payload and converting it to the Labelbox annotation format.\n", "\n", - "All these functions support class mapping, which aligns YOLOv8 annotation names with Labelbox feature names. This mapping allows for different names in Labelbox and YOLOv8 and enables common YOLOv8 names to correspond to the same Labelbox feature in our ontology. We will define this mapping first. In our example, we map bus and truck to the Labelbox feature name Vehicle and person to Person. We will create a mapping for each tool type." - ], - "cell_type": "markdown" + "All these functions support class mapping, which aligns YOLOv8 annotation names with Labelbox feature names. This mapping allows for different names in Labelbox and YOLOv8 and enables common YOLOv8 names to correspond to the same Labelbox feature in our ontology. We will define this mapping first. In our example, we map `bus` and `truck` to the Labelbox feature name `Vehicle` and person to `Person`. We will create a mapping for each tool type." 
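The three mapping dictionaries defined in the next cell are the only link between YOLOv8 class names and the ontology features created earlier, so a typo in a feature name silently drops annotations. A small optional guard, not part of the notebook and assuming the mapping dicts below have been run, can catch that early.

```python
# Optional guard: every mapped Labelbox feature name should match a tool name
# in the "YOLOv8 Demo Ontology" defined above.
ontology_tool_names = {
    "Person_bbox", "Vehicle_bbox",
    "Person_mask", "Vehicle_mask",
    "Person_polygon", "Vehicle_polygon",
}
for mapping in (bbox_class_mapping, mask_class_mapping, polygon_class_mapping):
    unknown = set(mapping.values()) - ontology_tool_names
    assert not unknown, f"Feature names missing from the ontology: {unknown}"
```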
+ ] }, { - "metadata": {}, - "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null - }, - { + "source": [ + "bbox_class_mapping = {\n", + " \"person\": \"Person_bbox\",\n", + " \"bus\": \"Vehicle_bbox\",\n", + " \"truck\": \"Vehicle_bbox\",\n", + "}\n", + "mask_class_mapping = {\n", + " \"person\": \"Person_mask\",\n", + " \"bus\": \"Vehicle_mask\",\n", + " \"truck\": \"Vehicle_mask\",\n", + "}\n", + "polygon_class_mapping = {\n", + " \"person\": \"Person_polygon\",\n", + " \"bus\": \"Vehicle_polygon\",\n", + " \"truck\": \"Vehicle_polygon\",\n", + "}" + ] + }, + { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Bounding Box" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_bbox_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", + " \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for bbox in yolo_result.boxes:\n", + " class_name = model.names[int(bbox.cls)]\n", + "\n", + " # ignore bboxes that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get bbox coordinates\n", + " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", + "\n", + " bbox_source = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Rectangle(\n", + " start=lb_types.Point(x=start_x, y=start_y),\n", + " end=lb_types.Point(x=end_x, y=end_y),\n", + " ),\n", + " 
)\n", + "\n", + " annotations.append(bbox_source)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_segment_annotation_predictions(\n", + " yolo_results, model,\n", + " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", + " \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n", + "\n", + " Args:\n", + " yolo_results (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + "\n", + " for yolo_result in yolo_results:\n", + " for i, mask in enumerate(yolo_result.masks.data):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore segment masks that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " # get binary numpy array to byte array. 
You must resize mask to match image.\n", + " mask = (mask.numpy() * 255).astype(\"uint8\")\n", + " img = Image.fromarray(mask, \"L\")\n", + " img = img.resize(\n", + " (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", + " img_byte_arr = io.BytesIO()\n", + " img.save(img_byte_arr, format=\"PNG\")\n", + " encoded_image_bytes = img_byte_arr.getvalue()\n", + "\n", + " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", + " mask_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", + " )\n", + " annotations.append(mask_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "def get_yolo_polygon_annotation_predictions(\n", + " yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", + " \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n", + "\n", + " Args:\n", + " yolo_result (Results): YOLOv8 prediction results.\n", + " model (Model): YOLOv8 model.\n", + " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", + " Returns:\n", + " list[lb_types.ObjectAnnotation]\n", + " \"\"\"\n", + " annotations = []\n", + " for yolo_result in yolo_results:\n", + " for i, coordinates in enumerate(yolo_result.masks.xy):\n", + " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", + "\n", + " # ignore polygons that are not included in our mapping\n", + " if not class_name in ontology_mapping.keys():\n", + " continue\n", + "\n", + " polygon_annotation = lb_types.ObjectAnnotation(\n", + " name=ontology_mapping[class_name],\n", + " value=lb_types.Polygon(points=[\n", + " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", + " for coordinate in coordinates\n", + " ]),\n", + " )\n", + " annotations.append(polygon_annotation)\n", + "\n", + " return annotations" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our Labels\n", "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# label list that will be populated\n", + "labels = []\n", + "\n", + "for i, global_key in enumerate(global_keys):\n", + " annotations = []\n", + "\n", + " # make YOLOv8 predictions\n", + " result = model.predict(url_list[i])\n", + "\n", + " # run result through each function and adding them to our annotation list\n", + " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", + " bbox_class_mapping)\n", + " annotations += get_yolo_polygon_annotation_predictions(\n", + " result, model, polygon_class_mapping)\n", + " annotations += get_yolo_segment_annotation_predictions(\n", + " result, model, mask_class_mapping)\n", + "\n", + " labels.append(\n", + " lb_types.Label(data={\"global_key\": global_key},\n", + " annotations=annotations))" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Import Annotations to Labelbox\n", "We have created our labels and can import them to our project. For more information on importing annotations, see [import image annotations](https://docs.labelbox.com/reference/import-image-annotations)." - ], - "cell_type": "markdown" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload as [Pre-labels (Model Assisted Labeling)](https://docs.labelbox.com/docs/model-assisted-labeling)\n", "\n", "This option is helpful for speeding up the initial labeling process and reducing the manual labeling workload for high-volume datasets." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "upload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "upload_job = lb.MALPredictionImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"mal_job\" + str(uuid.uuid4()),\n", + " predictions=labels,\n", + ")\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "#### Option B: Upload to a Labeling Project as [Ground Truths](https://docs.labelbox.com/docs/import-ground-truth)\n", "\n", "This option is helpful for loading high-confidence labels from another platform or previous projects that just need review rather than manual labeling effort." 
- ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "upload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "upload_job = lb.LabelImport.create_from_objects(\n", + " client=client,\n", + " project_id=project.uid,\n", + " name=\"label_import_job\" + str(uuid.uuid4()),\n", + " labels=labels,\n", + ")\n", + "\n", + "print(f\"Errors: {upload_job.errors}\")\n", + "print(f\"Status of uploads: {upload_job.statuses}\")" + ] }, { + "cell_type": "markdown", "metadata": {}, "source": [ "## Clean Up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created." - ], - "cell_type": "markdown" + ] }, { - "metadata": {}, - "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# batch.delete()\n", + "# project.delete()\n", + "# dataset.delete()" + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 4a9804388ebd197dcbd8229271d9e563a8c465cb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 14 Jun 2024 01:44:05 +0000 Subject: [PATCH 20/23] :art: Cleaned --- .../yolo/import_yolo_annotations.ipynb | 462 ++++-------------- 1 file changed, 103 insertions(+), 359 deletions(-) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolo_annotations.ipynb index 875f92780..79da0117b 100644 --- a/examples/integrations/yolo/import_yolo_annotations.ipynb +++ b/examples/integrations/yolo/import_yolo_annotations.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - " \n", + "", + " ", "\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Import YOLOv8 Annotations\n", @@ -40,121 +42,85 @@ "4. Process images using Ultralytics\n", "\n", "5. 
Import the annotations generated" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "%pip install -q --upgrade \"labelbox[data]\"\n%pip install -q --upgrade ultralytics", + "cell_type": "code", "outputs": [], - "source": [ - "%pip install -q --upgrade \"labelbox[data]\"\n", - "%pip install -q --upgrade ultralytics" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "import labelbox as lb\nimport labelbox.types as lb_types\n\nimport ultralytics\nfrom PIL import Image\n\nimport uuid\nimport io", + "cell_type": "code", "outputs": [], - "source": [ - "import labelbox as lb\n", - "import labelbox.types as lb_types\n", - "\n", - "import ultralytics\n", - "from PIL import Image\n", - "\n", - "import uuid\n", - "import io" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## API Key and Client\n", "Replace the value of `API_KEY` with a valid [API key](https://docs.labelbox.com/reference/create-api-key) to connect to the Labelbox client." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "API_KEY = None\nclient = lb.Client(api_key=API_KEY)", + "cell_type": "code", "outputs": [], - "source": [ - "API_KEY = None\n", - "client = lb.Client(api_key=API_KEY)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Set Up a YOLOv8 model\n", "Initialize our model for image data rows using `yolov8n-seg.pt`, which supports segmentation masks." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "model = ultralytics.YOLO(\"yolov8n-seg.pt\")", + "cell_type": "code", "outputs": [], - "source": [ - "model = ultralytics.YOLO(\"yolov8n-seg.pt\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Example: Import YOLOv8 Annotations\n", "\n", "The first few steps of this guide will demonstrate a basic workflow of creating data rows and setting up a project. For a quick, complete overview of this process, see [Quick start](https://docs.labelbox.com/reference/quick-start)." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import an Image Data Row\n", "In this example, we use YOLOv8 to annotate this [image](https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg), which contains many objects that YOLOv8 can detect. Later in this guide, we will provide more details on the specific annotations." 
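Before creating any Labelbox objects, it can be reassuring to eyeball what YOLOv8 detects on this image. The optional preview below is not part of the notebook; it assumes the `model` and the `Image` import from the cells above, and it assumes `plot()` returns a BGR uint8 array as in current Ultralytics releases.

```python
# Quick visual sanity check of the YOLOv8 detections on the demo image.
annotated_bgr = model.predict(
    "https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg"
)[0].plot()  # annotated image as a BGR numpy array
Image.fromarray(annotated_bgr[..., ::-1].copy()).save("yolov8_preview.png")  # BGR -> RGB
```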
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "global_key = str(uuid.uuid4())\n\n# create data row\ndata_row = {\n \"row_data\":\n \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n \"global_key\":\n global_key,\n \"media_type\":\n \"IMAGE\",\n}\n\n# create dataset and import data row\ndataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\ntask = dataset.create_data_rows([data_row])\ntask.wait_till_done()\n\nprint(f\"Errors: {task.errors}\")", + "cell_type": "code", "outputs": [], - "source": [ - "global_key = str(uuid.uuid4())\n", - "\n", - "# create data row\n", - "data_row = {\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", - " \"global_key\":\n", - " global_key,\n", - " \"media_type\":\n", - " \"IMAGE\",\n", - "}\n", - "\n", - "# create dataset and import data row\n", - "dataset = client.create_dataset(name=\"YOLOv8 Demo Dataset\")\n", - "task = dataset.create_data_rows([data_row])\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Set Up an Ontology and Project\n", @@ -162,108 +128,61 @@ "You need to create an ontology and project that match the data rows you are labeling. The ontology needs to include the annotations you want to derive from YOLOv8. Each feature name must be unique because Labelbox does not support ontologies with duplicate feature names at the first level.\n", "\n", "We will include bounding boxes, segment masks, and polygon tools to demonstrate converting each type of annotation from YOLOv8. 
We will also explain class mapping later in this guide.\n" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create an Ontology" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "ontology_builder = lb.OntologyBuilder(tools=[\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n])\n\nontology = client.create_ontology(\n name=\"YOLOv8 Demo Ontology\",\n normalized=ontology_builder.asdict(),\n media_type=lb.MediaType.Image,\n)", + "cell_type": "code", "outputs": [], - "source": [ - "ontology_builder = lb.OntologyBuilder(tools=[\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Vehicle_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person_bbox\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Vehicle_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.RASTER_SEGMENTATION, name=\"Person_mask\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Vehicle_polygon\"),\n", - " lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"Person_polygon\"),\n", - "])\n", - "\n", - "ontology = client.create_ontology(\n", - " name=\"YOLOv8 Demo Ontology\",\n", - " normalized=ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image,\n", - ")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Create and Set Up a Project" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "project = client.create_project(name=\"YOLOv8 Demo Project\",\n media_type=lb.MediaType.Image)\n\nproject.create_batch(name=\"batch 1\", global_keys=[global_key])\n\nproject.setup_editor(ontology)", + "cell_type": "code", "outputs": [], - "source": [ - "project = client.create_project(name=\"YOLOv8 Demo Project\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.create_batch(name=\"batch 1\", global_keys=[global_key])\n", - "\n", - "project.setup_editor(ontology)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Export Data Rows and Get Predictions\n", "\n", "Now we can export the data row from our project. Then add the row_data and global_key to a list to make our predictions." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Export data" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "export_task = project.export()\nexport_task.wait_till_done()\n\n# prediction list we will be populating\nurl_list = []\nglobal_keys = []\n\n\n# callback that is ran on each data row\ndef export_callback(output: lb.BufferedJsonConverterOutput):\n\n data_row = output.json\n\n url_list.append(data_row[\"data_row\"][\"row_data\"])\n\n global_keys.append(data_row[\"data_row\"][\"global_key\"])\n\n\n# check if export has errors\nif export_task.has_errors():\n export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n\nif export_task.has_result():\n export_task.get_buffered_stream().start(stream_handler=export_callback)", + "cell_type": "code", "outputs": [], - "source": [ - "export_task = project.export()\n", - "export_task.wait_till_done()\n", - "\n", - "# prediction list we will be populating\n", - "url_list = []\n", - "global_keys = []\n", - "\n", - "\n", - "# callback that is ran on each data row\n", - "def export_callback(output: lb.BufferedJsonConverterOutput):\n", - "\n", - " data_row = output.json\n", - "\n", - " url_list.append(data_row[\"data_row\"][\"row_data\"])\n", - "\n", - " global_keys.append(data_row[\"data_row\"][\"global_key\"])\n", - "\n", - "\n", - "# check if export has errors\n", - "if export_task.has_errors():\n", - " export_task.get_buffered_stream(stream_type=lb.StreamType.ERRORS).start()\n", - "\n", - "if export_task.has_result():\n", - " export_task.get_buffered_stream().start(stream_handler=export_callback)" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Import YOLOv8 Annotations to a Project\n", @@ -275,10 +194,10 @@ "2. Create our labels\n", "\n", "3. Import our labels as either ground truths or MAL labels (pre-labels)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Define Import Functions\n", @@ -286,302 +205,127 @@ "YOLOv8 supports a wide range of annotations. In this guide, we only import bounding boxes, polygons, and segment masks that match the ontology we created earlier. The following functions handle each annotation type by navigating through the YOLOv8 result payload and converting it to the Labelbox annotation format.\n", "\n", "All these functions support class mapping, which aligns YOLOv8 annotation names with Labelbox feature names. This mapping allows for different names in Labelbox and YOLOv8 and enables common YOLOv8 names to correspond to the same Labelbox feature in our ontology. We will define this mapping first. In our example, we map `bus` and `truck` to the Labelbox feature name `Vehicle` and person to `Person`. We will create a mapping for each tool type." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "bbox_class_mapping = {\n \"person\": \"Person_bbox\",\n \"bus\": \"Vehicle_bbox\",\n \"truck\": \"Vehicle_bbox\",\n}\nmask_class_mapping = {\n \"person\": \"Person_mask\",\n \"bus\": \"Vehicle_mask\",\n \"truck\": \"Vehicle_mask\",\n}\npolygon_class_mapping = {\n \"person\": \"Person_polygon\",\n \"bus\": \"Vehicle_polygon\",\n \"truck\": \"Vehicle_polygon\",\n}", + "cell_type": "code", "outputs": [], - "source": [ - "bbox_class_mapping = {\n", - " \"person\": \"Person_bbox\",\n", - " \"bus\": \"Vehicle_bbox\",\n", - " \"truck\": \"Vehicle_bbox\",\n", - "}\n", - "mask_class_mapping = {\n", - " \"person\": \"Person_mask\",\n", - " \"bus\": \"Vehicle_mask\",\n", - " \"truck\": \"Vehicle_mask\",\n", - "}\n", - "polygon_class_mapping = {\n", - " \"person\": \"Person_polygon\",\n", - " \"bus\": \"Vehicle_polygon\",\n", - " \"truck\": \"Vehicle_polygon\",\n", - "}" - ] - }, - { - "cell_type": "markdown", + "execution_count": null + }, + { "metadata": {}, "source": [ "##### Bounding Box" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_bbox_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for bbox in yolo_result.boxes:\n class_name = model.names[int(bbox.cls)]\n\n # ignore bboxes that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get bbox coordinates\n start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n\n bbox_source = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Rectangle(\n start=lb_types.Point(x=start_x, y=start_y),\n end=lb_types.Point(x=end_x, y=end_y),\n ),\n )\n\n annotations.append(bbox_source)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_bbox_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.ObjectAnnotation]:\n", - " \"\"\"Convert YOLOV8 model bbox prediction results to Labelbox annotations format.\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for bbox in yolo_result.boxes:\n", - " class_name = model.names[int(bbox.cls)]\n", - "\n", - " # ignore bboxes that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get bbox coordinates\n", - " start_x, start_y, end_x, end_y = bbox.xyxy.tolist()[0]\n", - "\n", - " bbox_source = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Rectangle(\n", - " start=lb_types.Point(x=start_x, y=start_y),\n", - " end=lb_types.Point(x=end_x, y=end_y),\n", - " 
),\n", - " )\n", - "\n", - " annotations.append(bbox_source)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Segment Mask" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_segment_annotation_predictions(\n yolo_results, model,\n ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n\n Args:\n yolo_results (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n\n for yolo_result in yolo_results:\n for i, mask in enumerate(yolo_result.masks.data):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore segment masks that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n # get binary numpy array to byte array. You must resize mask to match image.\n mask = (mask.numpy() * 255).astype(\"uint8\")\n img = Image.fromarray(mask, \"L\")\n img = img.resize(\n (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format=\"PNG\")\n encoded_image_bytes = img_byte_arr.getvalue()\n\n mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n mask_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n )\n annotations.append(mask_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_segment_annotation_predictions(\n", - " yolo_results, model,\n", - " ontology_mapping: dict[str:str]) -> list[lb_types.Label]:\n", - " \"\"\"Convert YOLOV8 segment mask prediction results to Labelbox annotations format\n", - "\n", - " Args:\n", - " yolo_results (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - "\n", - " for yolo_result in yolo_results:\n", - " for i, mask in enumerate(yolo_result.masks.data):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore segment masks that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " # get binary numpy array to byte array. 
You must resize mask to match image.\n", - " mask = (mask.numpy() * 255).astype(\"uint8\")\n", - " img = Image.fromarray(mask, \"L\")\n", - " img = img.resize(\n", - " (yolo_result.orig_shape[1], yolo_result.orig_shape[0]))\n", - " img_byte_arr = io.BytesIO()\n", - " img.save(img_byte_arr, format=\"PNG\")\n", - " encoded_image_bytes = img_byte_arr.getvalue()\n", - "\n", - " mask_data = lb_types.MaskData(im_bytes=encoded_image_bytes)\n", - " mask_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Mask(mask=mask_data, color=(255, 255, 255)),\n", - " )\n", - " annotations.append(mask_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Polygon" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "def get_yolo_polygon_annotation_predictions(\n yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n\n Args:\n yolo_result (Results): YOLOv8 prediction results.\n model (Model): YOLOv8 model.\n ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n Returns:\n list[lb_types.ObjectAnnotation]\n \"\"\"\n annotations = []\n for yolo_result in yolo_results:\n for i, coordinates in enumerate(yolo_result.masks.xy):\n class_name = model.names[int(yolo_result.boxes[i].cls)]\n\n # ignore polygons that are not included in our mapping\n if not class_name in ontology_mapping.keys():\n continue\n\n polygon_annotation = lb_types.ObjectAnnotation(\n name=ontology_mapping[class_name],\n value=lb_types.Polygon(points=[\n lb_types.Point(x=coordinate[0], y=coordinate[1])\n for coordinate in coordinates\n ]),\n )\n annotations.append(polygon_annotation)\n\n return annotations", + "cell_type": "code", "outputs": [], - "source": [ - "def get_yolo_polygon_annotation_predictions(\n", - " yolo_results, model, ontology_mapping: dict[str:str]) -> list[lb.Label]:\n", - " \"\"\"Convert YOLOv8 model results to Labelbox polygon annotations format.\n", - "\n", - " Args:\n", - " yolo_result (Results): YOLOv8 prediction results.\n", - " model (Model): YOLOv8 model.\n", - " ontology_mapping (dict[: ]): Allows mapping between YOLOv8 class names and different Labelbox feature names.\n", - " Returns:\n", - " list[lb_types.ObjectAnnotation]\n", - " \"\"\"\n", - " annotations = []\n", - " for yolo_result in yolo_results:\n", - " for i, coordinates in enumerate(yolo_result.masks.xy):\n", - " class_name = model.names[int(yolo_result.boxes[i].cls)]\n", - "\n", - " # ignore polygons that are not included in our mapping\n", - " if not class_name in ontology_mapping.keys():\n", - " continue\n", - "\n", - " polygon_annotation = lb_types.ObjectAnnotation(\n", - " name=ontology_mapping[class_name],\n", - " value=lb_types.Polygon(points=[\n", - " lb_types.Point(x=coordinate[0], y=coordinate[1])\n", - " for coordinate in coordinates\n", - " ]),\n", - " )\n", - " annotations.append(polygon_annotation)\n", - "\n", - " return annotations" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Creating our Labels\n", "Now that we have defined our functions to create our Labelbox annotations, we can run each image through YOLOv8 to obtain our predictions and then use those results with our global keys to create our labels. 
" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# label list that will be populated\nlabels = []\n\nfor i, global_key in enumerate(global_keys):\n annotations = []\n\n # make YOLOv8 predictions\n result = model.predict(url_list[i])\n\n # run result through each function and adding them to our annotation list\n annotations += get_yolo_bbox_annotation_predictions(result, model,\n bbox_class_mapping)\n annotations += get_yolo_polygon_annotation_predictions(\n result, model, polygon_class_mapping)\n annotations += get_yolo_segment_annotation_predictions(\n result, model, mask_class_mapping)\n\n labels.append(\n lb_types.Label(data={\"global_key\": global_key},\n annotations=annotations))", + "cell_type": "code", "outputs": [], - "source": [ - "# label list that will be populated\n", - "labels = []\n", - "\n", - "for i, global_key in enumerate(global_keys):\n", - " annotations = []\n", - "\n", - " # make YOLOv8 predictions\n", - " result = model.predict(url_list[i])\n", - "\n", - " # run result through each function and adding them to our annotation list\n", - " annotations += get_yolo_bbox_annotation_predictions(result, model,\n", - " bbox_class_mapping)\n", - " annotations += get_yolo_polygon_annotation_predictions(\n", - " result, model, polygon_class_mapping)\n", - " annotations += get_yolo_segment_annotation_predictions(\n", - " result, model, mask_class_mapping)\n", - "\n", - " labels.append(\n", - " lb_types.Label(data={\"global_key\": global_key},\n", - " annotations=annotations))" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Import Annotations to Labelbox\n", "We have created our labels and can import them to our project. For more information on importing annotations, see [import image annotations](https://docs.labelbox.com/reference/import-image-annotations)." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "##### Option A: Upload as [Pre-labels (Model Assisted Labeling)](https://docs.labelbox.com/docs/model-assisted-labeling)\n", "\n", "This option is helpful for speeding up the initial labeling process and reducing the manual labeling workload for high-volume datasets." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "upload_job = lb.MALPredictionImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"mal_job\" + str(uuid.uuid4()),\n predictions=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "upload_job = lb.MALPredictionImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"mal_job\" + str(uuid.uuid4()),\n", - " predictions=labels,\n", - ")\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "#### Option B: Upload to a Labeling Project as [Ground Truths](https://docs.labelbox.com/docs/import-ground-truth)\n", "\n", "This option is helpful for loading high-confidence labels from another platform or previous projects that just need review rather than manual labeling effort." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "upload_job = lb.LabelImport.create_from_objects(\n client=client,\n project_id=project.uid,\n name=\"label_import_job\" + str(uuid.uuid4()),\n labels=labels,\n)\n\nprint(f\"Errors: {upload_job.errors}\")\nprint(f\"Status of uploads: {upload_job.statuses}\")", + "cell_type": "code", "outputs": [], - "source": [ - "upload_job = lb.LabelImport.create_from_objects(\n", - " client=client,\n", - " project_id=project.uid,\n", - " name=\"label_import_job\" + str(uuid.uuid4()),\n", - " labels=labels,\n", - ")\n", - "\n", - "print(f\"Errors: {upload_job.errors}\")\n", - "print(f\"Status of uploads: {upload_job.statuses}\")" - ] + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "## Clean Up\n", "Uncomment and run the cell below to optionally delete Labelbox objects created." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": "# batch.delete()\n# project.delete()\n# dataset.delete()", + "cell_type": "code", "outputs": [], - "source": [ - "# batch.delete()\n", - "# project.delete()\n", - "# dataset.delete()" - ] + "execution_count": null } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file From 2fd6a326f09f5f06b759bfe0a8a8706eb95e888a Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 13 Jun 2024 21:07:11 -0500 Subject: [PATCH 21/23] added special mappings to generator script --- ...olo_annotations.ipynb => import_yolov8_annotations.ipynb} | 0 examples/scripts/generate_readme.py | 5 ++++- 2 files changed, 4 insertions(+), 1 deletion(-) rename examples/integrations/yolo/{import_yolo_annotations.ipynb => import_yolov8_annotations.ipynb} (100%) diff --git a/examples/integrations/yolo/import_yolo_annotations.ipynb b/examples/integrations/yolo/import_yolov8_annotations.ipynb similarity index 100% rename from examples/integrations/yolo/import_yolo_annotations.ipynb rename to examples/integrations/yolo/import_yolov8_annotations.ipynb diff --git a/examples/scripts/generate_readme.py b/examples/scripts/generate_readme.py index 135f9421d..d5eef5a66 100644 --- a/examples/scripts/generate_readme.py +++ b/examples/scripts/generate_readme.py @@ -67,15 +67,18 @@ def create_title(link: str) -> str: split_link = link.split(".")[-2].split("/")[-1].replace("_", " ").split(" ") title = [] - # List to lower case certain words and list to keep certain acronyms capitalized + # List for lower casing certain words, keep certain acronyms capitalized and special mappings lower_case_words = ["to"] acronyms = ["html", "pdf", "llm", "dicom", "sam", "csv"] + special = {"yolov8":"YOLOv8"} for word in split_link: if word.lower() in acronyms: title.append(word.upper()) elif word.lower() in lower_case_words: title.append(word.lower()) + elif word in special.keys(): + title.append(special[word]) else: title.append(word.capitalize()) return " ".join(title).split(".")[0] From f0b97226188b1a8a247afbe1ffc6ccf4e26a090e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 14 Jun 2024 02:08:57 +0000 Subject: [PATCH 22/23] :art: Cleaned --- examples/integrations/yolo/import_yolov8_annotations.ipynb | 4 ++-- examples/scripts/generate_readme.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/integrations/yolo/import_yolov8_annotations.ipynb 
b/examples/integrations/yolo/import_yolov8_annotations.ipynb index 79da0117b..3e79b66a4 100644 --- a/examples/integrations/yolo/import_yolov8_annotations.ipynb +++ b/examples/integrations/yolo/import_yolov8_annotations.ipynb @@ -16,12 +16,12 @@ "metadata": {}, "source": [ "\n", - "\n", "\n", "\n", "\n", - "\n", "" ], diff --git a/examples/scripts/generate_readme.py b/examples/scripts/generate_readme.py index d5eef5a66..a584dff4b 100644 --- a/examples/scripts/generate_readme.py +++ b/examples/scripts/generate_readme.py @@ -70,7 +70,7 @@ def create_title(link: str) -> str: # List for lower casing certain words, keep certain acronyms capitalized and special mappings lower_case_words = ["to"] acronyms = ["html", "pdf", "llm", "dicom", "sam", "csv"] - special = {"yolov8":"YOLOv8"} + special = {"yolov8": "YOLOv8"} for word in split_link: if word.lower() in acronyms: From 2f8e067251795b5f2e0505bb8358e0405b5522c7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 14 Jun 2024 02:09:36 +0000 Subject: [PATCH 23/23] :memo: README updated --- examples/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/README.md b/examples/README.md index a36d043d0..30f615bc2 100644 --- a/examples/README.md +++ b/examples/README.md @@ -228,9 +228,9 @@ Open In Colab - Import Yolo Annotations - Open In Github - Open In Colab + Import YOLOv8 Annotations + Open In Github + Open In Colab
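To make the generator-script change above concrete: a condensed, self-contained sketch of the patched `create_title` logic (the real script may contain additional code around the lines shown in this patch), applied to the renamed notebook path, reproduces the title that appears in the regenerated README table:

```python
# Condensed sketch of create_title from examples/scripts/generate_readme.py after this patch.
def create_title(link: str) -> str:
    # Derive the title from the file name: strip the extension and directories,
    # then split on underscores.
    split_link = link.split(".")[-2].split("/")[-1].replace("_", " ").split(" ")
    title = []

    # Words kept lowercase, acronyms kept uppercase, and special-cased spellings.
    lower_case_words = ["to"]
    acronyms = ["html", "pdf", "llm", "dicom", "sam", "csv"]
    special = {"yolov8": "YOLOv8"}

    for word in split_link:
        if word.lower() in acronyms:
            title.append(word.upper())
        elif word.lower() in lower_case_words:
            title.append(word.lower())
        elif word in special.keys():
            title.append(special[word])
        else:
            title.append(word.capitalize())
    return " ".join(title).split(".")[0]

print(create_title("examples/integrations/yolo/import_yolov8_annotations.ipynb"))
# -> "Import YOLOv8 Annotations", matching the updated row in examples/README.md
```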