Skip to content

Vb/merge 5.2 #1866

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 15 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
project = 'Python SDK reference'
copyright = '2024, Labelbox'
author = 'Labelbox'
release = '5.1.0'
release = '5.2.1'

# -- General configuration ---------------------------------------------------

Expand Down
6 changes: 6 additions & 0 deletions docs/labelbox/datarow_payload_templates.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
Datarow payload templates
===============================================================================================

.. automodule:: labelbox.schema.data_row_payload_templates
:members:
:show-inheritance:
10 changes: 9 additions & 1 deletion libs/labelbox/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,14 @@
# Changelog
# Version 5.1.0 (2024-09-27)
# Version 5.2.1 (2024-10-09)
## Fixed
* Exporter encoding

# Version 5.2.0 (2024-10-09)
## Added
* Support data row / batch for live mmc projects([#1856](https://github.com/Labelbox/labelbox-python/pull/1856))

# Version 5.1.0 (2024-09-27)
## Added
* Support self-signed SSL certs([#1811](https://github.com/Labelbox/labelbox-python/pull/1811))
* Rectangle units now correctly support percent inputs([#1848](https://github.com/Labelbox/labelbox-python/pull/1848))

Expand Down
2 changes: 1 addition & 1 deletion libs/labelbox/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "labelbox"
version = "5.1.0"
version = "5.2.1"
description = "Labelbox Python API"
authors = [{ name = "Labelbox", email = "engineering@labelbox.com" }]
dependencies = [
Expand Down
66 changes: 45 additions & 21 deletions libs/labelbox/src/labelbox/__init__.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,59 @@
name = "labelbox"

__version__ = "5.1.0"
__version__ = "5.2.1"

from labelbox.client import Client
from labelbox.schema.project import Project
from labelbox.schema.model import Model
from labelbox.schema.model_config import ModelConfig
from labelbox.schema.annotation_import import (
LabelImport,
MALPredictionImport,
MEAPredictionImport,
LabelImport,
MEAToMALPredictionImport,
)
from labelbox.schema.dataset import Dataset
from labelbox.schema.data_row import DataRow
from labelbox.schema.asset_attachment import AssetAttachment
from labelbox.schema.batch import Batch
from labelbox.schema.benchmark import Benchmark
from labelbox.schema.bulk_import_request import BulkImportRequest
from labelbox.schema.catalog import Catalog
from labelbox.schema.data_row import DataRow
from labelbox.schema.data_row_metadata import (
DataRowMetadata,
DataRowMetadataField,
DataRowMetadataOntology,
DeleteDataRowMetadata,
)
from labelbox.schema.dataset import Dataset
from labelbox.schema.enums import AnnotationImportState
from labelbox.schema.label import Label
from labelbox.schema.batch import Batch
from labelbox.schema.review import Review
from labelbox.schema.user import User
from labelbox.schema.organization import Organization
from labelbox.schema.task import Task
from labelbox.schema.export_task import (
StreamType,
BufferedJsonConverterOutput,
ExportTask,
BufferedJsonConverterOutput,
)
from labelbox.schema.iam_integration import IAMIntegration
from labelbox.schema.identifiable import GlobalKey, UniqueId
from labelbox.schema.identifiables import DataRowIds, GlobalKeys, UniqueIds
from labelbox.schema.invite import Invite, InviteLimit
from labelbox.schema.label import Label
from labelbox.schema.label_score import LabelScore
from labelbox.schema.labeling_frontend import (
LabelingFrontend,
LabelingFrontendOptions,
)
from labelbox.schema.asset_attachment import AssetAttachment
from labelbox.schema.webhook import Webhook
from labelbox.schema.labeling_service import LabelingService
from labelbox.schema.labeling_service_dashboard import LabelingServiceDashboard
from labelbox.schema.labeling_service_status import LabelingServiceStatus
from labelbox.schema.media_type import MediaType
from labelbox.schema.model import Model
from labelbox.schema.model_config import ModelConfig
from labelbox.schema.model_run import DataSplit, ModelRun
from labelbox.schema.ontology import (
Classification,
FeatureSchema,
Ontology,
OntologyBuilder,
Classification,
Option,
PromptResponseClassification,
ResponseOption,
Tool,
FeatureSchema,
)
from labelbox.schema.ontology import PromptResponseClassification
from labelbox.schema.ontology import ResponseOption
Expand All @@ -64,10 +78,20 @@
from labelbox.schema.identifiables import UniqueIds, GlobalKeys, DataRowIds
from labelbox.schema.identifiable import UniqueId, GlobalKey
from labelbox.schema.ontology_kind import OntologyKind
from labelbox.schema.organization import Organization
from labelbox.schema.project import Project
from labelbox.schema.project_model_config import ProjectModelConfig
from labelbox.schema.project_overview import (
ProjectOverview,
ProjectOverviewDetailed,
)
from labelbox.schema.labeling_service import LabelingService
from labelbox.schema.labeling_service_dashboard import LabelingServiceDashboard
from labelbox.schema.labeling_service_status import LabelingServiceStatus
from labelbox.schema.project_resource_tag import ProjectResourceTag
from labelbox.schema.queue_mode import QueueMode
from labelbox.schema.resource_tag import ResourceTag
from labelbox.schema.review import Review
from labelbox.schema.role import ProjectRole, Role
from labelbox.schema.slice import CatalogSlice, ModelSlice, Slice
from labelbox.schema.task import Task
from labelbox.schema.task_queue import TaskQueue
from labelbox.schema.user import User
from labelbox.schema.webhook import Webhook
44 changes: 31 additions & 13 deletions libs/labelbox/src/labelbox/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import random
import time
import urllib.parse
import warnings
from collections import defaultdict
from datetime import datetime, timezone
from types import MappingProxyType
Expand Down Expand Up @@ -637,6 +638,7 @@ def create_project(
}
return self._create_project(_CoreProjectInput(**input))

@overload
def create_model_evaluation_project(
self,
name: str,
Expand All @@ -649,7 +651,17 @@ def create_model_evaluation_project(
is_consensus_enabled: Optional[bool] = None,
dataset_id: Optional[str] = None,
dataset_name: Optional[str] = None,
data_row_count: int = 100,
data_row_count: Optional[int] = None,
**kwargs,
) -> Project:
pass

def create_model_evaluation_project(
self,
dataset_id: Optional[str] = None,
dataset_name: Optional[str] = None,
data_row_count: Optional[int] = None,
**kwargs,
) -> Project:
"""
Use this method exclusively to create a chat model evaluation project.
Expand All @@ -674,22 +686,28 @@ def create_model_evaluation_project(
>>> client.create_model_evaluation_project(name=project_name, dataset_id="clr00u8j0j0j0", data_row_count=10)
>>> This creates a new project, and adds 10 datarows to the dataset with id "clr00u8j0j0j0" and assigns a batch of the newly created 10 data rows to the project.

>>> client.create_model_evaluation_project(name=project_name)
>>> This creates a new project with no data rows.

"""
if not dataset_id and not dataset_name:
raise ValueError(
"dataset_name or data_set_id must be present and not be an empty string."
)
dataset_name_or_id = dataset_id or dataset_name
append_to_existing_dataset = bool(dataset_id)

if dataset_id:
append_to_existing_dataset = True
dataset_name_or_id = dataset_id
else:
append_to_existing_dataset = False
dataset_name_or_id = dataset_name
if dataset_name_or_id:
kwargs["dataset_name_or_id"] = dataset_name_or_id
kwargs["append_to_existing_dataset"] = append_to_existing_dataset
if data_row_count is None:
data_row_count = 100
if data_row_count < 0:
raise ValueError("data_row_count must be a positive integer.")
kwargs["data_row_count"] = data_row_count
warnings.warn(
"Automatic generation of data rows of live model evaluation projects is deprecated. dataset_name_or_id, append_to_existing_dataset, data_row_count will be removed in a future version.",
DeprecationWarning,
)

media_type = MediaType.Conversational
editor_task_type = EditorTaskType.ModelChatEvaluation
kwargs["media_type"] = MediaType.Conversational
kwargs["editor_task_type"] = EditorTaskType.ModelChatEvaluation.value

input = {
"name": name,
Expand Down
10 changes: 3 additions & 7 deletions libs/labelbox/src/labelbox/data/annotation_types/collection.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,10 @@
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Callable, Generator, Iterable, Union, Optional
from uuid import uuid4
import warnings
from typing import Callable, Generator, Iterable, Union

from tqdm import tqdm

from labelbox.schema import ontology
from labelbox.orm.model import Entity
from ..ontology import get_classifications, get_tools
from labelbox.schema import ontology

from ..generator import PrefetchGenerator
from .label import Label

Expand Down
1 change: 1 addition & 0 deletions libs/labelbox/src/labelbox/orm/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,6 +387,7 @@ class Entity(metaclass=EntityMeta):
Review: Type[labelbox.Review]
User: Type[labelbox.User]
LabelingFrontend: Type[labelbox.LabelingFrontend]
BulkImportRequest: Type[labelbox.BulkImportRequest]
Benchmark: Type[labelbox.Benchmark]
IAMIntegration: Type[labelbox.IAMIntegration]
LabelingFrontendOptions: Type[labelbox.LabelingFrontendOptions]
Expand Down
2 changes: 2 additions & 0 deletions libs/labelbox/src/labelbox/schema/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import labelbox.schema.asset_attachment
import labelbox.schema.annotation_import
import labelbox.schema.asset_attachment
import labelbox.schema.annotation_import
import labelbox.schema.benchmark
import labelbox.schema.data_row
import labelbox.schema.dataset
Expand Down
41 changes: 41 additions & 0 deletions libs/labelbox/src/labelbox/schema/data_row_payload_templates.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
from typing import Dict, List, Optional

from pydantic import BaseModel, Field

from labelbox.schema.data_row import DataRowMetadataField


class ModelEvalutationTemplateRowData(BaseModel):
type: str = Field(
default="application/vnd.labelbox.conversational.model-chat-evaluation",
frozen=True,
)
draft: bool = Field(default=True, frozen=True)
rootMessageIds: List[str] = Field(default=[])
actors: Dict = Field(default={})
version: int = Field(default=2, frozen=True)
messages: Dict = Field(default={})
global_key: Optional[str] = None


class ModelEvaluationTemplate(BaseModel):
"""
Use this class to create a model evaluation data row.
Examples:
>>> data = ModelEvaluationTemplate()
>>> data.row_data.rootMessageIds = ["root1"]
>>> vector = [random.uniform(1.0, 2.0) for _ in range(embedding.dims)]
>>> data.embeddings = [...]
>>> data.metadata_fields = [...]
>>> data.attachments = [...]
>>> content = data.model_dump()
>>> task = dataset.create_data_rows([content])
"""

row_data: ModelEvalutationTemplateRowData = Field(
default=ModelEvalutationTemplateRowData()
)
attachments: List[Dict] = Field(default=[])
embeddings: List[Dict] = Field(default=[])
metadata_fields: List[DataRowMetadataField] = Field(default=[])
25 changes: 25 additions & 0 deletions libs/labelbox/src/labelbox/schema/enums.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,31 @@
from enum import Enum


class BulkImportRequestState(Enum):
"""State of the import job when importing annotations (RUNNING, FAILED, or FINISHED).

If you are not using MEA continue using BulkImportRequest.
AnnotationImports are in beta and will change soon.

.. list-table::
:widths: 15 150
:header-rows: 1

* - State
- Description
* - RUNNING
- Indicates that the import job is not done yet.
* - FAILED
- Indicates the import job failed. Check `BulkImportRequest.errors` for more information
* - FINISHED
- Indicates the import job is no longer running. Check `BulkImportRequest.statuses` for more information
"""

RUNNING = "RUNNING"
FAILED = "FAILED"
FINISHED = "FINISHED"


class AnnotationImportState(Enum):
"""State of the import job when importing annotations (RUNNING, FAILED, or FINISHED).

Expand Down
14 changes: 8 additions & 6 deletions libs/labelbox/src/labelbox/schema/export_task.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,28 @@
import json
import os
import tempfile
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from functools import lru_cache
import json
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generic,
Iterator,
Optional,
Tuple,
TypeVar,
Union,
TYPE_CHECKING,
Any,
)

import requests
import tempfile
import os
from pydantic import BaseModel

from labelbox.schema.task import Task
from labelbox.utils import _CamelCaseMixin
from pydantic import BaseModel

if TYPE_CHECKING:
from labelbox import Client
Expand Down Expand Up @@ -120,6 +121,7 @@ def _get_file_content(
)
response = requests.get(file_info.file, timeout=30)
response.raise_for_status()
response.encoding = "utf-8"
assert (
len(response.content)
== file_info.offsets.end - file_info.offsets.start + 1
Expand Down
Loading
Loading