[PLT-0] Fix Integration Tests #1699

Merged
merged 3 commits on Jun 28, 2024
13 changes: 7 additions & 6 deletions .github/workflows/python-package-shared.yml
@@ -36,7 +36,7 @@ jobs:
   integration:
     runs-on: ubuntu-latest
     concurrency:
-      group: labelbox-python-${{ inputs.test-env }}-${{ inputs.python-version }}
+      group: labelbox-python-${{ inputs.test-env }}-${{ inputs.python-version }}-integration
       cancel-in-progress: false
     steps:
       - uses: actions/checkout@v4
@@ -48,14 +48,16 @@ jobs:
           python-version: ${{ inputs.python-version }}
       - name: Integration Testing
         env:
-          PYTEST_XDIST_AUTO_NUM_WORKERS: 20
           LABELBOX_TEST_API_KEY: ${{ secrets[inputs.api-key] }}
           DA_GCP_LABELBOX_API_KEY: ${{ secrets[inputs.da-test-key] }}
           LABELBOX_TEST_ENVIRON: ${{ inputs.test-env }}
         working-directory: libs/labelbox
-        run: rye run integration
+        run: rye run integration -n 32
   unit-data:
     runs-on: ubuntu-latest
+    concurrency:
+      group: labelbox-python-${{ inputs.test-env }}-${{ inputs.python-version }}-unit-data
+      cancel-in-progress: false
     steps:
       - uses: actions/checkout@v4
         with:
@@ -66,12 +68,11 @@
           python-version: ${{ inputs.python-version }}
       - name: Unit && Data Testing
         env:
-          PYTEST_XDIST_AUTO_NUM_WORKERS: 20
           LABELBOX_TEST_API_KEY: ${{ secrets[inputs.api-key] }}
           DA_GCP_LABELBOX_API_KEY: ${{ secrets[inputs.da-test-key] }}
           LABELBOX_TEST_ENVIRON: ${{ inputs.test-env }}
         working-directory: libs/labelbox
         run: |
           rye sync -f --features labelbox/data
-          rye run unit
-          rye run data
+          rye run unit -n 32
+          rye run data -n 32
@@ -25,7 +25,7 @@

"""


@pytest.mark.order(1)

Contributor: and why is this?

Contributor Author: This test is very slow; the order marker guarantees it runs first (among whatever tests are selected) so that overall testing time is optimized.

def test_create_from_url(project):
name = str(uuid.uuid4())
url = "https://storage.googleapis.com/labelbox-public-bucket/predictions_test_v2.ndjson"
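
For context on the thread above, a minimal sketch (hypothetical test names; assumes pytest plus the pytest-order plugin added to dev-dependencies in this PR) of what the order marker does: tests with a low order index are collected ahead of unmarked tests, regardless of where they sit in the file.

import time

import pytest


@pytest.mark.order(1)
def test_slow_import_runs_first():
    # Slow test: pytest-order moves order(1) tests to the front of the run,
    # so long-running work starts early and parallel workers stay busy.
    time.sleep(2)  # stand-in for a slow integration call
    assert True


def test_fast_check_runs_after():
    # Unmarked tests keep their relative order and run after the ordered ones.
    assert 1 + 1 == 2

With 32 xdist workers, starting the slowest tests first reduces the chance that one of them becomes the tail the whole job waits on.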
@@ -1,12 +1,13 @@
import uuid
import pytest
from labelbox.data.annotation_types.annotation import ObjectAnnotation
from labelbox.data.annotation_types.label import Label
from labelbox.data.annotation_types.data.text import TextData
from labelbox.data.annotation_types.ner import ConversationEntity

from labelbox.schema.annotation_import import MALPredictionImport


@pytest.mark.order(1)
def test_conversation_entity(client, configured_project_with_one_data_row,
dataset_conversation_entity, rand_gen):

65 changes: 2 additions & 63 deletions libs/labelbox/tests/data/annotation_import/test_data_types.py
@@ -144,68 +144,7 @@ def create_data_row_for_project(project, dataset, data_row_ndjson, batch_name):
     return data_row


-# TODO: Add VideoData. Currently label import job finishes without errors but project.export_labels() returns empty list.
-@pytest.mark.export_v1("tests used export v1 method, v2 test -> test_import_data_types_v2 below")
-@pytest.mark.parametrize(
-    "data_type_class",
-    [
-        AudioData,
-        ConversationData,
-        DicomData,
-        DocumentData,
-        HTMLData,
-        ImageData,
-        TextData,
-        LlmPromptCreationData,
-        LlmPromptResponseCreationData,
-        LlmResponseCreationData,
-    ],
-)
-def test_import_data_types(
-    client,
-    configured_project,
-    initial_dataset,
-    rand_gen,
-    data_row_json_by_data_type,
-    annotations_by_data_type,
-    data_type_class,
-    helpers,
-):
-    project = configured_project
-    project_id = project.uid
-    dataset = initial_dataset
-
-    helpers.set_project_media_type_from_data_type(project, data_type_class)
-
-    data_type_string = data_type_class.__name__[:-4].lower()
-    data_row_ndjson = data_row_json_by_data_type[data_type_string]
-    data_row = create_data_row_for_project(project, dataset, data_row_ndjson,
-                                           rand_gen(str))
-
-    annotations_ndjson = annotations_by_data_type[data_type_string]
-    annotations_list = [
-        label.annotations
-        for label in NDJsonConverter.deserialize(annotations_ndjson)
-    ]
-    labels = [
-        lb_types.Label(data=data_type_class(uid=data_row.uid),
-                       annotations=annotations)
-        for annotations in annotations_list
-    ]
-
-    label_import = lb.LabelImport.create_from_objects(
-        client, project_id, f"test-import-{data_type_string}", labels)
-    label_import.wait_until_done()
-
-    assert label_import.state == AnnotationImportState.FINISHED
-    assert len(label_import.errors) == 0
-    exported_labels = project.export_labels(download=True)
-    objects = exported_labels[0]["Label"]["objects"]
-    classifications = exported_labels[0]["Label"]["classifications"]
-    assert len(objects) + len(classifications) == len(labels)
-    data_row.delete()
-
-
+@pytest.mark.skip(reason="broken export v1 api, to be retired soon")
 def test_import_data_types_by_global_key(
     client,
     configured_project,
@@ -259,7 +198,7 @@ def validate_iso_format(date_string: str):
assert parsed_t.minute is not None
assert parsed_t.second is not None


@pytest.mark.order(1)

Contributor Author: These are marked to run first because they are slower.

@pytest.mark.parametrize(
"data_type_class",
[
@@ -77,7 +77,7 @@ def create_data_row_for_project(project, dataset, data_row_ndjson, batch_name):

return data_row


@pytest.mark.order(1)
def test_import_data_types_by_global_key(
client,
configured_project,
@@ -9,7 +9,7 @@

"""


@pytest.mark.order(1)
def test_create_with_url_arg(client, configured_project_with_one_data_row,
annotation_import_test_helpers):
name = str(uuid.uuid4())
@@ -12,7 +12,7 @@

"""


@pytest.mark.order(1)
def test_create_from_objects(model_run_with_data_rows,
object_predictions_for_annotation_import,
annotation_import_test_helpers):
@@ -6,7 +6,7 @@

from labelbox import DataSplit, ModelRun


@pytest.mark.order(1)
def test_model_run(client, configured_project_with_label, data_row, rand_gen):
project, _, _, label = configured_project_with_label
label_id = label.uid
@@ -66,6 +66,7 @@ def test_export_annotations_nested_checklist(
assert len(nested_class_answers) == 2


@pytest.mark.skip(reason="broken export v1 api, to be retired soon")
def test_export_filtered_dates(client,
configured_project_with_complex_ontology):
project, data_row = configured_project_with_complex_ontology
@@ -107,6 +108,7 @@ def test_export_filtered_dates(client,
assert len(empty_export) == 0


@pytest.mark.skip(reason="broken export v1 api, to be retired soon")
def test_export_filtered_activity(client,
configured_project_with_complex_ontology):
project, data_row = configured_project_with_complex_ontology
@@ -183,6 +185,7 @@ def test_queued_data_row_export(configured_project):
assert len(result) == 1


@pytest.mark.skip(reason="broken export v1 api, to be retired soon")
def test_label_export(configured_project_with_label):
project, _, _, label = configured_project_with_label
label_id = label.uid
@@ -86,6 +86,7 @@ def make_named_metadata(dr_id) -> DataRowMetadata:
return metadata


@pytest.mark.order(1)
def test_bulk_delete_datarow_metadata(data_row, mdo):
"""test bulk deletes for all fields"""
metadata = make_metadata(data_row.uid)
1 change: 1 addition & 0 deletions libs/labelbox/tests/integration/test_data_rows_upsert.py
@@ -48,6 +48,7 @@ def all_inclusive_data_row(self, dataset, image_url):
])
return dr

@pytest.mark.order(1)
def test_create_data_row_with_auto_key(self, dataset, image_url):
task = dataset.upsert_data_rows([{'row_data': image_url}])
task.wait_till_done()
6 changes: 5 additions & 1 deletion pyproject.toml
@@ -25,13 +25,17 @@ dev-dependencies = [
"pytest-xdist>=3.5.0",
"toml-cli>=0.6.0",
"faker>=25.5.0",
"pytest-timestamper>=0.0.10",
"pytest-timeout>=2.3.1",
"pytest-order>=1.2.1",
]

[tool.rye.workspace]
members = ["libs/*", "examples"]

[tool.pytest.ini_options]
addopts = "-rP -vvv --reruns 1 --reruns-delay 5 --durations=20 -n auto --maxprocesses=10 --cov=labelbox --import-mode=importlib"
# https://github.com/pytest-dev/pytest-rerunfailures/issues/99
addopts = "-rP -vvv --reruns 1 --reruns-delay 5 --durations=20 -n auto --cov=labelbox --import-mode=importlib --order-group-scope=module"
markers = """
slow: marks tests as slow (deselect with '-m "not slow"')
"""
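
As a usage sketch for the options above (hypothetical test name): the registered slow marker can be deselected from the command line, and the explicit -n 32 that the workflow passes is intended to take precedence over the -n auto default coming from addopts.

import pytest


@pytest.mark.slow
def test_full_round_trip_is_slow():
    # Carries the `slow` marker registered above; deselect with: pytest -m "not slow"
    assert sum(range(1_000_000)) >= 0

The --order-group-scope=module option is a pytest-order setting, presumably chosen here so that reordering stays within each test module rather than across the whole session once xdist distributes the tests.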
5 changes: 5 additions & 0 deletions requirements-dev.lock
@@ -198,13 +198,18 @@ pyproj==3.5.0
     # via labelbox
 pytest==8.2.2
     # via pytest-cov
+    # via pytest-order
     # via pytest-rerunfailures
     # via pytest-snapshot
+    # via pytest-timeout
     # via pytest-xdist
 pytest-cases==3.8.5
 pytest-cov==5.0.0
+pytest-order==1.2.1
 pytest-rerunfailures==14.0
 pytest-snapshot==0.9.0
+pytest-timeout==2.3.1
+pytest-timestamper==0.0.10
 pytest-xdist==3.6.1
 python-dateutil==2.8.2
     # via faker