Skip to content

[PLT-1506] Added Ruff linting to SDK #1822

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 18 commits into from
Sep 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion libs/labelbox/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,10 @@ dev-dependencies = [
[tool.ruff]
line-length = 80

[tool.ruff.lint]
ignore = ["F", "E722"]
Copy link
Collaborator Author

@Gabefire Gabefire Sep 19, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have the Pyflakes and bare-except rules ignored here. Later we should look at not ignoring these rules and possibly include more than just the defaults.

exclude = ["**/__init__.py"]
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This needs to be excluded for future rules (no unused imports)


[tool.rye.scripts]
unit = "pytest tests/unit"
# https://github.com/Labelbox/labelbox-python/blob/7c84fdffbc14fd1f69d2a6abdcc0087dc557fa4e/Makefile
Expand All @@ -89,9 +93,10 @@ unit = "pytest tests/unit"
# LABELBOX_TEST_BASE_URL="http://host.docker.internal:8080" \
integration = { cmd = "pytest tests/integration" }
data = { cmd = "pytest tests/data" }
rye-lint = "rye lint"
rye-fmt-check = "rye fmt --check"
mypy-lint = "mypy src --pretty --show-error-codes --non-interactive --install-types"
lint = { chain = ["mypy-lint", "rye-fmt-check"] }
lint = { chain = ["rye-fmt-check", "mypy-lint", "rye-lint"] }
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do we still need mypy here?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I think so — mypy is a type checker.

test = { chain = ["lint", "unit", "integration"] }

[tool.hatch.metadata]
Expand Down
6 changes: 3 additions & 3 deletions libs/labelbox/src/labelbox/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -626,7 +626,7 @@ def _get_all(self, db_object_type, where, filter_deleted=True):
An iterable of `db_object_type` instances.
"""
if filter_deleted:
not_deleted = db_object_type.deleted == False
not_deleted = db_object_type.deleted == False # noqa: E712 <Gabefire> Needed for bit operator to combine comparisons
where = not_deleted if where is None else where & not_deleted
query_str, params = query.get_all(db_object_type, where)

Expand Down Expand Up @@ -2297,11 +2297,11 @@ def delete_feature_schema_from_ontology(

if response.status_code == requests.codes.ok:
response_json = response.json()
if response_json["archived"] == True:
if response_json["archived"] is True:
logger.info(
"Feature schema was archived from the ontology because it had associated labels."
)
elif response_json["deleted"] == True:
elif response_json["deleted"] is True:
logger.info(
"Feature schema was successfully removed from the ontology"
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def validate_args(self, values):
uid = self.uid
global_key = self.global_key
if (
uid == file_path == im_bytes == url == global_key == None
uid == file_path == im_bytes == url == global_key is None
and arr is None
):
raise ValueError(
Expand All @@ -191,7 +191,9 @@ def validate_args(self, values):
return self

def __repr__(self) -> str:
symbol_or_none = lambda data: "..." if data is not None else None
def symbol_or_none(data):
return "..." if data is not None else None

return (
f"{self.__class__.__name__}(im_bytes={symbol_or_none(self.im_bytes)},"
f"file_path={self.file_path},"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def validate_date(self, values):
url = self.url
uid = self.uid
global_key = self.global_key
if uid == file_path == text == url == global_key == None:
if uid == file_path == text == url == global_key is None:
raise ValueError(
"One of `file_path`, `text`, `uid`, `global_key` or `url` required."
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ def validate_data(self):
uid = self.uid
global_key = self.global_key

if uid == file_path == frames == url == global_key == None:
if uid == file_path == frames == url == global_key is None:
raise ValueError(
"One of `file_path`, `frames`, `uid`, `global_key` or `url` required."
)
Expand Down
2 changes: 1 addition & 1 deletion libs/labelbox/src/labelbox/data/annotation_types/video.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ class MaskFrame(_CamelCaseMixin, BaseModel):
def validate_args(self, values):
im_bytes = self.im_bytes
instance_uri = self.instance_uri
if im_bytes == instance_uri == None:
if im_bytes == instance_uri is None:
raise ValueError("One of `instance_uri`, `im_bytes` required.")
return self

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def classification_confusion_matrix(

prediction, ground_truth = predictions[0], ground_truths[0]

if type(prediction) != type(ground_truth):
if type(prediction) is not type(ground_truth):
raise TypeError(
"Classification features must be the same type to compute agreement. "
f"Found `{type(prediction)}` and `{type(ground_truth)}`"
Expand Down
2 changes: 1 addition & 1 deletion libs/labelbox/src/labelbox/data/metrics/iou/calculation.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ def classification_miou(

prediction, ground_truth = predictions[0], ground_truths[0]

if type(prediction) != type(ground_truth):
if type(prediction) is not type(ground_truth):
raise TypeError(
"Classification features must be the same type to compute agreement. "
f"Found `{type(prediction)}` and `{type(ground_truth)}`"
Expand Down
2 changes: 1 addition & 1 deletion libs/labelbox/src/labelbox/orm/db_object.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ def _to_many(self, where=None, order_by=None):
)

if rel.filter_deleted:
not_deleted = rel.destination_type.deleted == False
not_deleted = rel.destination_type.deleted == False # noqa: E712 <Gabefire> Needed for bit operator to combine comparisons
where = not_deleted if where is None else where & not_deleted

query_string, params = query.relationship(
Expand Down
11 changes: 5 additions & 6 deletions libs/labelbox/src/labelbox/schema/data_row_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -803,13 +803,13 @@ def _convert_metadata_field(metadata_field):
if isinstance(metadata_field, DataRowMetadataField):
return metadata_field
elif isinstance(metadata_field, dict):
if not "value" in metadata_field:
if "value" not in metadata_field:
raise ValueError(
f"Custom metadata field '{metadata_field}' must have a 'value' key"
)
if (
not "schema_id" in metadata_field
and not "name" in metadata_field
"schema_id" not in metadata_field
and "name" not in metadata_field
):
raise ValueError(
f"Custom metadata field '{metadata_field}' must have either 'schema_id' or 'name' key"
Expand Down Expand Up @@ -954,9 +954,8 @@ def _validate_custom_schema_by_name(


def _batch_items(iterable: List[Any], size: int) -> Generator[Any, None, None]:
l = len(iterable)
for ndx in range(0, l, size):
yield iterable[ndx : min(ndx + size, l)]
for ndx in range(0, len(iterable), size):
yield iterable[ndx : min(ndx + size, len(iterable))]


def _batch_operations(
Expand Down
4 changes: 2 additions & 2 deletions libs/labelbox/src/labelbox/schema/export_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,15 @@

from typing import Optional, List

EXPORT_LIMIT = 30

from labelbox.schema.media_type import MediaType

if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict

EXPORT_LIMIT = 30


class DataRowParams(TypedDict):
data_row_details: Optional[bool]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ def check_message_keys(message):
]
)
for key in message.keys():
if not key in accepted_message_keys:
if key not in accepted_message_keys:
raise KeyError(
f"Invalid {key} key found! Accepted keys in messages list is {accepted_message_keys}"
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def test_adding_to_dataset(signer):
assert label.data.url != uuid
generated_label = next(generator)
assert generated_label.data.url == uuid
assert generated_label.data.external_id != None
assert generated_label.data.external_id is not None
assert generated_label.data.uid == dataset.uid
assert label.data.url == uuid

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,6 @@ def test_export_embeddings_custom(
if emb["id"] == embedding.id:
assert emb["name"] == embedding.name
assert emb["dimensions"] == embedding.dims
assert emb["is_custom"] == True
assert emb["is_custom"] is True
assert len(emb["values"]) == 1
assert emb["values"][0]["value"] == vector
6 changes: 3 additions & 3 deletions libs/labelbox/tests/integration/test_benchmark.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
def test_benchmark(configured_project_with_label):
    """End-to-end check that creating and deleting a benchmark on a label
    updates both the project's benchmark set and the label's reference flag."""
    project, _, data_row, label = configured_project_with_label
    assert set(project.benchmarks()) == set()
    assert label.is_benchmark_reference is False

    benchmark = label.create_benchmark()
    assert set(project.benchmarks()) == {benchmark}
    assert benchmark.reference_label() == label
    # Refresh label data to check its benchmark reference
    label = list(data_row.labels())[0]
    assert label.is_benchmark_reference is True

    benchmark.delete()
    assert set(project.benchmarks()) == set()
    # Refresh label data to check its benchmark reference
    label = list(data_row.labels())[0]
    assert label.is_benchmark_reference is False
13 changes: 7 additions & 6 deletions libs/labelbox/tests/integration/test_data_rows.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def make_metadata_fields_dict():
def test_get_data_row_by_global_key(data_row_and_global_key, client, rand_gen):
    """Fetching a data row by its global key returns a DataRow with that key."""
    _, global_key = data_row_and_global_key
    data_row = client.get_data_row_by_global_key(global_key)
    # `is` comparison checks the exact type (not a subclass) per ruff E721.
    assert type(data_row) is DataRow
    assert data_row.global_key == global_key


Expand Down Expand Up @@ -677,9 +677,10 @@ def test_data_row_update(
pdf_url = "https://storage.googleapis.com/labelbox-datasets/arxiv-pdf/data/99-word-token-pdfs/0801.3483.pdf"
tileLayerUrl = "https://storage.googleapis.com/labelbox-datasets/arxiv-pdf/data/99-word-token-pdfs/0801.3483-lb-textlayer.json"
data_row.update(row_data={"pdfUrl": pdf_url, "tileLayerUrl": tileLayerUrl})
custom_check = (
lambda data_row: data_row.row_data and "pdfUrl" not in data_row.row_data
)

def custom_check(data_row):
return data_row.row_data and "pdfUrl" not in data_row.row_data

data_row = wait_for_data_row_processing(
client, data_row, custom_check=custom_check
)
Expand Down Expand Up @@ -1023,9 +1024,9 @@ def test_data_row_bulk_creation_with_same_global_keys(
task.wait_till_done()

assert task.status == "COMPLETE"
assert type(task.failed_data_rows) is list
assert isinstance(task.failed_data_rows, list)
assert len(task.failed_data_rows) == 1
assert type(task.created_data_rows) is list
assert isinstance(task.created_data_rows, list)
assert len(task.created_data_rows) == 1
assert (
task.failed_data_rows[0]["message"]
Expand Down
4 changes: 2 additions & 2 deletions libs/labelbox/tests/integration/test_ephemeral.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
reason="This test only runs in EPHEMERAL environment",
)
def test_org_and_user_setup(client, ephmeral_client):
    """In the ephemeral environment, the fixture client must be the ephemeral
    client type and carry a distinct admin API key."""
    assert type(client) is ephmeral_client
    assert client.admin_client
    assert client.api_key != client.admin_client.api_key

Expand All @@ -22,4 +22,4 @@ def test_org_and_user_setup(client, ephmeral_client):
reason="This test does not run in EPHEMERAL environment",
)
def test_integration_client(client, integration_client):
    """Outside the ephemeral environment, the fixture client must be the
    integration client type."""
    assert type(client) is integration_client
12 changes: 6 additions & 6 deletions libs/labelbox/tests/integration/test_ontology.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def test_feature_schema_is_not_archived(client, ontology):
result = client.is_feature_schema_archived(
ontology.uid, feature_schema_to_check["featureSchemaId"]
)
assert result == False
assert result is False


def test_feature_schema_is_archived(client, configured_project_with_label):
Expand All @@ -23,10 +23,10 @@ def test_feature_schema_is_archived(client, configured_project_with_label):
result = client.delete_feature_schema_from_ontology(
ontology.uid, feature_schema_id
)
assert result.archived == True and result.deleted == False
assert result.archived is True and result.deleted is False
assert (
client.is_feature_schema_archived(ontology.uid, feature_schema_id)
== True
is True
)


Expand Down Expand Up @@ -58,8 +58,8 @@ def test_delete_tool_feature_from_ontology(client, ontology):
result = client.delete_feature_schema_from_ontology(
ontology.uid, feature_schema_to_delete["featureSchemaId"]
)
assert result.deleted == True
assert result.archived == False
assert result.deleted is True
assert result.archived is False
updatedOntology = client.get_ontology(ontology.uid)
assert len(updatedOntology.normalized["tools"]) == 1

Expand Down Expand Up @@ -300,7 +300,7 @@ def test_unarchive_feature_schema_node(client, ontology):
result = client.unarchive_feature_schema_node(
ontology.uid, feature_schema_to_unarchive["featureSchemaId"]
)
assert result == None
assert result is None


def test_unarchive_feature_schema_node_for_non_existing_feature_schema(
Expand Down
6 changes: 3 additions & 3 deletions libs/labelbox/tests/integration/test_toggle_mal.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
def test_enable_model_assisted_labeling(project):
    """enable_model_assisted_labeling should echo back the requested state:
    True by default, True when passed True, False when passed False."""
    response = project.enable_model_assisted_labeling()
    assert response is True

    response = project.enable_model_assisted_labeling(True)
    assert response is True

    response = project.enable_model_assisted_labeling(False)
    assert response is False
4 changes: 2 additions & 2 deletions libs/labelbox/tests/unit/test_unit_ontology.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ def test_add_ontology_tool() -> None:
assert len(o.tools) == 2

for tool in o.tools:
assert type(tool) == Tool
assert type(tool) is Tool

with pytest.raises(InconsistentOntologyException) as exc:
o.add_tool(Tool(tool=Tool.Type.BBOX, name="bounding box"))
Expand All @@ -217,7 +217,7 @@ def test_add_ontology_classification() -> None:
assert len(o.classifications) == 2

for classification in o.classifications:
assert type(classification) == Classification
assert type(classification) is Classification

with pytest.raises(InconsistentOntologyException) as exc:
o.add_classification(
Expand Down
Loading