Skip to content

Commit 55e3a6a

Browse files
committed
MODEL-1448: Upsert label feedback method
1 parent f27928e commit 55e3a6a

File tree

5 files changed

+66
-7
lines changed

5 files changed

+66
-7
lines changed

libs/labelbox/src/labelbox/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,5 +34,6 @@
3434
from labelbox.schema.slice import Slice, CatalogSlice, ModelSlice
3535
from labelbox.schema.queue_mode import QueueMode
3636
from labelbox.schema.task_queue import TaskQueue
37+
from labelbox.schema.label_score import LabelScore
3738
from labelbox.schema.identifiables import UniqueIds, GlobalKeys, DataRowIds
3839
from labelbox.schema.identifiable import UniqueId, GlobalKey

libs/labelbox/src/labelbox/client.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1955,3 +1955,27 @@ def run_foundry_app(self, model_run_name: str, data_rows: Union[DataRowIds,
19551955
"""
19561956
foundry_client = FoundryClient(self)
19571957
return foundry_client.run_app(model_run_name, data_rows, app_id)
1958+
1959+
def upsert_label_feedback(self, label_id: str, feedback: str,
                          scores: Dict[str, float]) -> "List[Entity.LabelScore]":
    """Incrementally add or update free-text feedback and calibration
    scores on a label (AutoQA).

    Args:
        label_id: Target label ID
        feedback: Free text comment regarding the label
        scores: A dict of scores, the key is a score name and the value is
            the score value

    Returns:
        A list of LabelScore instances, one per score returned by the
        upsertAutoQaLabelFeedback mutation.
    """
    mutation_str = """mutation UpsertAutoQaLabelFeedbackPyApi($labelId: ID!, $feedback: String!, $scores: Json!){
        upsertAutoQaLabelFeedback(input: {labelId: $labelId, feedback: $feedback, scores: $scores}) { id scores {id name score} }
    }
    """
    res = self.execute(mutation_str, {
        "labelId": label_id,
        "feedback": feedback,
        "scores": scores
    })
    # The mutation returns the full set of scores now attached to the label;
    # wrap each raw dict in a LabelScore DB object bound to this client.
    scores_raw = res["upsertAutoQaLabelFeedback"]["scores"]

    return [Entity.LabelScore(self, x) for x in scores_raw]

libs/labelbox/src/labelbox/orm/model.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -380,6 +380,7 @@ class Entity(metaclass=EntityMeta):
380380
CatalogSlice: Type[labelbox.CatalogSlice]
381381
ModelSlice: Type[labelbox.ModelSlice]
382382
TaskQueue: Type[labelbox.TaskQueue]
383+
LabelScore: Type[labelbox.LabelScore]
383384

384385
@classmethod
385386
def _attributes_of_type(cls, attr_type):
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
from labelbox.orm.db_object import DbObject
2+
from labelbox.orm.model import Field
3+
4+
5+
class LabelScore(DbObject):
6+
"""
7+
a label score
8+
9+
Attributes
10+
name
11+
score
12+
13+
"""
14+
15+
name = Field.String("name")
16+
data_row_count = Field.Float("score")
17+
18+
def __init__(self, client, *args, **kwargs):
19+
super().__init__(client, *args, **kwargs)

libs/labelbox/tests/integration/test_label.py

Lines changed: 21 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import time
22

3+
from labelbox import Client
34
import pytest
4-
import requests
55
import os
66

77
from labelbox import Label
@@ -29,11 +29,13 @@ def test_labels(configured_project_with_label):
2929

3030

3131
# TODO: Skipping this test in staging due to label not updating
32-
@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem" or
33-
os.environ['LABELBOX_TEST_ENVIRON'] == "staging" or
34-
os.environ['LABELBOX_TEST_ENVIRON'] == "local" or
35-
os.environ['LABELBOX_TEST_ENVIRON'] == "custom",
36-
reason="does not work for onprem")
32+
@pytest.mark.skipif(
33+
condition=os.environ["LABELBOX_TEST_ENVIRON"] == "onprem"
34+
or os.environ["LABELBOX_TEST_ENVIRON"] == "staging"
35+
or os.environ["LABELBOX_TEST_ENVIRON"] == "local"
36+
or os.environ["LABELBOX_TEST_ENVIRON"] == "custom",
37+
reason="does not work for onprem",
38+
)
3739
def test_label_update(configured_project_with_label):
3840
_, _, _, label = configured_project_with_label
3941
label.update(label="something else")
@@ -57,7 +59,7 @@ def test_label_bulk_deletion(configured_project_with_label):
5759
project, _, _, _ = configured_project_with_label
5860

5961
for _ in range(2):
60-
#only run twice, already have one label in the fixture
62+
# only run twice, already have one label in the fixture
6163
project.create_label()
6264
labels = project.labels()
6365
l1 = next(labels)
@@ -74,3 +76,15 @@ def test_label_bulk_deletion(configured_project_with_label):
7476
time.sleep(5)
7577

7678
assert set(project.labels()) == {l2}
79+
80+
81+
def test_upsert_label_scores(configured_project_with_label, client: Client):
    # Grab any existing label from the fixture-created project.
    project, _, _, _ = configured_project_with_label
    label = next(project.labels())

    returned_scores = client.upsert_label_feedback(
        label_id=label.uid,
        feedback="That's a great label!",
        scores={"overall": 5},
    )

    # Exactly one score was submitted, so exactly one comes back.
    assert len(returned_scores) == 1
    assert returned_scores[0].score == 5

0 commit comments

Comments
 (0)