Skip to content

Commit 8d5b97c

Browse files
committed
MODEL-1448: Upsert label feedback method
1 parent 555ce57 commit 8d5b97c

File tree

5 files changed

+66
-7
lines changed

5 files changed

+66
-7
lines changed

libs/labelbox/src/labelbox/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
from labelbox.schema.slice import Slice, CatalogSlice, ModelSlice
3737
from labelbox.schema.queue_mode import QueueMode
3838
from labelbox.schema.task_queue import TaskQueue
39+
from labelbox.schema.label_score import LabelScore
3940
from labelbox.schema.identifiables import UniqueIds, GlobalKeys, DataRowIds
4041
from labelbox.schema.identifiable import UniqueId, GlobalKey
4142
from labelbox.schema.ontology_kind import OntologyKind

libs/labelbox/src/labelbox/client.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2216,3 +2216,27 @@ def get_embedding_by_name(self, name: str) -> Embedding:
22162216
return e
22172217
raise labelbox.exceptions.ResourceNotFoundError(Embedding,
22182218
dict(name=name))
2219+
2220+
def upsert_label_feedback(
        self, label_id: str, feedback: str,
        scores: Dict[str, float]) -> List[Entity.LabelScore]:
    """Creates or updates Auto-QA feedback (free text + scores) for a label.

    Args:
        label_id: Target label ID.
        feedback: Free text comment regarding the label.
        scores: A dict of scores; the key is a score name and the
            value is the score value.

    Returns:
        A list of LabelScore instances built from the scores returned
        by the ``upsertAutoQaLabelFeedback`` mutation.
    """
    # Scores are passed through as a GraphQL ``Json`` scalar, so any
    # JSON-serializable mapping of name -> number is accepted.
    mutation_str = """mutation UpsertAutoQaLabelFeedbackPyApi($labelId: ID!, $feedback: String!, $scores: Json!){
            upsertAutoQaLabelFeedback(input: {labelId: $labelId, feedback: $feedback, scores: $scores}) { id scores {id name score} }
            }
            """
    res = self.execute(mutation_str, {
        "labelId": label_id,
        "feedback": feedback,
        "scores": scores
    })
    scores_raw = res["upsertAutoQaLabelFeedback"]["scores"]

    # Fix: the original annotated the return as ``Entity.Label`` although
    # a list of LabelScore objects is what is actually returned here.
    return [Entity.LabelScore(self, x) for x in scores_raw]

libs/labelbox/src/labelbox/orm/model.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -382,6 +382,7 @@ class Entity(metaclass=EntityMeta):
382382
CatalogSlice: Type[labelbox.CatalogSlice]
383383
ModelSlice: Type[labelbox.ModelSlice]
384384
TaskQueue: Type[labelbox.TaskQueue]
385+
LabelScore: Type[labelbox.LabelScore]
385386

386387
@classmethod
387388
def _attributes_of_type(cls, attr_type):
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
from labelbox.orm.db_object import DbObject
from labelbox.orm.model import Field


class LabelScore(DbObject):
    """A label score, as returned by the Auto-QA label feedback mutation.

    Attributes:
        name: Name of the score (e.g. "overall").
        score: Numeric score value.
    """

    name = Field.String("name")
    # Fix: this field was mistakenly assigned to an attribute named
    # ``data_row_count`` (a copy-paste error). Callers — including the
    # integration test that asserts ``scores[0].score`` — read ``.score``,
    # and the class docstring documents ``score``, so bind it correctly.
    score = Field.Float("score")

    def __init__(self, client, *args, **kwargs):
        super().__init__(client, *args, **kwargs)

libs/labelbox/tests/integration/test_label.py

Lines changed: 21 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import time
22

3+
from labelbox import Client
34
import pytest
4-
import requests
55
import os
66

77
from labelbox import Label
@@ -29,11 +29,13 @@ def test_labels(configured_project_with_label):
2929

3030

3131
# TODO: Skipping this test in staging due to label not updating
32-
@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem" or
33-
os.environ['LABELBOX_TEST_ENVIRON'] == "staging" or
34-
os.environ['LABELBOX_TEST_ENVIRON'] == "local" or
35-
os.environ['LABELBOX_TEST_ENVIRON'] == "custom",
36-
reason="does not work for onprem")
32+
@pytest.mark.skipif(
33+
condition=os.environ["LABELBOX_TEST_ENVIRON"] == "onprem"
34+
or os.environ["LABELBOX_TEST_ENVIRON"] == "staging"
35+
or os.environ["LABELBOX_TEST_ENVIRON"] == "local"
36+
or os.environ["LABELBOX_TEST_ENVIRON"] == "custom",
37+
reason="does not work for onprem",
38+
)
3739
def test_label_update(configured_project_with_label):
3840
_, _, _, label = configured_project_with_label
3941
label.update(label="something else")
@@ -57,7 +59,7 @@ def test_label_bulk_deletion(configured_project_with_label):
5759
project, _, _, _ = configured_project_with_label
5860

5961
for _ in range(2):
60-
#only run twice, already have one label in the fixture
62+
# only run twice, already have one label in the fixture
6163
project.create_label()
6264
labels = project.labels()
6365
l1 = next(labels)
@@ -74,3 +76,15 @@ def test_label_bulk_deletion(configured_project_with_label):
7476
time.sleep(5)
7577

7678
assert set(project.labels()) == {l2}
79+
80+
81+
def test_upsert_label_scores(configured_project_with_label, client: Client):
    """Upserting feedback for an existing label yields exactly one score."""
    project, _, _, _ = configured_project_with_label

    target_label = next(project.labels())

    returned = client.upsert_label_feedback(
        label_id=target_label.uid,
        feedback="That's a great label!",
        scores={"overall": 5},
    )

    # One score name in, one LabelScore out — with the value echoed back.
    assert len(returned) == 1
    assert returned[0].score == 5

0 commit comments

Comments
 (0)