Skip to content

Commit a090b7a

Browse files
authored
add Dataset.scenes property (#191)
* add scenes_list
* sneak in some other typehint fixes
* bump ver and update changelog
* add TODOs for test
* rename scenes_list -> scenes
* add to tests
* lint
1 parent 8897829 commit a090b7a

File tree

4 files changed

+56
-8
lines changed

4 files changed

+56
-8
lines changed

CHANGELOG.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,11 @@ All notable changes to the [Nucleus Python Client](https://github.com/scaleapi/n
44
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
55
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
66

7+
## [0.4.5](https://github.com/scaleapi/nucleus-python-client/releases/tag/v0.4.5) - 2021-01-07
8+
9+
### Added
10+
- `Dataset.scenes` property that fetches the Scale-generated ID, reference ID, type, and metadata of all scenes in the Dataset.
11+
712
## [0.4.4](https://github.com/scaleapi/nucleus-python-client/releases/tag/v0.4.4) - 2021-01-04
813

914
### Added

nucleus/dataset.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ def is_scene(self) -> bool:
140140
return response
141141

142142
@property
143-
def model_runs(self) -> Dict[Any, Any]:
143+
def model_runs(self) -> List[str]:
144144
"""List of all model runs associated with the Dataset."""
145145
# TODO: model_runs -> models
146146
response = self._client.make_request(
@@ -149,7 +149,7 @@ def model_runs(self) -> Dict[Any, Any]:
149149
return response
150150

151151
@property
152-
def slices(self) -> Dict[Any, Any]:
152+
def slices(self) -> List[str]:
153153
"""List of all Slice IDs created from the Dataset."""
154154
response = self._client.make_request(
155155
{}, f"dataset/{self.id}/slices", requests.get
@@ -185,6 +185,15 @@ def items(self) -> List[DatasetItem]:
185185
raise DatasetItemRetrievalError(message=error)
186186
return constructed_dataset_items
187187

188+
@property
189+
def scenes(self) -> List[Dict[str, Any]]:
190+
"""List of ID, reference ID, type, and metadata for all scenes in the Dataset."""
191+
response = self._client.make_request(
192+
{}, f"dataset/{self.id}/scenes_list", requests.get
193+
)
194+
195+
return response.get("scenes", None)
196+
188197
@sanitize_string_args
189198
def autotag_items(self, autotag_name, for_scores_greater_than=0):
190199
"""Fetches the autotag's items above the score threshold, sorted by descending score.

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ exclude = '''
2121

2222
[tool.poetry]
2323
name = "scale-nucleus"
24-
version = "0.4.4"
24+
version = "0.4.5"
2525
description = "The official Python client library for Nucleus, the Data Platform for AI"
2626
license = "MIT"
2727
authors = ["Scale AI Nucleus Team <nucleusapi@scaleapi.com>"]

tests/test_scene.py

Lines changed: 39 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,16 @@ def test_scene_upload_sync(dataset_scene):
274274
assert response["dataset_id"] == dataset_scene.id
275275
assert response["new_scenes"] == len(scenes)
276276

277+
uploaded_scenes = dataset_scene.scenes
278+
assert len(uploaded_scenes) == len(scenes)
279+
assert all(
280+
u["reference_id"] == o.reference_id
281+
for u, o in zip(uploaded_scenes, scenes)
282+
)
283+
assert all(
284+
u["metadata"] == o.metadata for u, o in zip(uploaded_scenes, scenes)
285+
)
286+
277287

278288
@pytest.mark.skip("Deactivated sync upload for scenes")
279289
@pytest.mark.integration
@@ -289,6 +299,16 @@ def test_scene_and_cuboid_upload_sync(dataset_scene):
289299
assert response["dataset_id"] == dataset_scene.id
290300
assert response["new_scenes"] == len(scenes)
291301

302+
uploaded_scenes = dataset_scene.scenes
303+
assert len(uploaded_scenes) == len(scenes)
304+
assert all(
305+
u["reference_id"] == o.reference_id
306+
for u, o in zip(uploaded_scenes, scenes)
307+
)
308+
assert all(
309+
u["metadata"] == o.metadata for u, o in zip(uploaded_scenes, scenes)
310+
)
311+
292312
lidar_item_ref = payload[SCENES_KEY][0][FRAMES_KEY][0]["lidar"][
293313
REFERENCE_ID_KEY
294314
]
@@ -341,6 +361,16 @@ def test_scene_upload_async(dataset_scene):
341361
"total_steps": 1,
342362
}
343363

364+
uploaded_scenes = dataset_scene.scenes
365+
assert len(uploaded_scenes) == len(scenes)
366+
assert all(
367+
u["reference_id"] == o.reference_id
368+
for u, o in zip(uploaded_scenes, scenes)
369+
)
370+
assert all(
371+
u["metadata"] == o.metadata for u, o in zip(uploaded_scenes, scenes)
372+
)
373+
344374

345375
@pytest.mark.skip(reason="Temporarily skipped because failing 12/28/21")
346376
@pytest.mark.integration
@@ -349,7 +379,6 @@ def test_scene_upload_and_update(dataset_scene):
349379
scenes = [
350380
LidarScene.from_json(scene_json) for scene_json in payload[SCENES_KEY]
351381
]
352-
reference_ids = [s.reference_id for s in scenes]
353382
update = payload[UPDATE_KEY]
354383

355384
job = dataset_scene.append(scenes, update=update, asynchronous=True)
@@ -374,10 +403,15 @@ def test_scene_upload_and_update(dataset_scene):
374403
"total_steps": 1,
375404
}
376405

377-
fetched_scenes = [
378-
dataset_scene.get_scene(ref_id) for ref_id in reference_ids
379-
]
380-
assert len(fetched_scenes) == len(scenes)
406+
uploaded_scenes = dataset_scene.scenes
407+
assert len(uploaded_scenes) == len(scenes)
408+
assert all(
409+
u["reference_id"] == o.reference_id
410+
for u, o in zip(uploaded_scenes, scenes)
411+
)
412+
assert all(
413+
u["metadata"] == o.metadata for u, o in zip(uploaded_scenes, scenes)
414+
)
381415

382416
job2 = dataset_scene.append(scenes, update=True, asynchronous=True)
383417
job2.sleep_until_complete()

0 commit comments

Comments
 (0)