@@ -23,13 +23,13 @@
     DATASET_SLICES_KEY,
     DEFAULT_ANNOTATION_UPDATE_MODE,
     EXPORTED_ROWS,
-    FRAMES,
+    FRAMES_KEY,
     NAME_KEY,
     REFERENCE_IDS_KEY,
     REQUEST_ID_KEY,
     SCENES_KEY,
     UPDATE_KEY,
-    URL,
+    URL_KEY,
 )
 from .dataset_item import (
     DatasetItem,
@@ -209,7 +209,7 @@ def append(
         asynchronous=False,
     ) -> Union[dict, AsyncJob]:
         """
-        Appends scenes or images with metadata (dataset items) to the dataset. Overwrites images on collision if forced.
+        Appends images with metadata (dataset items) or scenes to the dataset. Overwrites images on collision if forced.

         Parameters:
         :param items: items to upload
@@ -224,20 +224,20 @@ def append(
             'ignored_items': int,
         }
         """
-        all_dataset_items = all(
-            (isinstance(item, DatasetItem) for item in items)
-        )
-        all_scenes = all((isinstance(item, LidarScene) for item in items))
-        if not all_dataset_items and not all_scenes:
+        dataset_items = [
+            item for item in items if isinstance(item, DatasetItem)
+        ]
+        scenes = [item for item in items if isinstance(item, LidarScene)]
+        if dataset_items and scenes:
             raise Exception(
                 "You must append either DatasetItems or Scenes to the dataset."
             )
-        if all_scenes:
-            return self.append_scenes(items, update, asynchronous)
+        if scenes:
+            return self.append_scenes(scenes, update, asynchronous)

-        check_for_duplicate_reference_ids(items)
+        check_for_duplicate_reference_ids(dataset_items)

-        if len(items) > WARN_FOR_LARGE_UPLOAD and not asynchronous:
+        if len(dataset_items) > WARN_FOR_LARGE_UPLOAD and not asynchronous:
             print(
                 "Tip: for large uploads, get faster performance by importing your data "
                 "into Nucleus directly from a cloud storage provider. See "
@@ -246,9 +246,9 @@ def append(
             )

         if asynchronous:
-            check_all_paths_remote(items)
+            check_all_paths_remote(dataset_items)
             request_id = serialize_and_write_to_presigned_url(
-                items, self.id, self._client
+                dataset_items, self.id, self._client
             )
             response = self._client.make_request(
                 payload={REQUEST_ID_KEY: request_id, UPDATE_KEY: update},
@@ -258,7 +258,7 @@ def append(

         return self._client.populate_dataset(
             self.id,
-            items,
+            dataset_items,
             update=update,
             batch_size=batch_size,
         )
@@ -309,8 +309,8 @@ def upload_scenes(
         """
         if asynchronous:
             for scene in payload[SCENES_KEY]:
-                for frame in scene[FRAMES]:
-                    check_all_frame_paths_remote(frame[URL])
+                for frame in scene[FRAMES_KEY]:
+                    check_all_frame_paths_remote(frame[URL_KEY])
             request_id = serialize_and_write_to_presigned_url(
                 [payload], self.id, self._client
             )
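Taken together, the `append` changes replace the old all-or-nothing `all(...)` type checks with a partition: the input is split into `DatasetItem` and `LidarScene` lists, mixing the two types raises an exception, and a pure-scene batch is now forwarded to `append_scenes` as the filtered `scenes` list rather than the raw `items`. A minimal usage sketch of the resulting behavior, assuming the public Nucleus client API; the API key and dataset ID are placeholders:

```python
from nucleus import DatasetItem, NucleusClient

client = NucleusClient("YOUR_API_KEY")           # placeholder key
dataset = client.get_dataset("YOUR_DATASET_ID")  # placeholder ID

items = [
    DatasetItem(image_location="s3://bucket/img0.png", reference_id="img0"),
    DatasetItem(image_location="s3://bucket/img1.png", reference_id="img1"),
]

# A homogeneous batch of DatasetItems goes through populate_dataset
# (or the presigned-URL upload path when asynchronous=True).
dataset.append(items)

# After this change, mixing DatasetItems with LidarScenes raises,
# because both partitioned lists would be non-empty:
# dataset.append(items + [some_lidar_scene])  # -> Exception
```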