
Commit 2b9483c

Author: Diego Ardila (committed)
Commit message: "I think I fixed the errors in CircleCI"
1 parent 9418d42 commit 2b9483c

5 files changed (+55, -57 lines)


nucleus/__init__.py

Lines changed: 45 additions & 49 deletions
@@ -50,89 +50,83 @@
 geometry | dict | Representation of the bounding box in the Box2DGeometry format.\n
 metadata | dict | An arbitrary metadata blob for the annotation.\n
 """
-__version__ = "0.1.0"
-
 import json
 import logging
-import warnings
 import os
-from typing import List, Union, Dict, Callable, Any, Optional
-
-import tqdm
-import tqdm.notebook as tqdm_notebook
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Union

 import grequests
+import pkg_resources
 import requests
+import tqdm
+import tqdm.notebook as tqdm_notebook
 from requests.adapters import HTTPAdapter

 # pylint: disable=E1101
 # TODO: refactor to reduce this file to under 1000 lines.
 # pylint: disable=C0302
 from requests.packages.urllib3.util.retry import Retry

-from .constants import REFERENCE_IDS_KEY, DATASET_ITEM_IDS_KEY, UPDATE_KEY
-from .dataset import Dataset
-from .dataset_item import DatasetItem
 from .annotation import (
     BoxAnnotation,
     PolygonAnnotation,
-    SegmentationAnnotation,
     Segment,
-)
-from .prediction import (
-    BoxPrediction,
-    PolygonPrediction,
-    SegmentationPrediction,
-)
-from .model_run import ModelRun
-from .slice import Slice
-from .upload_response import UploadResponse
-from .payload_constructor import (
-    construct_append_payload,
-    construct_annotation_payload,
-    construct_model_creation_payload,
-    construct_box_predictions_payload,
-    construct_segmentation_payload,
+    SegmentationAnnotation,
 )
 from .constants import (
-    NUCLEUS_ENDPOINT,
+    ANNOTATION_METADATA_SCHEMA_KEY,
+    ANNOTATIONS_IGNORED_KEY,
+    ANNOTATIONS_PROCESSED_KEY,
+    AUTOTAGS_KEY,
+    DATASET_ID_KEY,
+    DATASET_ITEM_IDS_KEY,
     DEFAULT_NETWORK_TIMEOUT_SEC,
-    ERRORS_KEY,
+    EMBEDDINGS_URL_KEY,
     ERROR_ITEMS,
     ERROR_PAYLOAD,
-    ITEMS_KEY,
-    ITEM_KEY,
+    ERRORS_KEY,
     IMAGE_KEY,
     IMAGE_URL_KEY,
-    DATASET_ID_KEY,
+    ITEM_METADATA_SCHEMA_KEY,
+    ITEMS_KEY,
     MODEL_RUN_ID_KEY,
-    DATASET_ITEM_ID_KEY,
-    SLICE_ID_KEY,
-    ANNOTATIONS_PROCESSED_KEY,
-    ANNOTATIONS_IGNORED_KEY,
-    PREDICTIONS_PROCESSED_KEY,
+    NAME_KEY,
+    NUCLEUS_ENDPOINT,
     PREDICTIONS_IGNORED_KEY,
+    PREDICTIONS_PROCESSED_KEY,
+    REFERENCE_IDS_KEY,
+    SLICE_ID_KEY,
     STATUS_CODE_KEY,
-    SUCCESS_STATUS_CODES,
-    DATASET_NAME_KEY,
-    DATASET_MODEL_RUNS_KEY,
-    DATASET_SLICES_KEY,
-    DATASET_LENGTH_KEY,
-    NAME_KEY,
-    ANNOTATIONS_KEY,
-    AUTOTAGS_KEY,
-    ANNOTATION_METADATA_SCHEMA_KEY,
-    ITEM_METADATA_SCHEMA_KEY,
-    EMBEDDINGS_URL_KEY,
+    UPDATE_KEY,
 )
-from .model import Model
+from .dataset import Dataset
+from .dataset_item import DatasetItem
 from .errors import (
+    DatasetItemRetrievalError,
     ModelCreationError,
     ModelRunCreationError,
-    DatasetItemRetrievalError,
     NotFoundError,
     NucleusAPIError,
 )
+from .model import Model
+from .model_run import ModelRun
+from .payload_constructor import (
+    construct_annotation_payload,
+    construct_append_payload,
+    construct_box_predictions_payload,
+    construct_model_creation_payload,
+    construct_segmentation_payload,
+)
+from .prediction import (
+    BoxPrediction,
+    PolygonPrediction,
+    SegmentationPrediction,
+)
+from .slice import Slice
+from .upload_response import UploadResponse
+
+__version__ = pkg_resources.get_distribution("scale-nucleus").version

 logger = logging.getLogger(__name__)
 logging.basicConfig()
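
With this hunk, __version__ is no longer hard-coded in the module; it is resolved from the installed distribution's metadata at import time, so it tracks the version field in pyproject.toml. A minimal sketch of that lookup, assuming the package is installed under the name scale-nucleus:

import pkg_resources

# Reads the version recorded in the installed package metadata, keeping
# __version__ in sync with the [tool.poetry] version in pyproject.toml.
version = pkg_resources.get_distribution("scale-nucleus").version
print(version)  # e.g. "0.1.5" once this commit is released

# pkg_resources.DistributionNotFound is raised if the distribution is not
# installed, e.g. when importing from a bare source checkout.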
@@ -236,6 +230,8 @@ def get_model_run(self, model_run_id: str, dataset_id: str) -> ModelRun:
         """
         Fetches a model_run for given id
         :param model_run_id: internally controlled model_run_id
+        :param dataset_id: the dataset id which may determine the prediction schema
+            for this model run if present on the dataset.
         :return: model_run
         """
         return ModelRun(model_run_id, dataset_id, self)
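
The docstring now documents dataset_id as well, since the dataset may carry a prediction schema that applies to the run. A hypothetical call against this signature (the client constructor name and both IDs below are illustrative, not taken from this diff):

import nucleus

client = nucleus.NucleusClient("YOUR_API_KEY")  # assumed client entry point
# Placeholder IDs; dataset_id ties the run to any prediction schema on that dataset.
model_run = client.get_model_run("run_abc123", "ds_def456")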

nucleus/dataset.py

Lines changed: 2 additions & 0 deletions
@@ -23,6 +23,7 @@
 )
 from .dataset_item import (
     DatasetItem,
+    check_all_paths_remote,
     check_for_duplicate_reference_ids,
 )
 from .payload_constructor import construct_model_run_creation_payload
@@ -196,6 +197,7 @@ def append(
         check_for_duplicate_reference_ids(dataset_items)

         if asynchronous:
+            check_all_paths_remote(dataset_items)
             request_id = serialize_and_write_to_presigned_url(
                 dataset_items, self.id, self._client
             )
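
check_all_paths_remote is only called on the asynchronous path because items are serialized to a presigned URL and fetched by the backend, which cannot read local files. A rough sketch of the kind of validation this implies (illustrative only, not the actual helper in nucleus/dataset_item.py; the image_location attribute in the usage note is an assumption):

from typing import Iterable

def check_all_paths_remote_sketch(image_urls: Iterable[str]) -> None:
    # Illustrative: anything without an http(s) scheme is treated as a local path.
    for url in image_urls:
        if not url.startswith(("http://", "https://")):
            raise ValueError(
                f"Asynchronous upload requires remote image URLs, got: {url}"
            )

# e.g. check_all_paths_remote_sketch(item.image_location for item in dataset_items)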

nucleus/model_run.py

Lines changed: 2 additions & 2 deletions
@@ -8,9 +8,9 @@
     ANNOTATIONS_KEY,
     BOX_TYPE,
     DEFAULT_ANNOTATION_UPDATE_MODE,
-    MASK_TYPE,
     POLYGON_TYPE,
     REQUEST_ID_KEY,
+    SEGMENTATION_TYPE,
     UPDATE_KEY,
 )
 from .prediction import (
@@ -175,7 +175,7 @@ def _format_prediction_response(
     ] = {
         BOX_TYPE: BoxPrediction,
         POLYGON_TYPE: PolygonPrediction,
-        MASK_TYPE: SegmentationPrediction,
+        SEGMENTATION_TYPE: SegmentationPrediction,
     }
     for type_key in annotation_payload:
         type_class = type_key_to_class[type_key]
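
The MASK_TYPE to SEGMENTATION_TYPE rename matters because the dictionary key has to match the type string used in the prediction payload; a stale key would make the type_key_to_class lookup fail. A small self-contained illustration of that dispatch pattern, with placeholder type strings and stub classes standing in for the real definitions in nucleus/constants.py and nucleus/prediction.py:

BOX_TYPE = "box"                    # placeholder value
SEGMENTATION_TYPE = "segmentation"  # placeholder value

class BoxPrediction: ...
class SegmentationPrediction: ...

type_key_to_class = {
    BOX_TYPE: BoxPrediction,
    SEGMENTATION_TYPE: SegmentationPrediction,
}

# Each payload key routes to its prediction class; a key that does not match
# the payload (e.g. a stale MASK_TYPE constant) would raise KeyError here.
payload = {"segmentation": [{"label": "car"}], "box": [{"label": "bus"}]}
for type_key, predictions in payload.items():
    type_class = type_key_to_class[type_key]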

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ exclude = '''

 [tool.poetry]
 name = "scale-nucleus"
-version = "0.1.4"
+version = "0.1.5"
 description = "The official Python client library for Nucleus, the Data Platform for AI"
 license = "MIT"
 authors = ["Scale AI Nucleus Team <nucleusapi@scaleapi.com>"]

tests/test_dataset.py

Lines changed: 5 additions & 5 deletions
@@ -190,11 +190,11 @@ def test_dataset_append_async_with_1_bad_url(dataset: Dataset):
             "started_image_processing": f"Dataset: {dataset.id}, Job: {job.id}",
         },
     }
-    assert job.errors() == [
-        "One or more of the images you attempted to upload did not process correctly. Please see the status for an overview and the errors for more detailed messages.",
-        # Todo: figure out why this error isn't propagating from image upload.
-        'Failure when processing the image "https://looks.ok.but.is.not.accessible": {}',
-    ]
+    # The error is fairly detailed and subject to change. What's important is we surface which URLs failed.
+    assert (
+        'Failure when processing the image "https://looks.ok.but.is.not.accessible"'
+        in str(job.errors())
+    )


 def test_dataset_list_autotags(CLIENT, dataset):
