@@ -3,18 +3,25 @@
 import requests
 
 from nucleus.job import AsyncJob
+from nucleus.prediction import (
+    BoxPrediction,
+    CuboidPrediction,
+    PolygonPrediction,
+    SegmentationPrediction,
+    from_json,
+)
 from nucleus.url_utils import sanitize_string_args
 from nucleus.utils import (
     convert_export_payload,
     format_dataset_item_response,
+    format_prediction_response,
     serialize_and_write_to_presigned_url,
 )
 
-from .annotation import (
-    Annotation,
-    check_all_mask_paths_remote,
-)
+from .annotation import Annotation, check_all_mask_paths_remote
 from .constants import (
+    ANNOTATIONS_KEY,
+    AUTOTAG_SCORE_THRESHOLD,
     DATASET_LENGTH_KEY,
     DATASET_MODEL_RUNS_KEY,
     DATASET_NAME_KEY,
@@ -24,20 +31,19 @@
     NAME_KEY,
     REFERENCE_IDS_KEY,
     REQUEST_ID_KEY,
-    AUTOTAG_SCORE_THRESHOLD,
     UPDATE_KEY,
 )
 from .dataset_item import (
     DatasetItem,
     check_all_paths_remote,
     check_for_duplicate_reference_ids,
 )
-from .scene import LidarScene, Scene, check_all_scene_paths_remote
 from .payload_constructor import (
     construct_append_scenes_payload,
     construct_model_run_creation_payload,
     construct_taxonomy_payload,
 )
+from .scene import LidarScene, Scene, check_all_scene_paths_remote
 
 WARN_FOR_LARGE_UPLOAD = 50000
 WARN_FOR_LARGE_SCENES_UPLOAD = 5
@@ -525,3 +531,137 @@ def get_scene(self, reference_id) -> Scene:
                 requests_command=requests.get,
             )
         )
+
+    def export_predictions(self, model):
+        """Exports all predictions from a model on this dataset"""
+        json_response = self._client.make_request(
+            payload=None,
+            route=f"dataset/{self.id}/model/{model.id}/export",
+            requests_command=requests.get,
+        )
+        return format_prediction_response({ANNOTATIONS_KEY: json_response})
+
+    def calculate_evaluation_metrics(self, model, options=None):
+        """Starts computation of evaluation metrics for a model on this dataset.
+
+        :param model: the model to calculate evaluation metrics for
+        :param options: Dict with keys:
+            class_agnostic -- A flag to specify whether the matching algorithm should be class-agnostic.
+                Default value: True
+
+            allowed_label_matches -- An optional list of AllowedMatch objects specifying the allowed matches
+                between ground truth annotations and model predictions.
+                If specified, the 'class_agnostic' flag is assumed to be False.
+
+            Type 'AllowedMatch':
+            {
+                ground_truth_label: string,      # A label for a ground truth annotation.
+                model_prediction_label: string,  # A label for a model prediction that can be matched
+                                                 # with the corresponding ground truth label.
+            }
+
+            payload:
+            {
+                "class_agnostic": boolean,
+                "allowed_label_matches": List[AllowedMatch],
+            }"""
+        if options is None:
+            options = {}
+        return self._client.make_request(
+            payload=options,
+            route=f"dataset/{self.id}/model/{model.id}/calculateEvaluationMetrics",
+        )
+
+    def upload_predictions(
+        self,
+        model,
+        predictions: List[
+            Union[
+                BoxPrediction,
+                PolygonPrediction,
+                CuboidPrediction,
+                SegmentationPrediction,
+            ]
+        ],
+        update=False,
+        asynchronous=False,
+    ):
+        """
+        Uploads model outputs as predictions for a model_run. Returns info about the upload.
+        :param predictions: List of prediction objects to ingest
+        :param update: Whether to update (if True) or ignore (if False) predictions with a conflicting reference_id/annotation_id
+        :param asynchronous: If True, launch the upload asynchronously and return a reference to an asynchronous job object. This is recommended for large ingests.
+        :return:
+            If synchronous:
+            {
+                "model_run_id": str,
+                "predictions_processed": int,
+                "predictions_ignored": int,
+            }
+        """
+        if asynchronous:
+            check_all_mask_paths_remote(predictions)
+
+            request_id = serialize_and_write_to_presigned_url(
+                predictions, self.id, self._client
+            )
+            response = self._client.make_request(
+                payload={REQUEST_ID_KEY: request_id, UPDATE_KEY: update},
+                route=f"dataset/{self.id}/model/{model.id}/uploadPredictions?async=1",
+            )
+            return AsyncJob.from_json(response, self._client)
+        else:
+            return self._client.predict(
+                model_run_id=None,
+                dataset_id=self.id,
+                model_id=model.id,
+                annotations=predictions,
+                update=update,
+            )
+
+    def predictions_iloc(self, model, index):
+        """
+        Returns predictions for a dataset item by index.
+        :param model: model object to get predictions from.
+        :param index: absolute index of the dataset item within the dataset corresponding to the model run.
+        :return: List[Union[BoxPrediction, PolygonPrediction,
+            CuboidPrediction, SegmentationPrediction]]
+        """
+        return format_prediction_response(
+            self._client.make_request(
+                payload=None,
+                route=f"dataset/{self.id}/model/{model.id}/iloc/{index}",
+                requests_command=requests.get,
+            )
+        )
+
+    def predictions_refloc(self, model, reference_id):
+        """
+        Returns predictions for a dataset item by its reference_id.
+        :param model: model object to get predictions from.
+        :param reference_id: reference_id of a dataset item.
+        :return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]]
+        """
+        return format_prediction_response(
+            self._client.make_request(
+                payload=None,
+                route=f"dataset/{self.id}/model/{model.id}/referenceId/{reference_id}",
+                requests_command=requests.get,
+            )
+        )
+
+    def prediction_loc(self, model, reference_id, annotation_id):
+        """
+        Returns a single prediction by its reference_id and annotation_id. Not yet supported for segmentation predictions.
+        :param reference_id: the user-specified id for the image
+        :param annotation_id: the user-specified id for the prediction, or, if none was provided, the Scale-generated id for the prediction
+        :return:
+            BoxPrediction | PolygonPrediction | CuboidPrediction
+        """
+        return from_json(
+            self._client.make_request(
+                payload=None,
+                route=f"dataset/{self.id}/model/{model.id}/loc/{reference_id}/{annotation_id}",
+                requests_command=requests.get,
+            )
+        )
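Usage sketch for the new write paths added in this diff. This is a minimal example, assuming a `NucleusClient` and an already registered model; the API key, dataset id, the `client.add_model` helper, and the prediction values are illustrative placeholders, not part of the diff:

```python
import nucleus
from nucleus.prediction import BoxPrediction

client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key
dataset = client.get_dataset("YOUR_DATASET_ID")       # placeholder dataset id
model = client.add_model(                             # assumed helper for registering a model
    name="my-model", reference_id="my-model-ref"
)

predictions = [
    BoxPrediction(
        label="car",
        x=0,
        y=0,
        width=100,
        height=100,
        reference_id="image_1",  # must match an item already in the dataset
        confidence=0.9,
    )
]

# Synchronous upload: returns counts of processed/ignored predictions.
result = dataset.upload_predictions(model, predictions, update=False)

# Asynchronous upload (recommended for large ingests): returns an AsyncJob.
job = dataset.upload_predictions(model, predictions, asynchronous=True)
job.sleep_until_complete()  # block until the ingest finishes

# Kick off evaluation-metric computation, restricting matches to specific label pairs.
dataset.calculate_evaluation_metrics(
    model,
    options={
        "allowed_label_matches": [
            {"ground_truth_label": "car", "model_prediction_label": "car"}
        ]
    },
)
```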
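Continuing the sketch above, the new read paths (all ids are placeholders):

```python
# All predictions the model produced on this dataset.
all_predictions = dataset.export_predictions(model)

# Predictions for a single dataset item, addressed three different ways.
by_index = dataset.predictions_iloc(model, 0)
by_reference = dataset.predictions_refloc(model, reference_id="image_1")
single_box = dataset.prediction_loc(
    model, reference_id="image_1", annotation_id="box_1"  # placeholder annotation_id
)
```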