Skip to content

Commit 43c602c

Browse files
authored
Rename Unit Test to Scenario Test in Nucleus (#212)
* more renaming fun * rename modelci to validate * nit * merge master * adjust cli tests * last ones
1 parent 72e245f commit 43c602c

33 files changed

+464
-405
lines changed

cli/tests.py

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@
88
from cli.helpers.nucleus_url import nucleus_url
99
from cli.helpers.web_helper import launch_web_or_invoke
1010
from nucleus import NucleusAPIError
11-
from nucleus.modelci import (
11+
from nucleus.validate import (
1212
AvailableEvalFunctions,
13+
ScenarioTestMetric,
1314
ThresholdComparison,
14-
UnitTestMetric,
1515
)
1616

1717

@@ -21,9 +21,9 @@
2121
def tests(ctx, web):
2222
"""Scenario Tests allow you to test your Models
2323
24-
https://dashboard.scale.com/nucleus/unit-tests
24+
https://dashboard.scale.com/nucleus/scenario-tests
2525
"""
26-
launch_web_or_invoke("unit-tests", ctx, web, list_tests)
26+
launch_web_or_invoke("scenario-tests", ctx, web, list_tests)
2727

2828

2929
@tests.command("list")
@@ -32,7 +32,7 @@ def list_tests():
3232
console = Console()
3333
with console.status("Finding your Scenario Tests", spinner="dots4"):
3434
client = init_client()
35-
unit_tests = client.modelci.list_unit_tests()
35+
scenario_tests = client.validate.list_scenario_tests()
3636
table = Table(
3737
Column("id", overflow="fold", min_width=24),
3838
"Name",
@@ -41,13 +41,13 @@ def list_tests():
4141
title=":chart_with_upwards_trend: Scenario Tests",
4242
title_justify="left",
4343
)
44-
for ut in unit_tests:
44+
for ut in scenario_tests:
4545
table.add_row(ut.id, ut.name, ut.slice_id, nucleus_url(ut.id))
4646
console.print(table)
4747

4848

4949
def format_criterion(
50-
criterion: UnitTestMetric, eval_functions: AvailableEvalFunctions
50+
criterion: ScenarioTestMetric, eval_functions: AvailableEvalFunctions
5151
):
5252
op_map = {
5353
ThresholdComparison.GREATER_THAN: ">",
@@ -63,55 +63,57 @@ def format_criterion(
6363

6464

6565
@tests.command("describe")
66-
@click.argument("unit-test-id", default=None, required=False)
66+
@click.argument("scenario-test-id", default=None, required=False)
6767
@click.option(
6868
"--all", "-a", is_flag=True, help="View details about all Scenario Tests"
6969
)
70-
def describe_test(unit_test_id, all):
70+
def describe_test(scenario_test_id, all):
7171
"""View detailed information about a test or all tests"""
7272
console = Console()
73-
# unit_test = client.modelci.get_unit_test(unit_test_id)
74-
assert unit_test_id or all, "Must pass a unit_test_id or --all"
73+
# scenario_test = client.validate.get_scenario_test(scenario_test_id)
74+
assert scenario_test_id or all, "Must pass a scenario_test_id or --all"
7575
client = init_client()
76-
unit_tests = client.modelci.list_unit_tests()
76+
scenario_tests = client.validate.list_scenario_tests()
7777
if all:
7878
tree = Tree(":chart_with_upwards_trend: All Scenario Tests")
7979
with Live(
8080
"Fetching description of all Scenario Tests",
8181
vertical_overflow="visible",
8282
) as live:
83-
for idx, ut in enumerate(unit_tests):
84-
test_branch = tree.add(f"{idx}: Unit Test")
85-
build_unit_test_info_tree(client, ut, test_branch)
83+
for idx, ut in enumerate(scenario_tests):
84+
test_branch = tree.add(f"{idx}: Scenario Test")
85+
build_scenario_test_info_tree(client, ut, test_branch)
8686
live.update(tree)
8787
else:
8888
with console.status("Fetching Scenario Test information"):
89-
unit_test = [ut for ut in unit_tests if ut.id == unit_test_id][0]
90-
tree = Tree(":chart_with_upwards_trend: Unit Test")
91-
build_unit_test_info_tree(client, unit_test, tree)
89+
scenario_test = [
90+
ut for ut in scenario_tests if ut.id == scenario_test_id
91+
][0]
92+
tree = Tree(":chart_with_upwards_trend: Scenario Test")
93+
build_scenario_test_info_tree(client, scenario_test, tree)
9294
console.print(tree)
9395

9496

95-
def build_unit_test_info_tree(client, unit_test, tree):
97+
def build_scenario_test_info_tree(client, scenario_test, tree):
9698
try:
97-
slc = client.get_slice(unit_test.slice_id)
99+
slc = client.get_slice(scenario_test.slice_id)
98100
info_branch = tree.add(":mag: Details")
99-
info_branch.add(f"id: '{unit_test.id}'")
100-
info_branch.add(f"name: '{unit_test.name}'")
101-
unit_test_url = nucleus_url(unit_test.id)
102-
info_branch.add(f"url: {unit_test_url}")
101+
info_branch.add(f"id: '{scenario_test.id}'")
102+
info_branch.add(f"name: '{scenario_test.name}'")
103+
scenario_test_url = nucleus_url(scenario_test.id)
104+
info_branch.add(f"url: {scenario_test_url}")
103105
slice_url = nucleus_url(f"{slc.dataset_id}/{slc.slice_id}")
104106
slice_branch = tree.add(":cake: Slice")
105107
slice_branch.add(f"id: '{slc.id}'")
106108
slice_info = slc.info()
107109
slice_branch.add(f"name: '{slice_info['name']}'")
108110
slice_branch.add(f"len: {len(slc.items)}")
109111
slice_branch.add(f"url: {slice_url}")
110-
criteria = unit_test.get_criteria()
112+
criteria = scenario_test.get_criteria()
111113
criteria_branch = tree.add(":crossed_flags: Criteria")
112114
for criterion in criteria:
113115
pretty_criterion = format_criterion(
114-
criterion, client.modelci.eval_functions
116+
criterion, client.validate.eval_functions
115117
)
116118
criteria_branch.add(pretty_criterion)
117119
except NucleusAPIError as e:

docs/index.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ Sections
3232

3333
api/nucleus/index
3434
api/nucleus/metrics/index
35-
api/nucleus/modelci/index
35+
api/nucleus/validate/index
3636

3737

3838
Index

nucleus/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,6 @@
109109
from .logger import logger
110110
from .model import Model
111111
from .model_run import ModelRun
112-
from .modelci import ModelCI
113112
from .payload_constructor import (
114113
construct_annotation_payload,
115114
construct_append_payload,
@@ -128,6 +127,7 @@
128127
from .scene import Frame, LidarScene
129128
from .slice import Slice
130129
from .upload_response import UploadResponse
130+
from .validate import Validate
131131

132132
# pylint: disable=E1101
133133
# TODO: refactor to reduce this file to under 1000 lines.
@@ -166,7 +166,7 @@ def __init__(
166166
if use_notebook:
167167
self.tqdm_bar = tqdm_notebook.tqdm
168168
self._connection = Connection(self.api_key, self.endpoint)
169-
self.modelci = ModelCI(self.api_key, self.endpoint)
169+
self.validate = Validate(self.api_key, self.endpoint)
170170

171171
def __repr__(self):
172172
return f"NucleusClient(api_key='{self.api_key}', use_notebook={self._use_notebook}, endpoint='{self.endpoint}')"

nucleus/model.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -163,27 +163,27 @@ def create_run(
163163

164164
return model_run
165165

166-
def evaluate(self, unit_test_names: List[str]) -> AsyncJob:
166+
def evaluate(self, scenario_test_names: List[str]) -> AsyncJob:
167167
"""Evaluates this on the specified Unit Tests. ::
168168
169169
import nucleus
170170
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
171171
model = client.list_models()[0]
172-
unit_test = client.modelci.create_unit_test(
173-
"sample_unit_test", "YOUR_SLICE_ID"
172+
scenario_test = client.validate.create_scenario_test(
173+
"sample_scenario_test", "YOUR_SLICE_ID"
174174
)
175175
176-
model.evaluate(["sample_unit_test"])
176+
model.evaluate(["sample_scenario_test"])
177177
178178
Args:
179-
unit_test_names: list of unit tests to evaluate
179+
scenario_test_names: list of scenario tests to evaluate
180180
181181
Returns:
182182
AsyncJob object of evaluation job
183183
"""
184184
response = self._client.make_request(
185-
{"test_names": unit_test_names},
186-
f"modelci/{self.id}/evaluate",
185+
{"test_names": scenario_test_names},
186+
f"validate/{self.id}/evaluate",
187187
requests_command=requests.post,
188188
)
189189
return AsyncJob.from_json(response, self._client)

nucleus/modelci/__init__.py

Lines changed: 0 additions & 21 deletions
This file was deleted.

nucleus/modelci/unit_test_metric.py

Lines changed: 0 additions & 14 deletions
This file was deleted.

nucleus/validate/__init__.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
"""Model CI Python Library."""
2+
3+
__all__ = [
4+
"Validate",
5+
"ScenarioTest",
6+
"EvaluationCriterion",
7+
]
8+
9+
from .client import Validate
10+
from .constants import ThresholdComparison
11+
from .data_transfer_objects.eval_function import (
12+
EvalFunctionEntry,
13+
EvaluationCriterion,
14+
GetEvalFunctions,
15+
)
16+
from .data_transfer_objects.scenario_test import CreateScenarioTestRequest
17+
from .errors import CreateScenarioTestError
18+
from .eval_functions.available_eval_functions import AvailableEvalFunctions
19+
from .scenario_test import ScenarioTest
20+
from .scenario_test_evaluation import (
21+
ScenarioTestEvaluation,
22+
ScenarioTestItemEvaluation,
23+
)
24+
from .scenario_test_metric import ScenarioTestMetric

0 commit comments

Comments
 (0)