9 | 9 | import pickle
10 | 10 | import random
11 | 11 | import tempfile
| 12 | +import unittest
12 | 13 | from pathlib import Path
13 | 14 | from unittest.mock import DEFAULT, MagicMock, patch
14 | 15 |
20 | 21 | from sasctl import current_session
21 | 22 | from sasctl.core import RestObj, VersionInfo
22 | 23 | from sasctl.pzmm.write_score_code import ScoreCode as sc
| 24 | +from sasctl.pzmm.write_score_code import ScoreCode
23 | 25 |
24 | 26 |
25 | 27 | @pytest.fixture()
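The test classes introduced below all share the same fixture pattern: `setUp` binds the `ScoreCode` class itself rather than an instance, because the generated source accumulates on the class-level `score_code` attribute, and `tearDown` clears that attribute so state cannot leak between tests. A minimal sketch of the pattern, using a stand-in class rather than the real `ScoreCode`:

```python
import unittest


class FakeScoreCode:
    # Stand-in for sasctl's ScoreCode: generated source collects on a
    # class-level string attribute shared by everything that touches it.
    score_code = ""


class HarnessSketch(unittest.TestCase):
    def setUp(self):
        self.sc = FakeScoreCode  # bind the class, not an instance

    def tearDown(self):
        self.sc.score_code = ""  # reset the shared class state

    def test_accumulation_is_class_level(self):
        self.sc.score_code += "x = 1\n"
        self.assertEqual(FakeScoreCode.score_code, "x = 1\n")
```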
|
@@ -301,50 +303,174 @@ def test_determine_score_metrics():
301 | 303 |     ) == ["P_A", "P_B", "P_C"]
302 | 304 |
303 | 305 |
304 | | -def test_no_targets_no_thresholds():
305 | | -    """
306 | | -    Test Cases:
307 | | -    - len(metrics) == 1
308 | | -        - non-h2o
309 | | -        - h2o
310 | | -    - len(metrics) > 1
311 | | -        - non-h2o
312 | | -        - h2o
313 | | -    - raise error for invalid config (returns - metrics != 0)
314 | | -    """
315 | | -    metrics = "Classification"
316 | | -    returns = [1, "A"]
317 | | -    with pytest.raises(ValueError):
318 | | -        sc._no_targets_no_thresholds(metrics, returns)
| 306 | +class TestNoTargetsNoThresholds(unittest.TestCase):
| 307 | +    def setUp(self):
| 308 | +        self.sc = ScoreCode
319 | 309 |
320 | | -    returns = [1]
321 | | -    sc._no_targets_no_thresholds(metrics, returns)
322 | | -    assert "Classification = prediction" in sc.score_code
323 | | -    sc.score_code = ""
| 310 | +    def tearDown(self):
| 311 | +        self.sc.score_code = ""
324 | 312 |
325 | | -    sc._no_targets_no_thresholds(metrics, returns, h2o_model=True)
326 | | -    assert "Classification = prediction[1][0]"
327 | | -    sc.score_code = ""
| 313 | +    def execute_snippet(self, *args):
| 314 | +        scope = {}
| 315 | +        exec(self.sc.score_code, scope)
| 316 | +        test_snippet = scope["test_snippet"]
| 317 | +        return test_snippet(*args)
328 | 318 |
329 | | -    metrics = ["Classification", "Proba_A", "Proba_B", "Proba_C"]
330 | | -    returns = ["I", 1, 2, 3]
331 | | -    sc._no_targets_no_thresholds(metrics, returns)
332 | | -    assert (
333 | | -        sc.score_code == f"{'':4}Classification = prediction[0]\n"
334 | | -        f"{'':4}Proba_A = prediction[1]\n"
335 | | -        f"{'':4}Proba_B = prediction[2]\n"
336 | | -        f"{'':4}Proba_C = prediction[3]\n\n"
337 | | -        f"{'':4}return Classification, Proba_A, Proba_B, Proba_C"
338 | | -    )
339 | | -    sc.score_code = ""
340 | | -    sc._no_targets_no_thresholds(metrics, returns, h2o_model=True)
341 | | -    assert (
342 | | -        sc.score_code == f"{'':4}Classification = prediction[1][0]\n"
343 | | -        f"{'':4}Proba_A = float(prediction[1][1])\n"
344 | | -        f"{'':4}Proba_B = float(prediction[1][2])\n"
345 | | -        f"{'':4}Proba_C = float(prediction[1][3])\n\n"
346 | | -        f"{'':4}return Classification, Proba_A, Proba_B, Proba_C"
347 | | -    )
| 319 | +    def test_improper_arguments(self):
| 320 | +        metrics = "Classification"
| 321 | +        returns = [1, "A"]
| 322 | +        with pytest.raises(ValueError):
| 323 | +            self.sc._no_targets_no_thresholds(metrics, returns)
| 324 | +
| 325 | +    def test_single_metric(self):
| 326 | +        metrics = "Classification"
| 327 | +        returns = [1]
| 328 | +        self.sc.score_code += "import pandas as pd\n" \
| 329 | +                              "def test_snippet(input_array, prediction):\n"
| 330 | +        self.sc._no_targets_no_thresholds(metrics, returns)
| 331 | +        # Single row
| 332 | +        input_array = pd.DataFrame([[1]], columns=["A"], index=[0])
| 333 | +        prediction = [.5]
| 334 | +        self.assertEqual(self.execute_snippet(input_array, prediction), .5)
| 335 | +        # Multi row
| 336 | +        input_array = pd.DataFrame({"A": [.9, 1, 1.1]})
| 337 | +        prediction = [.3, .4, .5]
| 338 | +        pd.testing.assert_frame_equal(
| 339 | +            self.execute_snippet(input_array, prediction),
| 340 | +            pd.DataFrame({metrics: prediction})
| 341 | +        )
| 342 | +
| 343 | +    def test_single_metric_h2o(self):
| 344 | +        metrics = "Classification"
| 345 | +        returns = [1]
| 346 | +        self.sc.score_code += "import pandas as pd\n" \
| 347 | +                              "def test_snippet(input_array, prediction):\n"
| 348 | +        self.sc._no_targets_no_thresholds(metrics, returns, h2o_model=True)
| 349 | +        # Single row
| 350 | +        input_array = pd.DataFrame([[1]], columns=["A"], index=[0])
| 351 | +        prediction = [[], [.5]]
| 352 | +        self.assertEqual(self.execute_snippet(input_array, prediction), .5)
| 353 | +        # Multi row
| 354 | +        input_array = pd.DataFrame({"A": [.9, 1, 1.1]})
| 355 | +        prediction = pd.DataFrame({"predict": [0, 1, 1], "p0": [.3, .4, .5]})
| 356 | +        pd.testing.assert_series_equal(
| 357 | +            self.execute_snippet(input_array, prediction),
| 358 | +            pd.Series([0, 1, 1], name="predict")
| 359 | +        )
| 360 | +
| 361 | +    def test_multi_metric(self):
| 362 | +        metrics = ["Classification", "Proba_A", "Proba_B", "Proba_C"]
| 363 | +        returns = ["I", 1, 2, 3]
| 364 | +        self.sc.score_code += "import pandas as pd\n" \
| 365 | +                              "def test_snippet(input_array, prediction):\n"
| 366 | +        self.sc._no_targets_no_thresholds(metrics, returns)
| 367 | +        # Single row
| 368 | +        input_array = pd.DataFrame([[1]], columns=["A"], index=[0])
| 369 | +        prediction = ["i", .3, .4, .5]
| 370 | +        self.assertEqual(
| 371 | +            self.execute_snippet(input_array, prediction),
| 372 | +            ("i", .3, .4, .5)
| 373 | +        )
| 374 | +        # Multi row
| 375 | +        input_array = pd.DataFrame({"A": [1, 0, 1]})
| 376 | +        prediction = pd.DataFrame({
| 377 | +            "Classification": ["i", "j", "k"],
| 378 | +            "Proba_A": [.1, .2, .3],
| 379 | +            "Proba_B": [.4, .5, .6],
| 380 | +            "Proba_C": [.7, .8, .9]
| 381 | +        })
| 382 | +        pd.testing.assert_frame_equal(
| 383 | +            self.execute_snippet(input_array, prediction),
| 384 | +            prediction
| 385 | +        )
| 386 | +
| 387 | +    def test_multi_metric_h2o(self):
| 388 | +        metrics = ["Classification", "Proba_A", "Proba_B", "Proba_C"]
| 389 | +        returns = ["I", 1, 2, 3]
| 390 | +        self.sc.score_code += "import pandas as pd\n" \
| 391 | +                              "def test_snippet(input_array, prediction):\n"
| 392 | +        self.sc._no_targets_no_thresholds(metrics, returns, h2o_model=True)
| 393 | +        # Single row
| 394 | +        input_array = pd.DataFrame([[1]], columns=["A"], index=[0])
| 395 | +        prediction = [[], ["i", .3, .4, .5]]
| 396 | +        self.assertEqual(
| 397 | +            self.execute_snippet(input_array, prediction),
| 398 | +            ("i", .3, .4, .5)
| 399 | +        )
| 400 | +        # Multi row
| 401 | +        input_array = pd.DataFrame({"A": [1, 0, 1]})
| 402 | +        prediction = pd.DataFrame({
| 403 | +            "Classification": ["i", "j", "k"],
| 404 | +            "Proba_A": [.1, .2, .3],
| 405 | +            "Proba_B": [.4, .5, .6],
| 406 | +            "Proba_C": [.7, .8, .9]
| 407 | +        })
| 408 | +        pd.testing.assert_frame_equal(
| 409 | +            self.execute_snippet(input_array, prediction),
| 410 | +            prediction
| 411 | +        )
| 412 | +
| 413 | +
| 414 | +class TestBinaryTarget(unittest.TestCase):
| 415 | +    def setUp(self):
| 416 | +        self.sc = ScoreCode
| 417 | +        self.target_values = ["A", "B"]
| 418 | +
| 419 | +    def tearDown(self):
| 420 | +        self.sc.score_code = ""
| 421 | +
| 422 | +    def execute_snippet(self, *args):
| 423 | +        scope = {}
| 424 | +        exec(self.sc.score_code, scope)
| 425 | +        test_snippet = scope["test_snippet"]
| 426 | +        return test_snippet(*args)
| 427 | +
| 428 | +    def test_improper_arguments(self):
| 429 | +        with pytest.raises(ValueError):
| 430 | +            self.sc._binary_target([], [], ["A", 1, 2, 3])
| 431 | +        with pytest.raises(ValueError):
| 432 | +            self.sc._binary_target([], [], ["A", "B"])
| 433 | +        with pytest.raises(ValueError):
| 434 | +            self.sc._binary_target(["A", "B", "C", "D"], [], [])
| 435 | +
| 436 | +    def test_one_metric_one_return(self):
| 437 | +        metrics = "Classification"
| 438 | +        returns = [""]
| 439 | +        self.sc.score_code += "import pandas as pd\n" \
| 440 | +                              "def test_snippet(input_array, prediction):\n"
| 441 | +        self.sc._binary_target(metrics, self.target_values, returns)
| 442 | +        # Single row
| 443 | +        input_array = pd.DataFrame([[1]], columns=["A"], index=[0])
| 444 | +        prediction = .5
| 445 | +        self.assertEqual(self.execute_snippet(input_array, prediction), .5)
| 446 | +        # Multi row
| 447 | +        input_array = pd.DataFrame({"A": [.9, 1, 1.1]})
| 448 | +        prediction = [.3, .4, .5]
| 449 | +        pd.testing.assert_frame_equal(
| 450 | +            self.execute_snippet(input_array, prediction),
| 451 | +            pd.DataFrame({metrics: prediction})
| 452 | +        )
| 453 | +
| 454 | +    def test_one_metric_two_returns(self):
| 455 | +        pass
| 456 | +
| 457 | +    def test_one_metric_three_returns(self):
| 458 | +        pass
| 459 | +
| 460 | +    def test_two_metrics_one_return(self):
| 461 | +        pass
| 462 | +
| 463 | +    def test_two_metrics_two_returns(self):
| 464 | +        pass
| 465 | +
| 466 | +    def test_two_metrics_three_returns(self):
| 467 | +        pass
| 468 | +
| 469 | +    def test_three_metrics_one_return(self):
| 470 | +        pass
| 471 | +
| 472 | +    def test_three_metrics_three_returns(self):
| 473 | +        pass
348 | 474 |
349 | 475 |
350 | 476 | def test_binary_target():
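The `execute_snippet` helpers above work by `exec`-ing the accumulated score code into a fresh namespace and pulling out the `test_snippet` function that the header string defines. A self-contained sketch of the same mechanism, with a hand-written body standing in for sasctl's generated code (mirroring the `Classification = prediction[0]` line documented by the removed assertions, not the library's exact current output):

```python
import pandas as pd

# Hand-written stand-in for ScoreCode.score_code after a test's setup:
# the header defines test_snippet, and the generated 4-space-indented
# body follows.
generated = (
    "import pandas as pd\n"
    "def test_snippet(input_array, prediction):\n"
    "    Classification = prediction[0]\n"
    "    return Classification\n"
)

scope = {}
exec(generated, scope)  # defines test_snippet inside `scope`
result = scope["test_snippet"](pd.DataFrame({"A": [1]}), ["i"])
assert result == "i"
```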
|
@@ -398,14 +524,6 @@ def test_binary_target():
398 | 524 |         - sum(returns) >= 2
399 | 525 |     - len(metrics) > 3
400 | 526 |     """
401 | | -    # Initial errors
402 | | -    with pytest.raises(ValueError):
403 | | -        sc._binary_target([], [], ["A", 1, 2, 3])
404 | | -    with pytest.raises(ValueError):
405 | | -        sc._binary_target([], [], ["A", "B"])
406 | | -    with pytest.raises(ValueError):
407 | | -        sc._binary_target(["A", "B", "C", "D"], [], [])
408 | | -
409 | 527 |     # # metrics == 1
410 | 528 |     metrics = "Classification"
411 | 529 |     sc._binary_target(metrics, ["A", "B"], [""], h2o_model=True)
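For orientation, the assertions in `test_one_metric_one_return` above pin down the behavior the generated snippet must have: a scalar prediction passes through unchanged for single-row input, while a batch of predictions comes back as a one-column DataFrame. The function below is a hand-written equivalent of those expectations, not sasctl's actual generated source:

```python
import pandas as pd


def snippet_equivalent(input_array, prediction):
    # Single-row input: the lone prediction passes through unchanged.
    if input_array.shape[0] == 1:
        return prediction
    # Batch input: wrap the predictions in a one-column DataFrame.
    return pd.DataFrame({"Classification": prediction})


assert snippet_equivalent(pd.DataFrame([[1]], columns=["A"]), 0.5) == 0.5
print(snippet_equivalent(pd.DataFrame({"A": [0.9, 1, 1.1]}), [0.3, 0.4, 0.5]))
```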
|
@@ -504,6 +622,20 @@ def test_binary_target():
504 | 622 |         sc._binary_target(metrics, ["A", "B"], ["1", 2, 3])
505 | 623 |
506 | 624 |
| 625 | +class TestNonbinaryTargets(unittest.TestCase):
| 626 | +    def setUp(self):
| 627 | +        self.sc = ScoreCode
| 628 | +
| 629 | +    def tearDown(self):
| 630 | +        self.sc.score_code = ""
| 631 | +
| 632 | +    def execute_snippet(self, *args):
| 633 | +        scope = {}
| 634 | +        exec(self.sc.score_code, scope)
| 635 | +        test_snippet = scope["test_snippet"]
| 636 | +        return test_snippet(*args)
| 637 | +
| 638 | +
507 | 639 | def test_nonbinary_targets():
508 | 640 |     """
509 | 641 |     Test Cases: