|
3 | 3 | import pytest
|
4 | 4 | import json
|
5 | 5 | import pathlib
|
6 |
| -import os |
7 | 6 |
|
8 | 7 | from src.wrappers.OsipiBase import OsipiBase
|
9 | 8 | from utilities.data_simulation.GenerateData import GenerateData
|
10 |
| - |
11 | 9 | #run using python -m pytest from the root folder
|
12 | 10 |
|
13 |
| - |
14 |
| -# @pytest.fixture |
15 |
| -# def algorithm_fixture() |
16 |
| -# def test_fixtures() |
17 |
| - |
18 |
| -# use a fixture to generate data |
19 |
| -# either read a config file for the test or perhaps hard code a few fixtures and usefixtures in the config? |
20 |
| -# use a fixture to save data |
21 |
| - |
22 |
| -# def algorithm_list(): |
23 |
| -# # Find the algorithms from algorithms.json |
24 |
| -# file = pathlib.Path(__file__) |
25 |
| -# algorithm_path = file.with_name('algorithms.json') |
26 |
| -# with algorithm_path.open() as f: |
27 |
| -# algorithm_information = json.load(f) |
28 |
| -# return algorithm_information["algorithms"] |
29 |
| - |
30 |
| -# @pytest.fixture(params=algorithm_list()) |
31 |
| -# def algorithm_fixture(request): |
32 |
| -# # assert request.param == "algorithms" |
33 |
| -# yield request.param |
34 |
| - |
35 |
| - |
36 |
| - |
37 |
| -# @pytest.fixture(params=SNR) |
38 |
| -# def noise_fixture(request): |
39 |
| -# return request.config.getoption("--noise") |
40 |
| - |
41 |
| -# @pytest.fixture |
42 |
| -# def noise_fixture(request): |
43 |
| -# yield request.param |
44 |
| - |
45 |
| -# @pytest.mark.parametrize("S", [SNR]) |
46 |
| -# @pytest.mark.parametrize("D, Dp, f, bvals", [[0.0015, 0.1, 0.11000000000000007,[0, 5, 10, 50, 100, 200, 300, 500, 1000]]]) |
47 |
| -# def test_generated(ivim_algorithm, ivim_data, SNR): |
48 |
| -# S0 = 1 |
49 |
| -# gd = GenerateData() |
50 |
| -# name, bvals, data = ivim_data |
51 |
| -# D = data["D"] |
52 |
| -# f = data["f"] |
53 |
| -# Dp = data["Dp"] |
54 |
| -# if "data" not in data: |
55 |
| -# signal = gd.ivim_signal(D, Dp, f, S0, bvals, SNR) |
56 |
| -# else: |
57 |
| -# signal = data["data"] |
58 |
| -# fit = OsipiBase(algorithm=ivim_algorithm) |
59 |
| -# [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals) |
60 |
| -# npt.assert_allclose([f, D, Dp], [f_fit, D_fit, Dp_fit]) |
61 |
| - |
62 |
| - |
63 |
| - |
64 |
| -# test_linear_data = [ |
65 |
| -# pytest.param(0, np.linspace(0, 1000, 11), id='0'), |
66 |
| -# pytest.param(0.01, np.linspace(0, 1000, 11), id='0.1'), |
67 |
| -# pytest.param(0.02, np.linspace(0, 1000, 11), id='0.2'), |
68 |
| -# pytest.param(0.03, np.linspace(0, 1000, 11), id='0.3'), |
69 |
| -# pytest.param(0.04, np.linspace(0, 1000, 11), id='0.4'), |
70 |
| -# pytest.param(0.05, np.linspace(0, 1000, 11), id='0.5'), |
71 |
| -# pytest.param(0.08, np.linspace(0, 1000, 11), id='0.8'), |
72 |
| -# pytest.param(0.1, np.linspace(0, 1000, 11), id='1'), |
73 |
| -# ] |
74 |
| - |
75 |
| -#@pytest.mark.parametrize("D, bvals", test_linear_data) |
76 |
| -#def test_linear_fit(D, bvals): |
77 |
| - #gd = GenerateData() |
78 |
| - #gd_signal = gd.exponential_signal(D, bvals) |
79 |
| - #print(gd_signal) |
80 |
| - #fit = LinearFit() |
81 |
| - #D_fit = fit.linear_fit(bvals, np.log(gd_signal)) |
82 |
| - #npt.assert_allclose([1, D], D_fit) |
83 |
| - |
84 |
| -# test_ivim_data = [ |
85 |
| -# pytest.param(0, 0.01, 0.05, np.linspace(0, 1000, 11), id='0'), |
86 |
| -# pytest.param(0.1, 0.01, 0.05, np.linspace(0, 1000, 11), id='0.1'), |
87 |
| -# pytest.param(0.2, 0.01, 0.05, np.linspace(0, 1000, 11), id='0.2'), |
88 |
| -# pytest.param(0.1, 0.05, 0.1, np.linspace(0, 1000, 11), id='0.3'), |
89 |
| -# pytest.param(0.4, 0.001, 0.05, np.linspace(0, 1000, 11), id='0.4'), |
90 |
| -# pytest.param(0.5, 0.001, 0.05, np.linspace(0, 1000, 11), id='0.5'), |
91 |
| -# ] |
92 |
| - |
93 |
| -#@pytest.mark.parametrize("f, D, Dp, bvals", test_ivim_data) |
94 |
| -#def test_ivim_fit(f, D, Dp, bvals): |
95 |
| - ## We should make a wrapper that runs this for a range of different settings, such as b thresholds, bounds, etc. |
96 |
| - ## An additional inputs to these functions could perhaps be a "settings" class with attributes that are the settings to the |
97 |
| - ## algorithms. I.e. bvalues, thresholds, bounds, initial guesses. |
98 |
| - ## That way, we can write something that defines a range of settings, and then just run them through here. |
99 |
| - |
100 |
| - #gd = GenerateData() |
101 |
| - #gd_signal = gd.ivim_signal(D, Dp, f, 1, bvals) |
102 |
| - |
103 |
| - ##fit = LinearFit() # This is the old code by ETP |
104 |
| - #fit = ETP_SRI_LinearFitting() # This is the standardized format by IAR, which every algorithm will be implemented with |
105 |
| - |
106 |
| - #[f_fit, Dp_fit, D_fit] = fit.ivim_fit(gd_signal, bvals) # Note that I have transposed Dp and D. We should decide on a standard order for these. I usually go with f, Dp, and D ordered after size. |
107 |
| - #npt.assert_allclose([f, D], [f_fit, D_fit], atol=1e-5) |
108 |
| - #if not np.allclose(f, 0): |
109 |
| - #npt.assert_allclose(Dp, Dp_fit, rtol=1e-2, atol=1e-3) |
110 |
| - |
111 |
| - |
112 |
| -# convert the algorithm list and signal list to fixtures that read from the files into params (scope="session") |
113 |
| -# from that helpers can again parse the files? |
114 |
| - |
115 | 11 | def signal_helper(signal):
|
116 | 12 | signal = np.asarray(signal)
|
117 | 13 | signal = np.abs(signal)
|
@@ -160,17 +56,36 @@ def data_ivim_fit_saved():
|
160 | 56 |
|
161 | 57 |
|
@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", data_ivim_fit_saved())
def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances, request, record_property):
    """Fit a saved IVIM signal with the given algorithm and compare f, D and D* to ground truth.

    Records the fitted and reference parameters (plus the tolerances used) as a
    ``test_data`` property on the test report via the ``record_property`` fixture.
    """
    # Attach the xfail marker at runtime so configured failures report as xfail, not error.
    if xfail["xfail"]:
        request.node.add_marker(pytest.mark.xfail(reason="xfail", strict=xfail["strict"]))

    fit = OsipiBase(algorithm=algorithm, **kwargs)
    signal, ratio = signal_helper(data["data"])

    tolerances = tolerances_helper(tolerances, ratio, data["noise"])
    [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals)

    def to_list_if_needed(value):
        # ndarrays are not serializable for report properties; convert to plain lists.
        if isinstance(value, np.ndarray):
            return value.tolist()
        return value

    # Assemble the recorded result: identifiers, fitted values, ground truth, tolerances.
    fitted = {"f_fit": f_fit, "Dp_fit": Dp_fit, "D_fit": D_fit}
    truth = {"f": data['f'], "Dp": data['Dp'], "D": data['D']}
    test_result = {"name": name, "algorithm": algorithm}
    test_result.update({key: to_list_if_needed(value) for key, value in {**fitted, **truth}.items()})
    test_result["rtol"] = tolerances["rtol"]
    test_result["atol"] = tolerances["atol"]

    record_property('test_data', test_result)

    npt.assert_allclose(data['f'], f_fit, rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])

    if data['f'] < 0.80:  # we need some signal for D to be detected
        npt.assert_allclose(data['D'], D_fit, rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
    if data['f'] > 0.03:  # we need some f for D* to be interpretable
        npt.assert_allclose(data['Dp'], Dp_fit, rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
|
176 |
| - |
0 commit comments