@@ -4,6 +4,7 @@
 import json
 import pathlib
 import os
+import time
 
 from src.wrappers.OsipiBase import OsipiBase
 from utilities.data_simulation.GenerateData import GenerateData
@@ -158,19 +159,63 @@ def data_ivim_fit_saved():
             tolerances = algorithm_dict.get("tolerances", {})
             yield name, bvals, data, algorithm, xfail, kwargs, tolerances
 
-
 @pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", data_ivim_fit_saved())
 def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances, request):
     if xfail["xfail"]:
         mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
         request.node.add_marker(mark)
+    start_time = time.time()  # Record the start time
     fit = OsipiBase(algorithm=algorithm, **kwargs)
     signal = signal_helper(data["data"])
     tolerances = tolerances_helper(tolerances, data)
     [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals)
+    elapsed_time = time.time() - start_time  # Calculate elapsed time
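+    # Note: start_time is taken before OsipiBase is constructed, so elapsed_time
+    # includes algorithm setup as well as the single-voxel fit itself.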
     npt.assert_allclose(f_fit, data['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
     if data['f'] < 0.80:  # we need some signal for D to be detected
         npt.assert_allclose(D_fit, data['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
     if data['f'] > 0.03:  # we need some f for D* to be interpretable
         npt.assert_allclose(Dp_fit, data['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
+    assert elapsed_time < 2, f"Algorithm {name} took {elapsed_time} seconds, which is longer than the 2-second per-voxel limit"  # must fit within 2 seconds per voxel
+
+def bound_input():
+    # Find the algorithms from algorithms.json
+    file = pathlib.Path(__file__)
+    algorithm_path = file.with_name('algorithms.json')
+    with algorithm_path.open() as f:
+        algorithm_information = json.load(f)
+
+    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
+    generic = file.with_name('generic.json')
+    with generic.open() as f:
+        all_data = json.load(f)
+
+    algorithms = algorithm_information["algorithms"]
+    bvals = all_data.pop('config')
+    bvals = bvals['bvalues']
+    for name, data in all_data.items():
+        for algorithm in algorithms:
+            algorithm_dict = algorithm_information.get(algorithm, {})
+            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
+                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
+            kwargs = algorithm_dict.get("options", {})
+            tolerances = algorithm_dict.get("tolerances", {})
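+            # Only algorithms that opt in with a truthy "test_bounds" entry in
+            # algorithms.json take part in the bounds test below.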
+            test_bounds = algorithm_dict.get("test_bounds", {})
+            if test_bounds:
+                yield name, bvals, data, algorithm, xfail, kwargs, tolerances
+
+
+@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", bound_input())
+def test_bounds(name, bvals, data, algorithm, xfail, kwargs, tolerances, request):
+    bounds = ([0.0008, 0.2, 0.01, 1.1], [0.0012, 0.3, 0.02, 1.3])
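+    # (lower, upper) bounds per fitted parameter; the assertions below imply the
+    # order [D, f, Dp] with a fourth parameter (presumably S0) left unasserted.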
+    if xfail["xfail"]:
+        mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
+        request.node.add_marker(mark)
+    # deliberately use implausibly tight bounds to check whether they are honoured
+    fit = OsipiBase(algorithm=algorithm, bounds=bounds, initial_guess=[0.001, 0.25, 0.015, 1.2], **kwargs)
+    signal = signal_helper(data["data"])
+    tolerances = tolerances_helper(tolerances, data)
+    [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals)
 
+    assert bounds[0][0] <= D_fit <= bounds[1][0], f"Result {D_fit} out of bounds for data: {name}"
+    assert bounds[0][1] <= f_fit <= bounds[1][1], f"Result {f_fit} out of bounds for data: {name}"
+    assert bounds[0][2] <= Dp_fit <= bounds[1][2], f"Result {Dp_fit} out of bounds for data: {name}"