diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index c5475ed..eb228a7 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -34,3 +34,4 @@ jobs: run: | pip install pytest pytest-cov python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html + python -m pytest --doctest-modules --junitxml=junit/test-results-brain.xml --cov=. --cov-report=xml --cov-report=html --dataFile tests/IVIMmodels/unit_tests/generic_brain.json diff --git a/.gitignore b/.gitignore index faf3523..11ea09c 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,8 @@ __pycache__/ *.nii.gz *.nii +*.bval +*.bvec *.dcm *.mat *.raw diff --git a/conftest.py b/conftest.py index 086585c..911fbb5 100644 --- a/conftest.py +++ b/conftest.py @@ -166,7 +166,15 @@ def algorithm_list(filename, selected, dropped): algorithms = algorithms - set(dropped) if len(selected) > 0 and selected[0]: algorithms = algorithms & set(selected) - return list(algorithms) + options = [] + for algorithm in algorithms: + if algorithm in algorithm_information: + option = algorithm_information[algorithm] + else: + option = {} + options.append(option) + for algorithm, option in zip(list(algorithms), options): + yield algorithm, option def data_list(filename): current_folder = pathlib.Path.cwd() diff --git a/src/wrappers/OsipiBase.py b/src/wrappers/OsipiBase.py index 6c5d380..7572a05 100644 --- a/src/wrappers/OsipiBase.py +++ b/src/wrappers/OsipiBase.py @@ -268,7 +268,7 @@ def osipi_author(): """Author identification""" return '' - def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_realizations=100): + def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_realizations=100, print_results=True): # Generate signal bvalues = np.asarray(bvalues) signals = f*np.exp(-bvalues*Dstar) + (1-f)*np.exp(-bvalues*D) @@ -282,7 +282,10 @@ def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, 
Dstar, D, noise_reali noised_signal = np.array([norm.rvs(signal, sigma) for signal in signals]) # Perform fit with the noised signal - f_estimates[i], Dstar_estimates[i], D_estimates[i] = self.ivim_fit(noised_signal, bvalues) + fit = self.osipi_fit(noised_signal, bvalues) + f_estimates[i] = fit['f'] + Dstar_estimates[i] = fit['Dp'] + D_estimates[i] = fit['D'] # Calculate bias f_bias = np.mean(f_estimates) - f @@ -293,9 +296,13 @@ def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_reali f_RMSE = np.sqrt(np.var(f_estimates) + f_bias**2) Dstar_RMSE = np.sqrt(np.var(Dstar_estimates) + Dstar_bias**2) D_RMSE = np.sqrt(np.var(D_estimates) + D_bias**2) - - print(f"f bias:\t{f_bias}\nf RMSE:\t{f_RMSE}") - print(f"Dstar bias:\t{Dstar_bias}\nDstar RMSE:\t{Dstar_RMSE}") - print(f"D bias:\t{D_bias}\nD RMSE:\t{D_RMSE}") - - + + if print_results: + print() + print(f"\tBias\t\tRMSE") + print(f"f\t{f_bias:.3}\t\t{f_RMSE:.3}") + print(f"D*\t{Dstar_bias:.3}\t\t{Dstar_RMSE:.3}") + print(f"D\t{D_bias:.3}\t{D_RMSE:.3}") + print() + else: + return {'f':f_bias,'Dstar':Dstar_bias,'D':D_bias}, {'f':f_RMSE,'Dstar':Dstar_RMSE,'D':D_RMSE} diff --git a/tests/IVIMmodels/unit_tests/generic_brain.json b/tests/IVIMmodels/unit_tests/generic_brain.json new file mode 100644 index 0000000..9682b24 --- /dev/null +++ b/tests/IVIMmodels/unit_tests/generic_brain.json @@ -0,0 +1,73 @@ +{ + "Gray matter": { + "noise": 0.0005, + "D": 0.81e-3, + "f": 0.044, + "Dp": 84e-3, + "data": [ + 1.00021099, + 0.9668462, + 0.94870507, + 0.93594347, + 0.92702605, + 0.89596427, + 0.8748837, + 0.85339213, + 0.83335988, + 0.81319343, + 0.74911377, + 0.69206656, + 0.63748683, + 0.58790883, + 0.54206431, + 0.50051437, + 0.46152624 + ] + }, + "White matter": { + "noise": 0.0005, + "D": 0.86e-3, + "f": 0.033, + "Dp": 76e-3, + "data": [ + 0.99968445, + 0.97478753, + 0.95818188, + 0.94546804, + 0.93605795, + 0.90321907, + 0.88051331, + 0.85720661, + 0.83480693, + 0.81373184, + 0.74707338, + 0.6854536, + 
0.62867824, + 0.57723924, + 0.53090183, + 0.48602603, + 0.44600491 + ] + }, + "config": { + "bvalues": [ + 0.0, + 10.0, + 20.0, + 30.0, + 40.0, + 80.0, + 110.0, + 140.0, + 170.0, + 200.0, + 300.0, + 400.0, + 500.0, + 600.0, + 700.0, + 800.0, + 900.0 + ] + } +} \ No newline at end of file diff --git a/tests/IVIMmodels/unit_tests/test_ivim_fit.py b/tests/IVIMmodels/unit_tests/test_ivim_fit.py index fb58d13..4ff0d0a 100644 --- a/tests/IVIMmodels/unit_tests/test_ivim_fit.py +++ b/tests/IVIMmodels/unit_tests/test_ivim_fit.py @@ -59,15 +59,16 @@ def data_ivim_fit_saved(): first = False yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime -@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved()) -def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property): - if xfail["xfail"]: - mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"]) - request.node.add_marker(mark) +#@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved()) +#def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property): +def test_ivim_fit_saved(ivim_algorithm, ivim_data, record_property): + algorithm, options = ivim_algorithm + name, bvals, data = ivim_data + signal = signal_helper(data["data"]) - tolerances = tolerances_helper(tolerances, data) + tolerances = tolerances_helper(options.get("tolerances", {}), data) start_time = time.time() # Record the start time - fit = OsipiBase(algorithm=algorithm, **kwargs) + fit = OsipiBase(algorithm=algorithm) fit_result = fit.osipi_fit(signal, bvals) elapsed_time = time.time() - start_time # Calculate elapsed time def to_list_if_needed(value): @@ -85,14 +86,15 @@ def to_list_if_needed(value): "atol": tolerances["atol"] } record_property('test_data', test_result) - npt.assert_allclose(fit_result['f'],data['f'], 
rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"]) + npt.assert_allclose(data['f'], fit_result['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"]) if data['f']<0.80: # we need some signal for D to be detected - npt.assert_allclose(fit_result['D'],data['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"]) + npt.assert_allclose(data['D'], fit_result['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"]) if data['f']>0.03: #we need some f for D* to be interpretable - npt.assert_allclose(fit_result['Dp'],data['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"]) + npt.assert_allclose(data['Dp'], fit_result['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"]) #assert fit_result['D'] < fit_result['Dp'], f"D {fit_result['D']} is larger than D* {fit_result['Dp']} for {name}" + skiptime = False if not skiptime: - assert elapsed_time < 0.5, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel + assert elapsed_time < 0.5, f"Algorithm {algorithm} took {elapsed_time} seconds, which is longer than the 0.5 second limit to fit per voxel" def algorithms(): diff --git a/tests/IVIMmodels/unit_tests/test_ivim_synthetic.py b/tests/IVIMmodels/unit_tests/test_ivim_synthetic.py index 2074132..2245969 100644 --- a/tests/IVIMmodels/unit_tests/test_ivim_synthetic.py +++ b/tests/IVIMmodels/unit_tests/test_ivim_synthetic.py @@ -15,6 +15,7 @@ #e.g.
pytest -m slow tests/IVIMmodels/unit_tests/test_ivim_synthetic.py --saveFileName test_output.csv --SNR 10 50 100 200 --fitCount 20 @pytest.mark.slow def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician_noise, save_file, save_duration_file, use_prior): + algorithm, _ = ivim_algorithm # assert save_file == "test" rng = np.random.RandomState(42) # random.seed(42) @@ -24,7 +25,7 @@ def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician D = data["D"] f = data["f"] Dp = data["Dp"] - fit = OsipiBase(algorithm=ivim_algorithm) + fit = OsipiBase(algorithm=algorithm) # here is a prior if use_prior and hasattr(fit, "accepts_priors") and fit.accepts_priors: prior = [rng.normal(D, D/3, 10), rng.normal(f, f/3, 10), rng.normal(Dp, Dp/3, 10), rng.normal(1, 1/3, 10)] @@ -40,11 +41,11 @@ def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician fit_result = fit.osipi_fit(signal, bvals) #, prior_in=prior time_delta += datetime.datetime.now() - start_time if save_file is not None: - save_file.writerow([ivim_algorithm, name, SNR, idx, f, Dp, D, fit_result["f"], fit_result["Dp"], fit_result["D"], *signal]) + save_file.writerow([algorithm, name, SNR, idx, f, Dp, D, fit_result["f"], fit_result["Dp"], fit_result["D"], *signal]) # save_results(save_file, ivim_algorithm, name, SNR, idx, [f, Dp, D], [f_fit, Dp_fit, D_fit]) npt.assert_allclose([f, Dp, D], [fit_result["f"], fit_result["Dp"], fit_result["D"]], rtol, atol) if save_duration_file is not None: - save_duration_file.writerow([ivim_algorithm, name, SNR, time_delta/datetime.timedelta(microseconds=1), fit_count]) + save_duration_file.writerow([algorithm, name, SNR, time_delta/datetime.timedelta(microseconds=1), fit_count]) # save_duration(save_duration_file, ivim_algorithm, name, SNR, time_delta, fit_count) diff --git a/utilities/data_simulation/Download_data.py b/utilities/data_simulation/Download_data.py index 3a0f65f..2da2c75 100644 --- 
a/utilities/data_simulation/Download_data.py +++ b/utilities/data_simulation/Download_data.py @@ -12,16 +12,20 @@ def unzip_file(zip_file_path, extracted_folder_path): zip_ref.extract(file_info, extracted_folder_path) -def download_data(force=False): +def download_data(force=False,folder=None): # Check if the folder exists, and create it if not curdir=os.getcwd() - base_folder = os.path.abspath(os.path.dirname(__file__)) - base_folder = os.path.split(os.path.split(base_folder)[0])[0] - if not os.path.exists(os.path.join(base_folder,'download')): - os.makedirs(os.path.join(base_folder,'download')) - print(f"Folder '{'download'}' created.") - # Change to the specified folder - os.chdir(os.path.join(base_folder,'download')) + if folder is None: + base_folder = os.path.abspath(os.path.dirname(__file__)) + base_folder = os.path.split(os.path.split(base_folder)[0])[0] + if not os.path.exists(os.path.join(base_folder,'download')): + os.makedirs(os.path.join(base_folder,'download')) + print(f"Folder '{'download'}' created.") + download_folder = os.path.join(base_folder,'download') + else: + download_folder = folder + # Change to the specified folder + os.chdir(download_folder) subprocess.check_call(["zenodo_get", 'https://zenodo.org/records/14605039']) # Open the zip file if force or not os.path.exists('Data'):