swap actual and desired in test #105

Open · wants to merge 4 commits into main
Changes from 3 commits
1 change: 1 addition & 0 deletions .github/workflows/unit_test.yml
@@ -34,3 +34,4 @@ jobs:
run: |
pip install pytest pytest-cov
python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html
python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html --dataFile tests/IVIMmodels/unit_tests/generic_brain.json
Contributor:

I think the test-results.xml should be a different file so it doesn't get overwritten. Perhaps also the coverage report, but maybe that's OK?

Collaborator Author:

Indeed. I have now changed the naming of the test report file. I'm not sure whether the coverage report is used, but if it is, it should be the same regardless of the data, so it's not a problem that it gets overwritten?

Contributor:

It looks like --cov-report can be extended with :DEST, so perhaps use a different destination file for that piece of the 2nd test as well?
https://pytest-cov.readthedocs.io/en/latest/config.html

2 changes: 2 additions & 0 deletions .gitignore
@@ -5,6 +5,8 @@
__pycache__/
*.nii.gz
*.nii
*.bval
*.bvec
*.dcm
*.mat
*.raw
10 changes: 9 additions & 1 deletion conftest.py
@@ -166,7 +166,15 @@ def algorithm_list(filename, selected, dropped):
algorithms = algorithms - set(dropped)
if len(selected) > 0 and selected[0]:
algorithms = algorithms & set(selected)
return list(algorithms)
options = []
for algorithm in algorithms:
if algorithm in algorithm_information:
option = algorithm_information[algorithm]
else:
option = {}
options.append(option)
for algorithm, option in zip(list(algorithms), options):
yield algorithm, option

def data_list(filename):
current_folder = pathlib.Path.cwd()
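Note that algorithm_list() is now a generator yielding (algorithm, options) pairs rather than a list of algorithm names, so callers must unpack both values. A minimal sketch of consuming it; the filename is a placeholder:

```python
# Hypothetical illustration only: iterate over the (algorithm, options) pairs
# yielded by the modified algorithm_list(); "algorithms.json" is a placeholder filename.
for algorithm, options in algorithm_list("algorithms.json", [""], []):
    tolerances = options.get("tolerances", {})  # per-algorithm settings, empty dict if none given
    print(algorithm, tolerances)
```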
23 changes: 15 additions & 8 deletions src/wrappers/OsipiBase.py
@@ -268,7 +268,7 @@ def osipi_author():
"""Author identification"""
return ''

def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_realizations=100):
def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_realizations=100, print_results=True):
# Generate signal
bvalues = np.asarray(bvalues)
signals = f*np.exp(-bvalues*Dstar) + (1-f)*np.exp(-bvalues*D)
@@ -282,7 +282,10 @@ def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_reali
noised_signal = np.array([norm.rvs(signal, sigma) for signal in signals])

# Perform fit with the noised signal
f_estimates[i], Dstar_estimates[i], D_estimates[i] = self.ivim_fit(noised_signal, bvalues)
fit = self.osipi_fit(noised_signal, bvalues)
f_estimates[i] = fit['f']
Dstar_estimates[i] = fit['Dp']
D_estimates[i] = fit['D']

# Calculate bias
f_bias = np.mean(f_estimates) - f
@@ -293,9 +296,13 @@ def osipi_simple_bias_and_RMSE_test(self, SNR, bvalues, f, Dstar, D, noise_reali
f_RMSE = np.sqrt(np.var(f_estimates) + f_bias**2)
Dstar_RMSE = np.sqrt(np.var(Dstar_estimates) + Dstar_bias**2)
D_RMSE = np.sqrt(np.var(D_estimates) + D_bias**2)

print(f"f bias:\t{f_bias}\nf RMSE:\t{f_RMSE}")
print(f"Dstar bias:\t{Dstar_bias}\nDstar RMSE:\t{Dstar_RMSE}")
print(f"D bias:\t{D_bias}\nD RMSE:\t{D_RMSE}")



if print_results:
print()
print(f"\tBias\t\tRMSE")
print(f"f\t{f_bias:.3}\t\t{f_RMSE:.3}")
print(f"D*\t{Dstar_bias:.3}\t\t{Dstar_RMSE:.3}")
print(f"D\t{D_bias:.3}\t{D_RMSE:.3}")
print()
else:
return {'f':f_bias,'Dstar':Dstar_bias,'D':D_bias}, {'f':f_RMSE,'Dstar':Dstar_RMSE,'D':D_RMSE}
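A minimal usage sketch of the updated method, assuming OsipiBase is importable as in the test modules and that `algorithm` holds a valid algorithm name (placeholder here); with print_results=False the (bias, RMSE) dictionaries are returned instead of printing the table:

```python
from src.wrappers.OsipiBase import OsipiBase  # import path assumed

fit = OsipiBase(algorithm=algorithm)  # 'algorithm' is a placeholder name

# b-values and ground-truth parameters taken from the "Gray matter" entry
# in generic_brain.json below; the SNR value is illustrative.
bvalues = [0, 10, 20, 30, 40, 80, 110, 140, 170, 200,
           300, 400, 500, 600, 700, 800, 900]
bias, rmse = fit.osipi_simple_bias_and_RMSE_test(
    SNR=50, bvalues=bvalues, f=0.044, Dstar=84e-3, D=0.81e-3,
    noise_realizations=100, print_results=False)
print(f"f bias: {bias['f']:.3}, f RMSE: {rmse['f']:.3}")
```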
73 changes: 73 additions & 0 deletions tests/IVIMmodels/unit_tests/generic_brain.json
@@ -0,0 +1,73 @@
{
"Gray matter": {
"noise": 0.0005,
"D": 0.81e-3,
"f": 0.044,
"Dp": 84e-3,
"data": [
1.00021099,
0.9668462,
0.94870507,
0.93594347,
0.92702605,
0.89596427,
0.8748837,
0.85339213,
0.83335988,
0.81319343,
0.74911377,
0.69206656,
0.63748683,
0.58790883,
0.54206431,
0.50051437,
0.46152624
]
},
"White matter": {
"noise": 0.0005,
"D": 0.86e-3,
"f": 0.033,
"Dp": 76e-3,
"data": [
0.99968445,
0.97478753,
0.95818188,
0.94546804,
0.93605795,
0.90321907,
0.88051331,
0.85720661,
0.83480693,
0.81373184,
0.74707338,
0.6854536,
0.62867824,
0.57723924,
0.53090183,
0.48602603,
0.44600491
]
},
"config": {
"bvalues": [
0.0,
10.0,
20.0,
30.0,
40.0,
80.0,
110.0,
140.0,
170.0,
200.0,
300.0,
400.0,
500.0,
600.0,
700.0,
800.0,
900.0
]
}
}
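The signal values above follow the bi-exponential IVIM model used elsewhere in the code base, S(b) = f·exp(-b·Dp) + (1-f)·exp(-b·D), with a small amount of noise added. A minimal sketch for loading the new data file and checking the residual against that model (path as used in the workflow change above):

```python
import json
import numpy as np

# Load the new brain-like test data.
with open("tests/IVIMmodels/unit_tests/generic_brain.json") as f:
    generic_brain = json.load(f)

bvalues = np.array(generic_brain["config"]["bvalues"])
for name, region in generic_brain.items():
    if name == "config":
        continue
    # Ideal bi-exponential IVIM signal for this region's parameters
    ideal = (region["f"] * np.exp(-bvalues * region["Dp"])
             + (1 - region["f"]) * np.exp(-bvalues * region["D"]))
    residual = np.array(region["data"]) - ideal
    # Deviations should be roughly on the order of the stated noise level
    print(name, float(np.max(np.abs(residual))))
```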
24 changes: 13 additions & 11 deletions tests/IVIMmodels/unit_tests/test_ivim_fit.py
@@ -59,15 +59,16 @@ def data_ivim_fit_saved():
first = False
yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime

@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
Contributor:

Why un-parameterize this? It seems to be doing the right thing, but I'm honestly surprised it's working. Perhaps this was somewhat unnecessary, but I'm not even really following how it's getting the data. For example, data_ivim_fit_saved was a nested for loop that ran each algorithm for each dataset. I don't see how this is happening anymore, but I think the number of tests hasn't changed, so it must be happening somewhere?

Collaborator Author:

The function previously used for parameterization (data_ivim_fit_saved()) was hardcoded to load data from generic.json. To make use of --dataFile in the call to pytest I had to switch to parameterization by pytest_generate_tests, but an alternative would have been to change data_ivim_fit_saved() to also loop over JSON files. I felt that the former would be less work, but I am open to other solutions.
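For readers not familiar with the hook, here is a minimal sketch of what pytest_generate_tests-based parameterization in conftest.py can look like. Only --dataFile and the helper names are taken from this PR; the algorithm-side arguments are placeholders, and this is not the literal implementation:

```python
# Simplified sketch of pytest_generate_tests parameterization; not the actual conftest.py.
def pytest_generate_tests(metafunc):
    if "ivim_data" in metafunc.fixturenames:
        # data_list() reads the JSON file passed via --dataFile; each dataset becomes one test case
        datasets = list(data_list(metafunc.config.getoption("--dataFile")))
        metafunc.parametrize("ivim_data", datasets)
    if "ivim_algorithm" in metafunc.fixturenames:
        # algorithm_list() now yields (algorithm, options) pairs (see conftest.py diff above);
        # the arguments here are placeholders for the corresponding command-line options
        pairs = list(algorithm_list("algorithms.json", [""], []))
        metafunc.parametrize("ivim_algorithm", pairs)
```

Because the two fixtures are parameterized independently, pytest still runs every algorithm against every dataset, i.e. the same cross product that the nested loop in data_ivim_fit_saved() produced.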

Contributor:

Ah OK, it took me a while but I see now. I do like this approach more; it seems closer to how pytest is meant to work. My concern is that your addition doesn't cover everything the previous function, data_ivim_fit_saved, was doing. Maybe some of that is no longer necessary, or covers things we don't want to support anymore, like xfail or fail_first_time? I think data_ivim_fit_saved could be entirely moved over to conftest.py into the algorithm_list function that you modified, but maybe that's not necessary?

def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property):
if xfail["xfail"]:
mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
request.node.add_marker(mark)
#@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
#def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property):
def test_ivim_fit_saved(ivim_algorithm, ivim_data, record_property):
algorithm, options = ivim_algorithm
name, bvals, data = ivim_data

signal = signal_helper(data["data"])
tolerances = tolerances_helper(tolerances, data)
tolerances = tolerances_helper(options.get("tolerances", {}), data)
start_time = time.time() # Record the start time
fit = OsipiBase(algorithm=algorithm, **kwargs)
fit = OsipiBase(algorithm=algorithm)
fit_result = fit.osipi_fit(signal, bvals)
elapsed_time = time.time() - start_time # Calculate elapsed time
def to_list_if_needed(value):
Expand All @@ -85,14 +86,15 @@ def to_list_if_needed(value):
"atol": tolerances["atol"]
}
record_property('test_data', test_result)
npt.assert_allclose(fit_result['f'],data['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
npt.assert_allclose(data['f'], fit_result['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
if data['f']<0.80: # we need some signal for D to be detected
npt.assert_allclose(fit_result['D'],data['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
npt.assert_allclose(data['D'], fit_result['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
if data['f']>0.03: #we need some f for D* to be interpretable
npt.assert_allclose(fit_result['Dp'],data['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
npt.assert_allclose(data['Dp'], fit_result['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
#assert fit_result['D'] < fit_result['Dp'], f"D {fit_result['D']} is larger than D* {fit_result['Dp']} for {name}"
skiptime = False
if not skiptime:
assert elapsed_time < 0.5, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel
assert elapsed_time < 0.5, f"Algorithm {algorithm} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel


def algorithms():
7 changes: 4 additions & 3 deletions tests/IVIMmodels/unit_tests/test_ivim_synthetic.py
@@ -15,6 +15,7 @@
#e.g. pytest -m slow tests/IVIMmodels/unit_tests/test_ivim_synthetic.py --saveFileName test_output.csv --SNR 10 50 100 200 --fitCount 20
@pytest.mark.slow
def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician_noise, save_file, save_duration_file, use_prior):
algorithm, _ = ivim_algorithm
# assert save_file == "test"
rng = np.random.RandomState(42)
# random.seed(42)
@@ -24,7 +25,7 @@ def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician
D = data["D"]
f = data["f"]
Dp = data["Dp"]
fit = OsipiBase(algorithm=ivim_algorithm)
fit = OsipiBase(algorithm=algorithm)
# here is a prior
if use_prior and hasattr(fit, "accepts_priors") and fit.accepts_priors:
prior = [rng.normal(D, D/3, 10), rng.normal(f, f/3, 10), rng.normal(Dp, Dp/3, 10), rng.normal(1, 1/3, 10)]
@@ -40,11 +41,11 @@ fit_result = fit.osipi_fit(signal, bvals) #, prior_in=prior
fit_result = fit.osipi_fit(signal, bvals) #, prior_in=prior
time_delta += datetime.datetime.now() - start_time
if save_file is not None:
save_file.writerow([ivim_algorithm, name, SNR, idx, f, Dp, D, fit_result["f"], fit_result["Dp"], fit_result["D"], *signal])
save_file.writerow([algorithm, name, SNR, idx, f, Dp, D, fit_result["f"], fit_result["Dp"], fit_result["D"], *signal])
# save_results(save_file, ivim_algorithm, name, SNR, idx, [f, Dp, D], [f_fit, Dp_fit, D_fit])
npt.assert_allclose([f, Dp, D], [fit_result["f"], fit_result["Dp"], fit_result["D"]], rtol, atol)
if save_duration_file is not None:
save_duration_file.writerow([ivim_algorithm, name, SNR, time_delta/datetime.timedelta(microseconds=1), fit_count])
save_duration_file.writerow([algorithm, name, SNR, time_delta/datetime.timedelta(microseconds=1), fit_count])
# save_duration(save_duration_file, ivim_algorithm, name, SNR, time_delta, fit_count)


20 changes: 12 additions & 8 deletions utilities/data_simulation/Download_data.py
Collaborator:

This is fine by me, but all of the osipi code expects the download to be in the defined destination folder. For what purpose do you want to put it elsewhere?

Collaborator Author (@oscarjalnefjord, Jun 10, 2025):

Indeed. I just wanted to add some flexibility in case you want to place the data somewhere else and use it for other purposes.

@@ -12,16 +12,20 @@ def unzip_file(zip_file_path, extracted_folder_path):
zip_ref.extract(file_info, extracted_folder_path)


def download_data(force=False):
def download_data(force=False,folder=None):
# Check if the folder exists, and create it if not
curdir=os.getcwd()
base_folder = os.path.abspath(os.path.dirname(__file__))
base_folder = os.path.split(os.path.split(base_folder)[0])[0]
if not os.path.exists(os.path.join(base_folder,'download')):
os.makedirs(os.path.join(base_folder,'download'))
print(f"Folder '{'download'}' created.")
# Change to the specified folder
os.chdir(os.path.join(base_folder,'download'))
if folder is None:
base_folder = os.path.abspath(os.path.dirname(__file__))
base_folder = os.path.split(os.path.split(base_folder)[0])[0]
if not os.path.exists(os.path.join(base_folder,'download')):
os.makedirs(os.path.join(base_folder,'download'))
print(f"Folder '{'download'}' created.")
download_folder = os.path.join(base_folder,'download')
else:
download_folder = folder
# Change to the specified folder
os.chdir(download_folder)
subprocess.check_call(["zenodo_get", 'https://zenodo.org/records/14605039'])
# Open the zip file
if force or not os.path.exists('Data'):
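A brief usage sketch of the new optional folder argument, assuming the module can be imported as below; note that a user-supplied folder is used as-is (no makedirs in that branch), so in this sketch it is expected to exist already:

```python
from utilities.data_simulation.Download_data import download_data  # import path assumed

# Default behaviour: data goes into the repository's 'download' folder,
# which is created if it does not exist.
download_data()

# New optional argument: place the data somewhere else; the path here is a
# hypothetical example and must already exist.
download_data(folder="/tmp/ivim_data")
```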