Skip to content

Matlab installation + adding Amita's code to wrapper #103

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 18 commits into from
Jun 14, 2025
Merged
Show file tree
Hide file tree
Changes from 16 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,10 @@ md5sums.txt
*.asv
src/original/ASD_MemorialSloanKettering/MRI-QAMPER_IVIM/test_data
src/original/ASD_MemorialSloanKettering/MRI-QAMPER_IVIM/output_files


tests/IVIMmodels/unit_tests/*.log
junit/*
ivim_simulation.bval
ivim_simulation.bvec

# Unit test / coverage reports
.tox/
Expand Down
22 changes: 21 additions & 1 deletion conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
import csv
# import datetime


def pytest_addoption(parser):
parser.addoption(
"--SNR",
Expand Down Expand Up @@ -81,6 +80,22 @@ def pytest_addoption(parser):
type=str,
help="Drop this algorithm from the list"
)
parser.addoption(
"--withmatlab",
action="store_true",
default=False,
help="Run MATLAB-dependent tests"
)

# Module-level handle to a shared MATLAB engine. Stays None unless the
# --withmatlab option is passed; test modules import this to decide whether
# MATLAB-dependent algorithms can be constructed.
eng = None

def pytest_configure(config):
    """Start a session-wide MATLAB engine when ``--withmatlab`` is given.

    Args:
        config: the pytest config object; only ``--withmatlab`` is read.

    NOTE(review): this uses module-global state, which can clash with any
    other code using a name ``eng``; a session-scoped fixture would be the
    more idiomatic pytest pattern.
    """
    global eng
    if config.getoption("--withmatlab"):
        # Imported lazily so environments without the MATLAB engine
        # package can still run the non-MATLAB part of the suite.
        import matlab.engine
        print("Starting MATLAB engine...")
        eng = matlab.engine.start_matlab()
        print("MATLAB engine started.")


@pytest.fixture(scope="session")
Expand Down Expand Up @@ -163,6 +178,11 @@ def algorithm_list(filename, selected, dropped):
with algorithm_path.open() as f:
algorithm_information = json.load(f)
algorithms = set(algorithm_information["algorithms"])
for algorithm in algorithms:
algorithm_dict = algorithm_information.get(algorithm, {})
if algorithm_dict.get("requieres_matlab", {}) == True:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You've misspelled requires, though you've been consistent so it'll work as intended!

Also here though you get the value of requires_matlab, which is True or False, but the default return is an empty dict {}. Shouldn't that be False?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed

if eng is None:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this really what we want to do, silently drop an algorithm if we don't have a running matlab engine? Would it be possible to mark all the tests with that algorithm as skipped instead? That way they'd be visible but not run.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I've fixed this. Now they are skipped.

algorithms = algorithms - set(algorithm)
algorithms = algorithms - set(dropped)
if len(selected) > 0 and selected[0]:
algorithms = algorithms & set(selected)
Expand Down
3 changes: 2 additions & 1 deletion pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@
markers =
slow: marks tests as slow (deselect with '-m "not slow"')
addopts =
-m 'not slow'
-m 'not slow'
testpaths = tests
127 changes: 127 additions & 0 deletions src/standardized/ASD_MemorialSloanKettering_QAMPER_IVIM.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
from src.wrappers.OsipiBase import OsipiBase
import numpy as np
import matlab.engine

class ASD_MemorialSloanKettering_QAMPER_IVIM(OsipiBase):
    """
    Bi-exponential (QAMPER IVIM) fitting algorithm by E. LoCastro,
    Dr. Ramesh Paudyal and Dr. Amita Shukla-Dave, Memorial Sloan Kettering.

    Wraps the MATLAB routine ``IVIM_standard_bcin`` through the MATLAB
    Engine API for Python. A running engine can be passed via ``eng`` to
    avoid the (slow) per-instance engine start-up.
    """

    # I'm thinking that we define default attributes for each submission like this
    # And in __init__, we can call the OsipiBase control functions to check whether
    # the user inputs fulfil the requirements

    # Some basic stuff that identifies the algorithm
    id_author = "LoCastro, Dr. Ramesh Paudyal, Dr. Amita Shukla-Dave"
    id_algorithm_type = "Bi-exponential fit"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per milli metre squared or milliseconds per micro metre squared"

    # Algorithm requirements
    required_bvalues = 4
    required_thresholds = [0,
                           0]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
    required_bounds = False
    required_bounds_optional = True  # Bounds may not be required but are optional
    required_initial_guess = False
    required_initial_guess_optional = True
    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?

    # Supported inputs in the standardized class
    supported_bounds = True
    supported_initial_guess = True
    supported_thresholds = False

    def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, eng=None):
        """
        Everything this algorithm requires should be implemented here.
        Number of segmentation thresholds, bounds, etc.

        Args:
            bvalues (array-like, optional): b-values of the acquisition.
            thresholds: unused; kept for the standardized interface.
            bounds (tuple of two sequences, optional): (lower, upper) fit
                bounds in (D, f, D*, S0) order; algorithm defaults are used
                when None.
            initial_guess (sequence, optional): starting point in
                (D, f, D*, S0) order; algorithm default is used when None.
            eng (optional): an already running MATLAB engine. When None a
                new engine is started and shut down again on destruction;
                a supplied engine is left running (keep_alive).
        """
        super(ASD_MemorialSloanKettering_QAMPER_IVIM, self).__init__(bvalues=bvalues, bounds=bounds, initial_guess=initial_guess)
        self.initialize(bounds, initial_guess)
        if eng is None:
            print('initiating matlab; this may take some time. For repeated testing one could use the optional input eng as an already initiated matlab engine')
            self.eng = matlab.engine.start_matlab()
            self.keep_alive = False
        else:
            self.eng = eng
            self.keep_alive = True

    def algorithm(self, dwi_arr, bval_arr, LB0, UB0, x0in):
        """Call the MATLAB fit and return (D, f, D*, S0).

        Inputs are converted to ``matlab.double``; D and D* are divided by
        1000 to convert the MATLAB output units back to mm^2/s — TODO
        confirm against IVIM_standard_bcin's documentation.
        """
        dwi_arr = matlab.double(dwi_arr.tolist())
        bval_arr = matlab.double(bval_arr.tolist())
        LB0 = matlab.double(LB0.tolist())
        UB0 = matlab.double(UB0.tolist())
        x0in = matlab.double(x0in.tolist())
        results = self.eng.IVIM_standard_bcin(
            dwi_arr, bval_arr, 0.0, LB0, UB0, x0in, False, 0, 0, nargout=11)
        (f_arr, D_arr, Dx_arr, s0_arr, fitted_dwi_arr, RSS, rms_val, chi, AIC, BIC, R_sq) = results
        return D_arr / 1000, f_arr, Dx_arr / 1000, s0_arr

    def initialize(self, bounds, initial_guess):
        """Set fit bounds and initial guess, falling back to algorithm defaults.

        Order of the four entries is (D, f, D*, S0).
        """
        if bounds is None:
            print('warning, no bounds were defined, so algorithm-specific default bounds are used')
            self.bounds = ([1e-6, 0, 0.004, 0], [0.003, 1.0, 0.2, 5])
        else:
            self.bounds = bounds
        if initial_guess is None:
            print('warning, no initial guesses were defined, so algorithm-specific default initial guess is used')
            self.initial_guess = [0.001, 0.2, 0.01, 1]
        else:
            self.initial_guess = initial_guess
        # Fixed: this flag was assigned twice in a row.
        self.use_initial_guess = True
        self.use_bounds = True

    def ivim_fit(self, signals, bvalues, **kwargs):
        """Perform the IVIM fit

        Args:
            signals (array-like)
            bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.

        Returns:
            dict: fitted parameters with keys "D", "f" and "Dp".
        """
        bvalues = np.array(bvalues)
        # Reorder (D, f, D*, S0) -> (f, D, D*, S0) expected by the MATLAB code.
        LB = np.array(self.bounds[0])[[1, 0, 2, 3]]
        UB = np.array(self.bounds[1])[[1, 0, 2, 3]]

        fit_results = self.algorithm(np.array(signals)[:, np.newaxis], bvalues, LB, UB, np.array(self.initial_guess)[[1, 0, 2, 3]])

        results = {}
        results["D"] = fit_results[0]
        results["f"] = fit_results[1]
        results["Dp"] = fit_results[2]
        # NOTE(review): S0 (fit_results[3]) is computed but not returned,
        # although id_return_parameters lists it — confirm intended.
        return results

    def _quit_engine(self):
        """Shut down a self-started MATLAB engine; shared by __del__/__exit__.

        Engines passed in by the caller (keep_alive) are left running.
        getattr guards against __init__ having failed before keep_alive
        was ever assigned.
        """
        if not getattr(self, "keep_alive", True):
            if hasattr(self, "eng") and self.eng:
                try:
                    self.eng.quit()
                except Exception as e:
                    print(f"Warning: Failed to quit MATLAB engine cleanly: {e}")

    def __del__(self):
        self._quit_engine()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._quit_engine()
2 changes: 1 addition & 1 deletion src/standardized/OGC_AmsterdamUMC_biexp.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def initialize(self, bounds, initial_guess, fitS0):
self.bounds=bounds
if initial_guess is None:
print('warning, no initial guesses were defined, so default bounds are used of [0.001, 0.001, 0.01, 1]')
self.initial_guess = [0.001, 0.001, 0.01, 1]
self.initial_guess = [0.001, 0.1, 0.01, 1]
else:
self.initial_guess = initial_guess
self.use_initial_guess = True
Expand Down
4 changes: 4 additions & 0 deletions tests/IVIMmodels/unit_tests/algorithms.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
{
"algorithms": [
"ASD_MemorialSloanKettering_QAMPER_IVIM",
"ETP_SRI_LinearFitting",
"IAR_LU_biexp",
"IAR_LU_modified_mix",
Expand All @@ -14,6 +15,9 @@
"PvH_KB_NKI_IVIMfit",
"OJ_GU_seg"
],
"ASD_MemorialSloanKettering_QAMPER_IVIM": {
"requieres_matlab": true
},
"IAR_LU_biexp": {
"fail_first_time": true
},
Expand Down
37 changes: 28 additions & 9 deletions tests/IVIMmodels/unit_tests/test_ivim_fit.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@
import pathlib
import time
from src.wrappers.OsipiBase import OsipiBase
from conftest import eng
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think this is encouraged. https://stackoverflow.com/questions/68738078/is-it-discouraged-to-import-conftest-py-within-test-modules

I think the approved way would be to have a session scoped fixture so it would automatically be run once for each test session. @pytest.fixture(scope="session")

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, so this is now "fixed". I also moved a lot of initialisation code from the tests to conftest. Not sure whether that is completely where it should go, but it was in line with other code that was there.

#run using python -m pytest from the root folder


def signal_helper(signal):
signal = np.asarray(signal)
#signal[signal < 0] = 0.00001
Expand Down Expand Up @@ -44,9 +46,9 @@ def data_ivim_fit_saved():
algorithms = algorithm_information["algorithms"]
bvals = all_data.pop('config')
bvals = bvals['bvalues']
first=True
for name, data in all_data.items():
for algorithm in algorithms:
for algorithm in algorithms:
first = True
for name, data in all_data.items():
algorithm_dict = algorithm_information.get(algorithm, {})
xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
Expand All @@ -57,6 +59,11 @@ def data_ivim_fit_saved():
if algorithm_dict.get("fail_first_time", {}) == True:
skiptime = True
first = False
if algorithm_dict.get("requieres_matlab", {}) == True:
if eng is None:
continue
else:
kwargs={**kwargs,'eng': eng}
yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime

@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
Expand Down Expand Up @@ -95,19 +102,26 @@ def to_list_if_needed(value):
assert elapsed_time < 0.5, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel


def algorithms():
def algorithmlist():
# Find the algorithms from algorithms.json
file = pathlib.Path(__file__)
algorithm_path = file.with_name('algorithms.json')
with algorithm_path.open() as f:
algorithm_information = json.load(f)
algorithms = algorithm_information["algorithms"]
for algorithm in algorithms:
yield algorithm

@pytest.mark.parametrize("algorithm", algorithms())
def test_default_bounds_and_initial_guesses(algorithm):
fit = OsipiBase(algorithm=algorithm)
algorithm_dict = algorithm_information.get(algorithm, {})
args={}
if algorithm_dict.get("requieres_matlab", {}) == True:
if eng is None:
continue
else:
args = {**args, 'eng': eng}
yield algorithm, args

@pytest.mark.parametrize("algorithm, args", algorithmlist())
def test_default_bounds_and_initial_guesses(algorithm, args):
fit = OsipiBase(algorithm=algorithm,**args)
#assert fit.bounds is not None, f"For {algorithm}, there is no default fit boundary"
#assert fit.initial_guess is not None, f"For {algorithm}, there is no default fit initial guess"
if fit.use_bounds:
Expand Down Expand Up @@ -149,6 +163,11 @@ def bound_input():
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
kwargs = algorithm_dict.get("options", {})
tolerances = algorithm_dict.get("tolerances", {})
if algorithm_dict.get("requieres_matlab", {}) == True:
if eng is None:
continue
else:
kwargs = {**kwargs, 'eng': eng}
yield name, bvals, data, algorithm, xfail, kwargs, tolerances


Expand Down
Loading