
Commit efcd3f4

Built wrapper and unit testing for DL algorithms
I built a wrapper in which the DL algorithm trains at initialisation:
- If self-supervised, training data can be passed to the algorithm.
- If supervised, or if no data is given, training data is simulated.
- For IVIM-NET this happens within the wrapper.
- For Super IVIM DC this happens inside the package (as I did not see an option to pass training data).

Testing then occurs. For speed, all testing data is passed in one go. Also, as deep learning is known to predominantly outperform LSQ on noisy data, and to actually do a poor job on noise-free data, I made a second, DL-specific dataset containing much more noise, along with DL-specific tolerance bounds for passing the unit tests.
1 parent 3440416 commit efcd3f4
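For context, the new conftest.py hooks below key off a deep_learning flag in algorithms.json. A minimal sketch of what such an entry might look like; the structure is inferred from the fields conftest.py reads, the tolerance values are the fallback defaults from the diff below, and the rest is illustrative:

{
  "algorithms": ["IVIM_NEToptim"],
  "IVIM_NEToptim": {
    "deep_learning": true,
    "requires_matlab": false,
    "options": {},
    "tolerances": {
      "atol": {"f": 2e-1, "D": 8e-4, "Dp": 6e-2},
      "rtol": {"f": 0.2, "D": 0.3, "Dp": 0.3}
    }
  }
}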

File tree

11 files changed: +1215, -28 lines


conftest.py

Lines changed: 51 additions & 17 deletions
@@ -37,6 +37,12 @@ def pytest_addoption(parser):
         type=str,
         help="Default data file name",
     )
+    parser.addoption(
+        "--dataFileDL",
+        default="tests/IVIMmodels/unit_tests/generic_DL.json",
+        type=str,
+        help="Default data file name",
+    )
     parser.addoption(
         "--saveFileName",
         default="",
@@ -179,6 +185,10 @@ def pytest_generate_tests(metafunc):
     if "bound_input" in metafunc.fixturenames:
         args = bound_input(metafunc.config.getoption("dataFile"),metafunc.config.getoption("algorithmFile"))
         metafunc.parametrize("bound_input", args)
+    if "deep_learning_algorithms" in metafunc.fixturenames:
+        args = deep_learning_algorithms(metafunc.config.getoption("dataFileDL"),metafunc.config.getoption("algorithmFile"))
+        metafunc.parametrize("deep_learning_algorithms", args)
+


 def data_list(filename):
@@ -210,17 +220,18 @@ def data_ivim_fit_saved(datafile, algorithmFile):
     first = True
     for name, data in all_data.items():
         algorithm_dict = algorithm_information.get(algorithm, {})
-        xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
-                 "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
-        kwargs = algorithm_dict.get("options", {})
-        tolerances = algorithm_dict.get("tolerances", {})
-        skiptime=False
-        if first:
-            if algorithm_dict.get("fail_first_time", False):
-                skiptime = True
-            first = False
-        requires_matlab = algorithm_dict.get("requires_matlab", False)
-        yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab
+        if not algorithm_dict.get('deep_learning',False):
+            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
+                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
+            kwargs = algorithm_dict.get("options", {})
+            tolerances = algorithm_dict.get("tolerances", {})
+            skiptime=False
+            if first:
+                if algorithm_dict.get("fail_first_time", False):
+                    skiptime = True
+                first = False
+            requires_matlab = algorithm_dict.get("requires_matlab", False)
+            yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab

 def algorithmlist(algorithmFile):
     # Find the algorithms from algorithms.json
@@ -232,8 +243,9 @@ def algorithmlist(algorithmFile):
     algorithms = algorithm_information["algorithms"]
     for algorithm in algorithms:
         algorithm_dict = algorithm_information.get(algorithm, {})
-        requires_matlab = algorithm_dict.get("requires_matlab", False)
-        yield algorithm, requires_matlab
+        if not algorithm_dict.get('deep_learning', False):
+            requires_matlab = algorithm_dict.get("requires_matlab", False)
+            yield algorithm, requires_matlab

 def bound_input(datafile,algorithmFile):
     # Find the algorithms from algorithms.json
@@ -251,9 +263,31 @@ def bound_input(datafile,algorithmFile):
     for name, data in all_data.items():
         for algorithm in algorithms:
             algorithm_dict = algorithm_information.get(algorithm, {})
-            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
-                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
+            if not algorithm_dict.get('deep_learning',False):
+                xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
+                         "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
+                kwargs = algorithm_dict.get("options", {})
+                tolerances = algorithm_dict.get("tolerances", {})
+                requires_matlab = algorithm_dict.get("requires_matlab", False)
+                yield name, bvals, data, algorithm, xfail, kwargs, tolerances, requires_matlab
+
+def deep_learning_algorithms(datafile,algorithmFile):
+    # Find the algorithms from algorithms.json
+    current_folder = pathlib.Path.cwd()
+    algorithm_path = current_folder / algorithmFile
+    with algorithm_path.open() as f:
+        algorithm_information = json.load(f)
+    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
+    generic = current_folder / datafile
+    with generic.open() as f:
+        all_data = json.load(f)
+    algorithms = algorithm_information["algorithms"]
+    bvals = all_data.pop('config')
+    bvals = bvals['bvalues']
+    for algorithm in algorithms:
+        algorithm_dict = algorithm_information.get(algorithm, {})
+        if algorithm_dict.get('deep_learning',False):
             kwargs = algorithm_dict.get("options", {})
-            tolerances = algorithm_dict.get("tolerances", {})
             requires_matlab = algorithm_dict.get("requires_matlab", False)
-            yield name, bvals, data, algorithm, xfail, kwargs, tolerances, requires_matlab
+            tolerances = algorithm_dict.get("tolerances", {"atol":{"f": 2e-1, "D": 8e-4, "Dp": 6e-2},"rtol":{"f": 0.2, "D": 0.3, "Dp": 0.3}})
+            yield algorithm, all_data, bvals, kwargs, requires_matlab, tolerances
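Downstream, a test consuming this fixture unpacks the six-tuple yielded above and, per the commit message, fits all testing data in one go. The sketch below is hypothetical: the consuming test file is not shown in this excerpt, and the OsipiBase construction, the osipi_fit call, and the per-dataset JSON layout are assumptions about the wrapper API, not code from this commit:

import numpy as np
import numpy.testing as npt
import pytest
from src.wrappers.OsipiBase import OsipiBase

def test_deep_learning_algorithms(deep_learning_algorithms):
    algorithm, all_data, bvals, kwargs, requires_matlab, tolerances = deep_learning_algorithms
    if requires_matlab:
        pytest.skip("requires MATLAB")
    fit = OsipiBase(bvalues=bvals, algorithm=algorithm, **kwargs)   # network trains once, at construction
    signals = np.array([d["data"] for d in all_data.values()])      # all datasets in one batch (assumed layout)
    results = fit.osipi_fit(signals, bvals)
    for key in ("f", "D", "Dp"):
        truth = np.array([d[key] for d in all_data.values()])       # assumed layout
        npt.assert_allclose(results[key], truth, atol=tolerances["atol"][key], rtol=tolerances["rtol"][key])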

src/standardized/IVIM_NEToptim.py

Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
from src.wrappers.OsipiBase import OsipiBase
import numpy as np
import IVIMNET.deep as deep
import torch
import warnings

class IVIM_NEToptim(OsipiBase):
    """
    Bi-exponential fitting algorithm by Oliver Gurney-Champion, Amsterdam UMC
    """

    # I'm thinking that we define default attributes for each submission like this
    # And in __init__, we can call the OsipiBase control functions to check whether
    # the user inputs fulfil the requirements

    # Some basic stuff that identifies the algorithm
    id_author = "Oliver Gurney Champion, Amsterdam UMC"
    id_algorithm_type = "Deep-learnt bi-exponential fit"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per millimetre squared or milliseconds per micrometre squared"

    # Algorithm requirements
    required_bvalues = 4
    required_thresholds = [0,
                           0]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
    required_bounds = False
    required_bounds_optional = True  # Bounds may not be required but are optional
    required_initial_guess = False
    required_initial_guess_optional = False
    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?

    # Supported inputs in the standardized class
    supported_bounds = True
    supported_initial_guess = False
    supported_thresholds = False

    def __init__(self, SNR=None, bvalues=None, thresholds=None, bounds=None, initial_guess=None, fitS0=True, traindata=None):
        """
        Everything this algorithm requires should be implemented here.
        Number of segmentation thresholds, bounds, etc.

        Our OsipiBase object could contain functions that compare the inputs with
        the requirements.
        """
        if bvalues is None:
            raise ValueError("For deep learning models, b-values must be defined at initialisation")
        #super(OGC_AmsterdamUMC_biexp, self).__init__(bvalues, bounds, initial_guess, fitS0)
        super(IVIM_NEToptim, self).__init__(bvalues=bvalues, bounds=bounds, initial_guess=initial_guess)
        self.fitS0 = fitS0
        self.bvalues = np.array(bvalues)
        self.initialize(bounds, initial_guess, fitS0, traindata, SNR)

    def initialize(self, bounds, initial_guess, fitS0, traindata, SNR):
        self.fitS0 = fitS0
        self.deep_learning = True
        self.supervised = False
        if traindata is None:
            warnings.warn('No training data provided (traindata = None). Training data will be simulated')
            if SNR is None:
                warnings.warn('No SNR indicated. Data simulated with SNR = (5-1000)')
                SNR = (5, 1000)
            self.training_data(self.bvalues, n=1000000, SNR=SNR)
        self.arg = Arg()
        if bounds is not None:
            self.arg.net_pars.cons_min = bounds[0]  # Dt, Fp, Ds, S0
            self.arg.net_pars.cons_max = bounds[1]  # Dt, Fp, Ds, S0
        if traindata is None:
            self.net = deep.learn_IVIM(self.train_data['data'], self.bvalues, self.arg)
        else:
            self.net = deep.learn_IVIM(traindata, self.bvalues, self.arg)
        self.algorithm = lambda data: deep.predict_IVIM(data, self.bvalues, self.net, self.arg)

    def ivim_fit(self, signals, bvalues, **kwargs):
        """Perform the IVIM fit

        Args:
            signals (array-like)
            bvalues (array-like): b-values for the signals; must be identical to the b-values given at initialisation.

        Returns:
            results: a dictionary containing "D", "f", and "Dp".
        """
        if not np.array_equal(bvalues, self.bvalues):
            raise ValueError("The b-value list at fit time must be identical to the one given at initialisation, otherwise the network cannot be applied")

        paramsNN = deep.predict_IVIM(signals, self.bvalues, self.net, self.arg)

        results = {}
        results["D"] = paramsNN[0]
        results["f"] = paramsNN[1]
        results["Dp"] = paramsNN[2]

        return results

class NetArgs:
    def __init__(self):
        self.optim = 'adam'  # the optimisers implemented; choices are: 'sgd'; 'sgdr'; 'adagrad'; 'adam'
        self.lr = 0.00003  # this is the learning rate
        self.patience = 10  # number of epochs without improvement that the network waits until deciding it has found its optimum
        self.batch_size = 128  # number of datasets taken along per iteration
        self.maxit = 500  # max iterations per epoch
        self.split = 0.9  # split of test and validation data
        self.load_nn = False  # load the neural network instead of retraining
        self.loss_fun = 'rms'  # the loss used for the model; rms is root mean square (linear regression-like); L1 is the L1 norm (less focus on outliers)
        self.skip_net = False  # skip the network training and evaluation
        self.scheduler = False  # as discussed in the article, the LR is important; this approach reduces the LR iteratively when there is no improvement over 5 consecutive epochs
        # use GPU if available
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        self.select_best = False

# the optimized network settings
class NetPars:
    def __init__(self):
        self.dropout = 0.1  # 0.0/0.1; how much dropout one likes. 0 = no dropout; roughly 20% (0.20) is often quoted as good, although smaller networks may want less dropout
        self.batch_norm = True  # False/True turns batch normalisation on or off
        self.parallel = 'parallel'  # defines whether the network estimates each parameter separately (each parameter has its own network) or whether one shared network is used instead
        self.con = 'sigmoid'  # defines the constraint function; 'sigmoid' gives a sigmoid function giving the max/min; 'abs' gives the absolute of the output; 'none' does not constrain the output
        self.tri_exp = False
        #### only if the sigmoid constraint is used!
        self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
        self.cons_max = [0.005, 0.8, 0.2, 2.0]  # Dt, Fp, Ds, S0
        ####
        self.fitS0 = True  # whether to fit S0 (True) or fix it to 1 (for normalised signals); I prefer fitting S0 as it takes along the potential error in S0
        self.depth = 2  # number of layers
        self.width = 0  # new option that determines the network width; setting it to 0 makes it as wide as the number of b-values
        boundsrange = 0.3 * (np.array(self.cons_max) - np.array(self.cons_min))  # ensure that we are on the most linear part of the sigmoid function
        self.cons_min = np.array(self.cons_min) - boundsrange
        self.cons_max = np.array(self.cons_max) + boundsrange

class Arg:
    def __init__(self):
        self.train_pars = NetArgs()
        self.net_pars = NetPars()
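To make the intended call pattern concrete, a minimal usage sketch of the wrapper above. The b-values and synthetic signals are illustrative only, and note that the network trains inside the constructor, so instantiation is slow:

import numpy as np
from src.standardized.IVIM_NEToptim import IVIM_NEToptim

bvals = [0, 10, 20, 50, 100, 200, 400, 800]
# No traindata given: the wrapper simulates training data at the stated SNR range
model = IVIM_NEToptim(bvalues=bvals, SNR=(5, 1000))

# A batch of synthetic, roughly bi-exponential signals, shape (n_voxels, n_bvalues)
b = np.array(bvals)
signal = 0.1 * np.exp(-0.05 * b) + 0.9 * np.exp(-0.001 * b)
signals = np.tile(signal, (10, 1))

results = model.ivim_fit(signals, bvals)  # keys: "D", "f", "Dp"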

src/standardized/Super_IVIM_DC.py

Lines changed: 110 additions & 0 deletions
@@ -0,0 +1,110 @@
from src.wrappers.OsipiBase import OsipiBase
import numpy as np
import os
from super_ivim_dc.train import train
from pathlib import Path
from super_ivim_dc.infer import infer_from_signal
import warnings


class Super_IVIM_DC(OsipiBase):
    """
    Bi-exponential fitting algorithm by Oliver Gurney-Champion, Amsterdam UMC
    """

    # I'm thinking that we define default attributes for each submission like this
    # And in __init__, we can call the OsipiBase control functions to check whether
    # the user inputs fulfil the requirements

    # Some basic stuff that identifies the algorithm
    id_author = ""
    id_algorithm_type = "Supervised deep-learnt bi-exponential fit with data consistency"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per millimetre squared or milliseconds per micrometre squared"

    # Algorithm requirements
    required_bvalues = 4
    required_thresholds = [0,
                           0]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
    required_bounds = False
    required_bounds_optional = True  # Bounds may not be required but are optional
    required_initial_guess = False
    required_initial_guess_optional = True
    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?

    # Supported inputs in the standardized class
    supported_bounds = True
    supported_initial_guess = True
    supported_thresholds = False

    def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, fitS0=True, SNR=None):
        """
        Everything this algorithm requires should be implemented here.
        Number of segmentation thresholds, bounds, etc.

        Our OsipiBase object could contain functions that compare the inputs with
        the requirements.
        """
        if bvalues is None:
            raise ValueError("For deep learning models, b-values must be defined at initialisation")
        super(Super_IVIM_DC, self).__init__(bvalues=bvalues, bounds=bounds, initial_guess=initial_guess)
        self.fitS0 = fitS0
        self.bvalues = np.array(bvalues)
        self.initialize(bounds, initial_guess, fitS0, SNR)

    def initialize(self, bounds, initial_guess, fitS0, SNR, working_dir=os.getcwd(), ivimnet_filename='ivimnet', super_ivim_dc_filename='super_ivim_dc'):
        if SNR is None:
            warnings.warn('No SNR indicated. Data simulated with SNR = 100')
            SNR = 100
        self.fitS0 = fitS0
        self.use_initial_guess = False
        self.use_bounds = False
        self.deep_learning = True
        self.supervised = True
        modeldir = Path(working_dir) / "models"  # ensure it's a Path object
        modeldir.mkdir(parents=True, exist_ok=True)
        self.working_dir = str(modeldir)
        self.super_ivim_dc_filename = super_ivim_dc_filename  # do not include .pt
        self.ivimnet_filename = ivimnet_filename  # do not include .pt
        train(
            SNR=SNR,
            bvalues=self.bvalues,
            super_ivim_dc=True,
            work_dir=self.working_dir,
            super_ivim_dc_filename=self.super_ivim_dc_filename,
            ivimnet_filename=ivimnet_filename,
            verbose=False,
            ivimnet=False
        )

    def ivim_fit(self, signals, bvalues, **kwargs):
        """Perform the IVIM fit

        Args:
            signals (array-like)
            bvalues (array-like): b-values for the signals; must be identical to the b-values given at initialisation.

        Returns:
            results: a dictionary containing "D", "f", and "Dp".
        """
        if not np.array_equal(bvalues, self.bvalues):
            raise ValueError("The b-value list at fit time must be identical to the one given at initialisation, otherwise the network cannot be applied")

        Dp, Dt, f, S0_superivimdc = infer_from_signal(
            signal=signals,
            bvalues=self.bvalues,
            model_path=f"{self.working_dir}/{self.super_ivim_dc_filename}.pt",
        )

        results = {}
        results["D"] = Dt
        results["f"] = f
        results["Dp"] = Dp

        return results
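As with IVIM_NEToptim, a minimal usage sketch for this wrapper; the b-values and synthetic signal are illustrative, and training runs inside the package at construction, writing model weights to a "models" folder under the working directory:

import numpy as np
from src.standardized.Super_IVIM_DC import Super_IVIM_DC

bvals = [0, 50, 100, 200, 400, 600, 800]
# Supervised training on simulated data happens here, at construction
model = Super_IVIM_DC(bvalues=bvals, SNR=100)

b = np.array(bvals)
signal = 0.1 * np.exp(-0.05 * b) + 0.9 * np.exp(-0.001 * b)  # one synthetic voxel
results = model.ivim_fit(signal[np.newaxis, :], bvals)       # keys: "D", "f", "Dp"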
