
Commit 3440416

ivimenet added
1 parent 34e033b commit 3440416

File tree

5 files changed: +365 -0 lines changed

requirements.txt

Lines changed: 1 addition & 0 deletions

@@ -15,3 +15,4 @@ pandas
 sphinx
 sphinx_rtd_theme
 pytest-json-report
+ivimnet

Lines changed: 63 additions & 0 deletions

@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""
import time

import torch

import IVIMNET.simulations as sim
import IVIMNET.deep as deep
import IVIMNET.fitting_algorithms as fit
from hyperparams import hyperparams as hp_example_1

# Import parameters
arg = hp_example_1()
arg = deep.checkarg(arg)
print(arg.save_name)

for SNR in arg.sim.SNR:
    # simulate the noisy IVIM signal
    IVIM_signal_noisy, D, f, Dp = sim.sim_signal(SNR, arg.sim.bvalues, sims=arg.sim.sims, Dmin=arg.sim.range[0][0],
                                                 Dmax=arg.sim.range[1][0], fmin=arg.sim.range[0][1],
                                                 fmax=arg.sim.range[1][1], Dsmin=arg.sim.range[0][2],
                                                 Dsmax=arg.sim.range[1][2], rician=arg.sim.rician)

    start_time = time.time()
    # train the network
    net = deep.learn_IVIM(IVIM_signal_noisy, arg.sim.bvalues, arg)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for training: {}\n'.format(elapsed_time))

    # simulate IVIM signal for prediction
    [dwi_image_long, Dt_truth, Fp_truth, Dp_truth] = sim.sim_signal_predict(arg, SNR)

    # predict
    start_time = time.time()
    paramsNN = deep.predict_IVIM(dwi_image_long, arg.sim.bvalues, net, arg)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for inference: {}\n'.format(elapsed_time))
    # remove the network to save memory
    del net
    if arg.train_pars.use_cuda:
        torch.cuda.empty_cache()

    start_time = time.time()
    # all fitting for the other algorithms (lsq, segmented and Bayesian) is done in fit.fit_dats
    paramsf = fit.fit_dats(arg.sim.bvalues, dwi_image_long, arg.fit)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time))
    print('results for lsqfit')

    # plot predicted values against the ground truth
    sim.plot_example1(paramsNN, paramsf, Dt_truth, Fp_truth, Dp_truth, arg, SNR)
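
For orientation, sim.sim_signal draws random (D, f, D*) combinations from arg.sim.range and synthesises decay curves over arg.sim.bvalues. Below is a minimal NumPy sketch of the standard bi-exponential IVIM forward model this rests on; it is an illustration, not IVIMNET's own implementation, and the parameter values are just plausible picks from the simulation ranges:

import numpy as np

def ivim_signal(b, D, f, Dp, S0=1.0):
    # bi-exponential IVIM model: S(b) = S0 * (f * exp(-b * Dp) + (1 - f) * exp(-b * D))
    return S0 * (f * np.exp(-b * Dp) + (1 - f) * np.exp(-b * D))

bvalues = np.array([0, 5, 10, 20, 30, 40, 60, 150, 300, 500, 700])
signal = ivim_signal(bvalues, D=0.0015, f=0.3, Dp=0.05)
noisy = signal + np.random.normal(0, 1 / 20, size=signal.shape)  # Gaussian noise at roughly SNR 20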

Lines changed: 48 additions & 0 deletions

@@ -0,0 +1,48 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# import
import numpy as np
import IVIMNET.simulations as sim
import IVIMNET.deep as deep
from hyperparams import hyperparams as hp_example

# load hyperparameters
arg = hp_example()
arg = deep.checkarg(arg)

matlsq = np.zeros([len(arg.sim.SNR), 3, 3])
matNN = np.zeros([len(arg.sim.SNR), 3, 3])
stability = np.zeros([len(arg.sim.SNR), 3])
a = 0

for SNR in arg.sim.SNR:
    print('\n simulation at SNR of {snr}\n'.format(snr=SNR))
    if arg.fit.do_fit:
        matlsq[a, :, :], matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
        print('\nresults from lsq:')
        print(matlsq)
    else:
        matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
    a = a + 1

print('\nresults from NN: columns show the mean, the RMSE/mean and the Spearman coefficient [DvDp, Dvf, fvDp]\n'
      'the rows show D, f and D*\n'
      'and the different matrices represent the different SNR levels {}:'.format(arg.sim.SNR))
print(matNN)
# if repeats is higher than 1, print the stability (CVNET)
if arg.sim.repeats > 1:
    print('\nstability of NN for D, f and D* at different SNR levels:')
    print(stability)
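
Per the print statement above, each 3x3 summary matrix holds, per parameter, the mean estimate, the RMSE normalised by the mean, and Spearman correlations between parameter pairs. A minimal sketch of how such entries can be computed for one parameter (illustrative only; the actual bookkeeping happens inside IVIMNET.simulations.sim, and here the correlation is shown between estimates and truth rather than between parameter pairs):

import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
truth = rng.uniform(0.0005, 0.003, 1000)            # e.g. simulated D values
est = truth + rng.normal(0, 0.0002, truth.shape)    # stand-in for network estimates

mean_est = np.mean(est)                                          # the mean
nrmse = np.sqrt(np.mean((est - truth) ** 2)) / np.mean(truth)    # RMSE/mean
rho, _ = spearmanr(est, truth)                                   # a Spearman coefficient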

Lines changed: 108 additions & 0 deletions

@@ -0,0 +1,108 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# this loads all patient data and evaluates it all.
import os
import time
import nibabel as nib
import numpy as np
import IVIMNET.deep as deep
import torch
from IVIMNET.fitting_algorithms import fit_dats
from hyperparams import hyperparams as hp

arg = hp()
arg = deep.checkarg(arg)

testdata = False

### folder with patient data
folder = 'example_data'

### load patient data
print('Load patient data \n')
# load and init b-values
text_file = np.genfromtxt('{folder}/bvalues.bval'.format(folder=folder))
bvalues = np.array(text_file)
selsb = np.array(bvalues) == 0
# load nifti
data = nib.load('{folder}/data.nii.gz'.format(folder=folder))
datas = data.get_fdata()
# reshape image for fitting
sx, sy, sz, n_b_values = datas.shape
X_dw = np.reshape(datas, (sx * sy * sz, n_b_values))

### select only relevant values, delete background and noise, and normalise data
S0 = np.nanmean(X_dw[:, selsb], axis=1)
S0[S0 != S0] = 0
S0 = np.squeeze(S0)
valid_id = (S0 > (0.5 * np.median(S0[S0 > 0])))
datatot = X_dw[valid_id, :]
# normalise data
S0 = np.nanmean(datatot[:, selsb], axis=1).astype('<f')
datatot = datatot / S0[:, None]
print('Patient data loaded\n')

### least squares fitting
if arg.fit.do_fit:
    print('Conventional fitting\n')
    start_time = time.time()
    paramslsq = fit_dats(bvalues.copy(), datatot.copy()[:, :len(bvalues)], arg.fit)
    elapsed_time1 = time.time() - start_time
    print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time1))
    # define names of the IVIM params
    names_lsq = ['D_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
                 'f_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
                 'Dp_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0')]

    tot = 0
    # fill image array
    for k in range(len(names_lsq)):
        img = np.zeros([sx * sy * sz])
        img[valid_id] = paramslsq[k][tot:(tot + sum(valid_id))]
        img = np.reshape(img, [sx, sy, sz])
        nib.save(nib.Nifti1Image(img, data.affine, data.header),
                 '{folder}/{name}.nii.gz'.format(folder=folder, name=names_lsq[k]))
    print('Conventional fitting done\n')

### NN network
if not arg.train_pars.skip_net:
    print('NN fitting\n')
    res = [i for i, val in enumerate(datatot != datatot) if not val.any()]  # remove NaN data
    start_time = time.time()
    # train network
    net = deep.learn_IVIM(datatot[res], bvalues, arg)
    elapsed_time1net = time.time() - start_time
    print('\ntime elapsed for Net: {}\n'.format(elapsed_time1net))
    start_time = time.time()
    # predict parameters
    paramsNN = deep.predict_IVIM(datatot, bvalues, net, arg)
    elapsed_time1netinf = time.time() - start_time
    print('\ntime elapsed for Net inf: {}\n'.format(elapsed_time1netinf))
    print('\ndata length: {}\n'.format(len(datatot)))
    if arg.train_pars.use_cuda:
        torch.cuda.empty_cache()
    # define names of the IVIM params
    names = ['D_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
             'f_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
             'Dp_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr)]
    tot = 0
    # fill image array and make nifti
    for k in range(len(names)):
        img = np.zeros([sx * sy * sz])
        img[valid_id] = paramsNN[k][tot:(tot + sum(valid_id))]
        img = np.reshape(img, [sx, sy, sz])
        nib.save(nib.Nifti1Image(img, data.affine, data.header),
                 '{folder}/{name}.nii.gz'.format(folder=folder, name=names[k]))
    print('NN fitting done\n')
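
To run this script on your own data, the folder must contain a b-value file (bvalues.bval) and a 4D NIfTI (data.nii.gz) whose last dimension matches the number of b-values, as the reshape above assumes. A quick sanity check along those lines (the paths are placeholders):

import nibabel as nib
import numpy as np

bvalues = np.genfromtxt('example_data/bvalues.bval')
data = nib.load('example_data/data.nii.gz').get_fdata()
assert data.ndim == 4, 'expected a 4D diffusion-weighted volume (x, y, z, b)'
assert data.shape[3] == len(bvalues), 'last axis must match the number of b-values'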

Lines changed: 145 additions & 0 deletions

@@ -0,0 +1,145 @@
"""
September 2020 by Oliver Gurney-Champion
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""
import torch
import numpy as np


# most of these are options from the article and are explained in the M&M.
class train_pars:
    def __init__(self, nets):
        self.optim = 'adam'  # the optimisers implemented; choices are: 'sgd', 'sgdr', 'adagrad', 'adam'
        if nets == 'optim':
            self.lr = 0.00003  # learning rate
        elif nets == 'orig':
            self.lr = 0.001  # learning rate
        else:
            self.lr = 0.00003  # learning rate
        self.patience = 10  # number of epochs without improvement after which the network decides it has found its optimum
        self.batch_size = 128  # number of datasets taken along per iteration
        self.maxit = 500  # max iterations per epoch
        self.split = 0.9  # split of test and validation data
        self.load_nn = False  # load the neural network instead of retraining
        self.loss_fun = 'rms'  # loss used for the model; 'rms' is root mean square (linear regression-like), 'L1' is the L1 norm (less focus on outliers)
        self.skip_net = False  # skip the network training and evaluation
        self.scheduler = False  # as discussed in the article, the LR is important. This option reduces the LR iteratively when there is no improvement over 5 consecutive epochs
        # use GPU if available
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        self.select_best = False
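
The patience setting implements early stopping: training halts once the loss has not improved for that many consecutive epochs. A runnable toy sketch of the idea (the train_one_epoch stub is a hypothetical stand-in, not IVIMNET's training loop):

import random

def train_one_epoch():
    # stand-in for a real training pass; returns a noisy validation loss
    return random.random()

patience = 10  # mirrors train_pars.patience
best_loss, bad_epochs = float('inf'), 0
for epoch in range(1000):
    val_loss = train_one_epoch()
    if val_loss < best_loss:
        best_loss, bad_epochs = val_loss, 0
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            break  # no improvement for `patience` consecutive epochs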


class net_pars:
    def __init__(self, nets):
        # select a network setting
        if nets == 'optim':
            # the optimized network settings
            self.dropout = 0.1  # 0.0/0.1; choose how much dropout to use. 0 = no dropout; roughly 20% (0.20) is often suggested, although smaller networks may want less dropout
            self.batch_norm = True  # False/True turns batch normalisation off/on
            self.parallel = 'parallel'  # whether the network estimates each parameter separately (each parameter has its own network) or one shared network is used instead
            self.con = 'sigmoid'  # constraint function; 'sigmoid' gives a sigmoid bounded by max/min, 'abs' takes the absolute value of the output, 'none' does not constrain the output
            self.tri_exp = False
            #### only used if the sigmoid constraint is chosen!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = True  # whether to fit S0 (True) or fix it to 1 (for normalised signals); I prefer fitting S0 as it takes along the potential error in S0.
            self.depth = 2  # number of layers
            self.width = 0  # network width; 0 makes it as wide as the number of b-values
        elif nets == 'orig':
            # as summarized in Table 1 of the main article for the original network (option comments as above)
            self.dropout = 0.0
            self.batch_norm = False
            self.parallel = 'single'
            self.con = 'abs'
            self.tri_exp = False
            #### only used if the sigmoid constraint is chosen!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = False
            self.depth = 3
            self.width = 0
        else:
            # choose wisely :) (option comments as above)
            self.dropout = 0.3
            self.batch_norm = True
            self.parallel = 'parallel'
            self.con = 'sigmoid'
            self.tri_exp = True
            #### only used if the sigmoid constraint is chosen!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = False
            self.depth = 4
            self.width = 500
        boundsrange = 0.3 * (np.array(self.cons_max) - np.array(self.cons_min))  # ensure that we stay on the most linear part of the sigmoid
        self.cons_min = np.array(self.cons_min) - boundsrange
        self.cons_max = np.array(self.cons_max) + boundsrange
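
With con = 'sigmoid', the network's raw outputs are squashed into the widened [cons_min, cons_max] ranges; the 30% widening above keeps typical outputs on the near-linear part of the sigmoid. A minimal sketch of how such a constraint can be applied (an illustration, not the repository's own implementation):

import torch

def constrain(raw, cons_min, cons_max):
    # map each unconstrained output into its [min, max] interval via a sigmoid
    lo = torch.as_tensor(cons_min, dtype=raw.dtype)
    hi = torch.as_tensor(cons_max, dtype=raw.dtype)
    return lo + torch.sigmoid(raw) * (hi - lo)

raw = torch.randn(4)  # e.g. one raw output per parameter: Dt, Fp, Ds, S0
print(constrain(raw, [0, 0, 0.005, 0], [0.005, 0.7, 0.2, 2.0]))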


class lsqfit:
    def __init__(self):
        self.method = 'lsq'  # 'seg', 'bayes' or 'lsq'
        self.model = 'bi-exp'  # 'bi-exp' or 'tri-exp'
        self.do_fit = False  # skip lsq fitting
        self.load_lsq = False  # load the last results for the lsq fit
        self.fitS0 = True  # whether to fit S0 (True) or fix it to 1 in the least squares fit
        self.jobs = 4  # number of parallel jobs. If set to 1, no parallel computing is used
        self.bvalues_included = [50, 700]  # for the ADC fit
        if self.model == 'tri-exp':
            self.bounds = ([0, 0, 0, 0.008, 0, 0.06], [2.5, 0.008, 1, 0.08, 1, 5])  # F0', D0, F1', D1, F2', D2
        elif self.model == 'ADC':
            self.bounds = ([0, 0], [0.005, 2.5])  # D, S0
        else:
            self.bounds = ([0.0003, 0, 0.005, 0.5], [0.005, 0.7, 0.3, 2.5])  # Dt, Fp, Ds, S0
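
When method = 'lsq' and model = 'bi-exp', these bounds constrain a nonlinear least-squares fit of the bi-exponential model. A self-contained sketch with scipy.optimize.curve_fit, here run on noiseless synthetic data (illustrative only; the repository's fits live in IVIMNET.fitting_algorithms):

import numpy as np
from scipy.optimize import curve_fit

def ivim(b, Dt, Fp, Ds, S0):
    return S0 * (Fp * np.exp(-b * Ds) + (1 - Fp) * np.exp(-b * Dt))

bvalues = np.array([0, 5, 10, 20, 30, 40, 60, 150, 300, 500, 700], dtype=float)
signal = ivim(bvalues, 0.0015, 0.3, 0.05, 1.0)
bounds = ([0.0003, 0, 0.005, 0.5], [0.005, 0.7, 0.3, 2.5])  # Dt, Fp, Ds, S0
params, _ = curve_fit(ivim, bvalues, signal, p0=[0.001, 0.1, 0.03, 1], bounds=bounds)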


class sim:
    def __init__(self):
        self.bvalues = np.array([0, 5, 10, 20, 30, 40, 60, 150, 300, 500, 700])  # array of b-values
        self.SNR = [15, 20, 30, 50]  # the SNRs to simulate at
        self.sims = 1000000  # number of simulations to run
        self.num_samples_eval = 10000  # number of simulations to evaluate. This can be lower than the number run, particularly to save time when fitting. More simulations help generate sufficient data for the neural network
        self.repeats = 1  # number of repeats for the simulations
        self.rician = False  # add Rician noise to the simulations; if False, Gaussian noise is added instead
        self.model = 'bi-exp'
        if self.model == 'bi-exp':
            self.range = ([0.0005, 0.05, 0.01], [0.003, 0.55, 0.1])  # Dt, Fp, Ds
        else:
            self.range = ([0.0005, 0.05, 0.001, 0.05, 0.08], [0.003, 0.5, 0.05, 0.5, 2])  # D0, F1', D1, F2', D2


class hyperparams:
    def __init__(self):
        self.fig = False  # plot results and intermediate steps
        self.save_name = 'optim'  # 'orig' or 'optim' (or 'optim_adsig' for in vivo)
        self.net_pars = net_pars(self.save_name)
        self.train_pars = train_pars(self.save_name)
        self.fit = lsqfit()
        self.sim = sim()
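
The hyperparams object is what every example script builds first. One thing to note when tweaking it: net_pars and train_pars are constructed from save_name inside __init__, so changing save_name on an existing object only takes effect if those blocks are rebuilt. A usage sketch (assuming hyperparams.py is on the import path):

from hyperparams import hyperparams, net_pars, train_pars

arg = hyperparams()
arg.sim.SNR = [20]       # simulate at a single SNR
arg.sim.sims = 100000    # fewer training samples for a quick test
arg.save_name = 'orig'   # switch settings...
arg.net_pars = net_pars(arg.save_name)      # ...and rebuild the dependent blocks
arg.train_pars = train_pars(arg.save_name)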
