IVIM-NET_optim added #108

Open · wants to merge 1 commit into main
1 change: 1 addition & 0 deletions requirements.txt
@@ -15,3 +15,4 @@ pandas
sphinx
sphinx_rtd_theme
pytest-json-report
ivimnet
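
Note: the example scripts below import the package as IVIMNET; the assumption here is that the ivimnet distribution provides that import name. A quick smoke test (illustrative, not part of the PR):

import IVIMNET.deep as deep
import IVIMNET.simulations as sim
import IVIMNET.fitting_algorithms as fit
print(deep.__name__, sim.__name__, fit.__name__)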
63 changes: 63 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_1_simple_map.py
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""
import IVIMNET.simulations as sim
from hyperparams import hyperparams as hp_example_1
import IVIMNET.deep as deep
import time
import torch
import IVIMNET.fitting_algorithms as fit

# Import parameters
arg = hp_example_1()
arg = deep.checkarg(arg)
print(arg.save_name)
for SNR in arg.sim.SNR:
    # this simulates the signal
    IVIM_signal_noisy, D, f, Dp = sim.sim_signal(SNR, arg.sim.bvalues, sims=arg.sim.sims, Dmin=arg.sim.range[0][0],
                                                 Dmax=arg.sim.range[1][0], fmin=arg.sim.range[0][1],
                                                 fmax=arg.sim.range[1][1], Dsmin=arg.sim.range[0][2],
                                                 Dsmax=arg.sim.range[1][2], rician=arg.sim.rician)
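    # Note: the simulated signal follows the standard bi-exponential IVIM model,
    #   S(b) = S0 * (f * exp(-b * D*) + (1 - f) * exp(-b * D)),
    # with parameters drawn between the min/max bounds passed above and noise added
    # at the requested SNR (Rician if arg.sim.rician is True, otherwise Gaussian).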

    start_time = time.time()
    # train network
    net = deep.learn_IVIM(IVIM_signal_noisy, arg.sim.bvalues, arg)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for training: {}\n'.format(elapsed_time))

    # simulate IVIM signal for prediction
    [dwi_image_long, Dt_truth, Fp_truth, Dp_truth] = sim.sim_signal_predict(arg, SNR)

    # predict
    start_time = time.time()
    paramsNN = deep.predict_IVIM(dwi_image_long, arg.sim.bvalues, net, arg)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for inference: {}\n'.format(elapsed_time))
    # remove network to save memory
    del net
    if arg.train_pars.use_cuda:
        torch.cuda.empty_cache()

    start_time = time.time()
    # all fitting is done in fit.fit_dats for the other fitting algorithms (lsq, segmented and Bayesian)
    paramsf = fit.fit_dats(arg.sim.bvalues, dwi_image_long, arg.fit)
    elapsed_time = time.time() - start_time
    print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time))
    print('results for lsqfit')

    # plot values predict and truth
    sim.plot_example1(paramsNN, paramsf, Dt_truth, Fp_truth, Dp_truth, arg, SNR)
48 changes: 48 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_2_simulations.py
@@ -0,0 +1,48 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# import
import numpy as np
import IVIMNET.simulations as sim
import IVIMNET.deep as deep
from hyperparams import hyperparams as hp_example

# load hyperparameters
arg = hp_example()
arg = deep.checkarg(arg)

matlsq = np.zeros([len(arg.sim.SNR), 3, 3])
matNN = np.zeros([len(arg.sim.SNR), 3, 3])
stability = np.zeros([len(arg.sim.SNR), 3])
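# result matrices are indexed [SNR level, parameter (D, f, D*), metric (mean, RMSE/mean, Spearman rho)];
# stability holds one value per parameter per SNR level (the CVNET stability measure, used when repeats > 1)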
a = 0

for SNR in arg.sim.SNR:
    print('\n simulation at SNR of {snr}\n'.format(snr=SNR))
    if arg.fit.do_fit:
        matlsq[a, :, :], matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
        print('\nresults from lsq:')
        print(matlsq)
    else:
        matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
    a = a + 1
print('\nresults from NN: columns show the mean, the RMSE/mean and the Spearman coef [DvDp,Dvf,fvDp] \n'
      'the rows show D, f and D*\n'
      'and the different matrices represent the different SNR levels {}:'.format(arg.sim.SNR))
print(matNN)
# if repeats is higher than 1, print the stability of the network (CVNET)
if arg.sim.repeats > 1:
    print('\nstability of NN for D, f and D* at different SNR levels:')
    print(stability)
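
For context, the stability measure (CVNET in the article) is a coefficient of variation over repeatedly trained networks. A minimal sketch of such a measure, with illustrative values (not IVIMNET's own implementation):

import numpy as np

# one fitted (D, f, D*) triplet per repeated training run (illustrative numbers)
params_per_repeat = np.array([[0.0012, 0.21, 0.049],
                              [0.0013, 0.19, 0.052],
                              [0.0011, 0.22, 0.047]])
# coefficient of variation per parameter: std over repeats / mean over repeats
cv = np.std(params_per_repeat, axis=0) / np.mean(params_per_repeat, axis=0)
print(cv)  # one stability value each for D, f and D*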
108 changes: 108 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_3_volunteer.py
@@ -0,0 +1,108 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# this script loads all patient data and evaluates it.
import os
import time
import nibabel as nib
import numpy as np
import IVIMNET.deep as deep
import torch
from IVIMNET.fitting_algorithms import fit_dats
from hyperparams import hyperparams as hp

arg = hp()
arg = deep.checkarg(arg)

testdata = False

### folder patient data
folder = 'example_data'

### load patient data
print('Load patient data \n')
# load and init b-values
text_file = np.genfromtxt('{folder}/bvalues.bval'.format(folder=folder))
bvalues = np.array(text_file)
selsb = np.array(bvalues) == 0  # mask selecting the b=0 volumes
# load nifti
data = nib.load('{folder}/data.nii.gz'.format(folder=folder))
datas = data.get_fdata()
# reshape image for fitting
sx, sy, sz, n_b_values = datas.shape
X_dw = np.reshape(datas, (sx * sy * sz, n_b_values))

### select only relevant values, delete background and noise, and normalise data
S0 = np.nanmean(X_dw[:, selsb], axis=1)
S0[S0 != S0] = 0  # NaN != NaN, so this zeroes NaN entries
S0 = np.squeeze(S0)
valid_id = (S0 > (0.5 * np.median(S0[S0 > 0])))  # keep voxels brighter than half the median foreground S0
datatot = X_dw[valid_id, :]
# normalise data
S0 = np.nanmean(datatot[:, selsb], axis=1).astype('<f')
datatot = datatot / S0[:, None]
print('Patient data loaded\n')

### least square fitting
if arg.fit.do_fit:
    print('Conventional fitting\n')
    start_time = time.time()
    paramslsq = fit_dats(bvalues.copy(), datatot.copy()[:, :len(bvalues)], arg.fit)
    elapsed_time1 = time.time() - start_time
    print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time1))
    # define names IVIM params
    names_lsq = ['D_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
                 'f_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
                 'Dp_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0')]

    tot = 0
    # fill image array
    for k in range(len(names_lsq)):
        img = np.zeros([sx * sy * sz])
        img[valid_id] = paramslsq[k][tot:(tot + sum(valid_id))]
        img = np.reshape(img, [sx, sy, sz])
        nib.save(nib.Nifti1Image(img, data.affine, data.header),
                 '{folder}/{name}.nii.gz'.format(folder=folder, name=names_lsq[k]))
    print('Conventional fitting done\n')

### NN network
if not arg.train_pars.skip_net:
    print('NN fitting\n')
    res = [i for i, val in enumerate(datatot != datatot) if not val.any()]  # indices of voxels without NaN values
    start_time = time.time()
    # train network
    net = deep.learn_IVIM(datatot[res], bvalues, arg)
    elapsed_time1net = time.time() - start_time
    print('\ntime elapsed for Net: {}\n'.format(elapsed_time1net))
    start_time = time.time()
    # predict parameters
    paramsNN = deep.predict_IVIM(datatot, bvalues, net, arg)
    elapsed_time1netinf = time.time() - start_time
    print('\ntime elapsed for Net inf: {}\n'.format(elapsed_time1netinf))
    print('\ndata length: {}\n'.format(len(datatot)))
    if arg.train_pars.use_cuda:
        torch.cuda.empty_cache()
    # define names IVIM params
    names = ['D_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
             'f_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
             'Dp_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr)]
    tot = 0
    # fill image array and make nifti
    for k in range(len(names)):
        img = np.zeros([sx * sy * sz])
        img[valid_id] = paramsNN[k][tot:(tot + sum(valid_id))]
        img = np.reshape(img, [sx, sy, sz])
        nib.save(nib.Nifti1Image(img, data.affine, data.header),
                 '{folder}/{name}.nii.gz'.format(folder=folder, name=names[k]))
    print('NN fitting done\n')
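
Each branch writes one NIfTI parameter map per entry in its name list. As a quick check, a map can be read back with nibabel; the filename below is what the defaults above would produce for the least-squares D map, but treat it as illustrative:

import nibabel as nib

img = nib.load('example_data/D_lsq_freeS0.nii.gz')  # name derived from the defaults; adjust to your run
print(img.get_fdata().shape)  # back in image space: (sx, sy, sz)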
145 changes: 145 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/hyperparams.py
@@ -0,0 +1,145 @@
"""
September 2020 by Oliver Gurney-Champion
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""
import torch
import numpy as np


# most of these options are from the article and are explained in its Materials & Methods (M&M).
class train_pars:
    def __init__(self, nets):
        self.optim = 'adam'  # these are the optimisers implemented. Choices are: 'sgd'; 'sgdr'; 'adagrad'; 'adam'
        if nets == 'optim':
            self.lr = 0.00003  # this is the learning rate.
        elif nets == 'orig':
            self.lr = 0.001  # this is the learning rate.
        else:
            self.lr = 0.00003  # this is the learning rate.
        self.patience = 10  # number of epochs without improvement after which the network decides it has found its optimum
        self.batch_size = 128  # number of datasets taken along per iteration
        self.maxit = 500  # max iterations per epoch
        self.split = 0.9  # split between training and validation data
        self.load_nn = False  # load the neural network instead of retraining
        self.loss_fun = 'rms'  # loss function for the model. 'rms' is root mean square (linear-regression-like); 'L1' is the L1 norm (less focus on outliers)
        self.skip_net = False  # skip the network training and evaluation
        self.scheduler = False  # as discussed in the article, the LR matters. This option reduces the LR iteratively when there is no improvement over 5 consecutive epochs
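        # (In PyTorch terms, such a scheduler corresponds to something like
        # torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5), which
        # lowers the LR once the validation loss stops improving; an illustrative
        # mapping, not necessarily the exact call used in deep.py.)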
        # use GPU if available
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        self.select_best = False


class net_pars:
    def __init__(self, nets):
        # select a network setting
        if nets == 'optim':
            # the optimized network settings
            self.dropout = 0.1  # 0.0/0.1; choose how much dropout to use. 0 = no dropout; roughly 20% (0.20) is often suggested, although smaller networks may want less dropout
            self.batch_norm = True  # False/True turns batch normalisation on/off
            self.parallel = 'parallel'  # defines whether the network estimates each parameter separately (each parameter has its own network) or whether 1 shared network is used instead
            self.con = 'sigmoid'  # defines the constraint function; 'sigmoid' gives a sigmoid function giving the max/min; 'abs' gives the absolute of the output; 'none' does not constrain the output
            self.tri_exp = False
            #### only if sigmoid constraint is used!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = True  # whether to fit S0 (True) or fix it to 1 (for normalised signals); I prefer fitting S0 as it accounts for potential error in S0.
            self.depth = 2  # number of layers
            self.width = 0  # new option that determines network width. Setting it to 0 makes it as wide as the number of b-values
        elif nets == 'orig':
            # as summarized in Table 1 of the main article for the original network
            self.dropout = 0.0  # 0.0/0.1; choose how much dropout to use. 0 = no dropout; roughly 20% (0.20) is often suggested, although smaller networks may want less dropout
            self.batch_norm = False  # False/True turns batch normalisation on/off
            self.parallel = 'single'  # defines whether the network estimates each parameter separately (each parameter has its own network) or whether 1 shared network is used instead
            self.con = 'abs'  # defines the constraint function; 'sigmoid' gives a sigmoid function giving the max/min; 'abs' gives the absolute of the output; 'none' does not constrain the output
            self.tri_exp = False
            #### only if sigmoid constraint is used!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = False  # whether to fit S0 (True) or fix it to 1 (for normalised signals); I prefer fitting S0 as it accounts for potential error in S0.
            self.depth = 3  # number of layers
            self.width = 0  # new option that determines network width. Setting it to 0 makes it as wide as the number of b-values
        else:
            # choose wisely :)
            self.dropout = 0.3  # 0.0/0.1; choose how much dropout to use. 0 = no dropout; roughly 20% (0.20) is often suggested, although smaller networks may want less dropout
            self.batch_norm = True  # False/True turns batch normalisation on/off
            self.parallel = 'parallel'  # defines whether the network estimates each parameter separately (each parameter has its own network) or whether 1 shared network is used instead
            self.con = 'sigmoid'  # defines the constraint function; 'sigmoid' gives a sigmoid function giving the max/min; 'abs' gives the absolute of the output; 'none' does not constrain the output
            self.tri_exp = True
            #### only if sigmoid constraint is used!
            if self.tri_exp:
                self.cons_min = [0., 0.0003, 0.0, 0.003, 0.0, 0.08]  # F0', D0, F1', D1, F2', D2
                self.cons_max = [2.5, 0.003, 1, 0.08, 1, 5]  # F0', D0, F1', D1, F2', D2
            else:
                self.cons_min = [0, 0, 0.005, 0]  # Dt, Fp, Ds, S0
                self.cons_max = [0.005, 0.7, 0.2, 2.0]  # Dt, Fp, Ds, S0
            ####
            self.fitS0 = False  # whether to fit S0 (True) or fix it to 1 (for normalised signals); I prefer fitting S0 as it accounts for potential error in S0.
            self.depth = 4  # number of layers
            self.width = 500  # new option that determines network width. Setting it to 0 makes it as wide as the number of b-values
        boundsrange = 0.3 * (np.array(self.cons_max) - np.array(self.cons_min))  # ensure that we stay on the most linear part of the sigmoid function
        self.cons_min = np.array(self.cons_min) - boundsrange
        self.cons_max = np.array(self.cons_max) + boundsrange
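
# For intuition: with con = 'sigmoid', an unbounded network output x is mapped into
# the widened bounds roughly as
#   param = cons_min + sigmoid(x) * (cons_max - cons_min)
# (an illustrative formulation, not the exact IVIMNET code). Widening the bounds by
# 30% keeps plausible parameter values away from the saturated tails of the sigmoid,
# where gradients vanish.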



class lsqfit:
    def __init__(self):
        self.method = 'lsq'  # seg, bayes or lsq
        self.model = 'bi-exp'  # bi-exp or tri-exp
        self.do_fit = False  # skip lsq fitting
        self.load_lsq = False  # load the last results of the lsq fit
        self.fitS0 = True  # whether to fit S0 (True) or fix it to 1 in the least-squares fit.
        self.jobs = 4  # number of parallel jobs. If set to 1, no parallel computing is used
        self.bvalues_included = [50, 700]  # for the ADC fit
        if self.model == 'tri-exp':
            self.bounds = ([0, 0, 0, 0.008, 0, 0.06], [2.5, 0.008, 1, 0.08, 1, 5])  # F0', D0, F1', D1, F2', D2
        elif self.model == 'ADC':
            self.bounds = ([0, 0], [0.005, 2.5])  # D, S0
        else:
            self.bounds = ([0.0003, 0, 0.005, 0.5], [0.005, 0.7, 0.3, 2.5])  # Dt, Fp, Ds, S0
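
# For reference, a minimal bounded bi-exponential least-squares fit in the spirit of
# the 'lsq' method (a sketch, not the package's fit_dats):
#
#   import numpy as np
#   from scipy.optimize import curve_fit
#
#   def biexp(b, Dt, Fp, Ds, S0):
#       return S0 * (Fp * np.exp(-b * Ds) + (1 - Fp) * np.exp(-b * Dt))
#
#   popt, _ = curve_fit(biexp, bvalues, signal, p0=[0.001, 0.1, 0.05, 1],
#                       bounds=([0.0003, 0, 0.005, 0.5], [0.005, 0.7, 0.3, 2.5]))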

class sim:
    def __init__(self):
        self.bvalues = np.array([0, 5, 10, 20, 30, 40, 60, 150, 300, 500, 700])  # array of b-values
        self.SNR = [15, 20, 30, 50]  # the SNRs to simulate at
        self.sims = 1000000  # number of simulations to run
        self.num_samples_eval = 10000  # number of simulations to evaluate. This can be lower than the number run, particularly to save time when fitting. More simulations help generate sufficient data for the neural network
        self.repeats = 1  # number of repeats for the simulations
        self.rician = False  # add Rician noise to the simulations; if False, Gaussian noise is added instead
        self.model = 'bi-exp'
        if self.model == 'bi-exp':
            self.range = ([0.0005, 0.05, 0.01], [0.003, 0.55, 0.1])  # Dt, Fp, Ds
        else:
            self.range = ([0.0005, 0.05, 0.001, 0.05, 0.08], [0.003, 0.5, 0.05, 0.5, 2])  # D0, F1', D1, F2', D2


class hyperparams:
    def __init__(self):
        self.fig = False  # plot results and intermediate steps
        self.save_name = 'optim'  # orig or optim (or optim_adsig for in vivo)
        self.net_pars = net_pars(self.save_name)
        self.train_pars = train_pars(self.save_name)
        self.fit = lsqfit()
        self.sim = sim()