
Dl wrapper + DL testing #109


Open · wants to merge 13 commits into base: main
Changes from all commits
4 changes: 3 additions & 1 deletion .gitignore
@@ -23,6 +23,7 @@ tests/IVIMmodels/unit_tests/*.log
junit/*
ivim_simulation.bval
ivim_simulation.bvec
*.pt

# Unit test / coverage reports
.tox/
@@ -32,4 +33,5 @@ nosetests.xml
coverage.xml
*.pyc
phantoms/MR_XCAT_qMRI/*.json
phantoms/MR_XCAT_qMRI/*.txt
phantoms/MR_XCAT_qMRI/*.txt
tests/IVIMmodels/unit_tests/models
65 changes: 49 additions & 16 deletions conftest.py
@@ -37,6 +37,12 @@ def pytest_addoption(parser):
type=str,
help="Default data file name",
)
parser.addoption(
"--dataFileDL",
default="tests/IVIMmodels/unit_tests/generic_DL.json",
type=str,
help="Default data file name",
)
parser.addoption(
"--saveFileName",
default="",
@@ -179,6 +185,10 @@ def pytest_generate_tests(metafunc):
if "bound_input" in metafunc.fixturenames:
args = bound_input(metafunc.config.getoption("dataFile"),metafunc.config.getoption("algorithmFile"))
metafunc.parametrize("bound_input", args)
if "deep_learning_algorithms" in metafunc.fixturenames:
args = deep_learning_algorithms(metafunc.config.getoption("dataFileDL"),metafunc.config.getoption("algorithmFile"))
metafunc.parametrize("deep_learning_algorithms", args)



def data_list(filename):
@@ -210,17 +220,18 @@ def data_ivim_fit_saved(datafile, algorithmFile):
first = True
for name, data in all_data.items():
algorithm_dict = algorithm_information.get(algorithm, {})
xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
kwargs = algorithm_dict.get("options", {})
tolerances = algorithm_dict.get("tolerances", {})
skiptime=False
if first:
if algorithm_dict.get("fail_first_time", False):
skiptime = True
first = False
requires_matlab = algorithm_dict.get("requires_matlab", False)
yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab
if not algorithm_dict.get('deep_learning',False):
xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
kwargs = algorithm_dict.get("options", {})
tolerances = algorithm_dict.get("tolerances", {})
skiptime=False
if first:
if algorithm_dict.get("fail_first_time", False):
skiptime = True
first = False
requires_matlab = algorithm_dict.get("requires_matlab", False)
yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab

def algorithmlist(algorithmFile):
# Find the algorithms from algorithms.json
@@ -233,7 +244,7 @@ def algorithmlist(algorithmFile):
for algorithm in algorithms:
algorithm_dict = algorithm_information.get(algorithm, {})
requires_matlab = algorithm_dict.get("requires_matlab", False)
yield algorithm, requires_matlab
yield algorithm, requires_matlab, algorithm_dict.get('deep_learning', False)
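Reviewer sketch (not part of the diff) of how a test consuming algorithmlist might use the new third tuple element; the test body and skip reason are illustrative only:

import pytest

def test_algorithm_listing(algorithmlist):
    algorithm, requires_matlab, deep_learning = algorithmlist
    if deep_learning:
        # DL algorithms get their own data file and tolerances via the dedicated fixture
        pytest.skip("covered by the deep-learning tests")
    assert isinstance(algorithm, str)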

def bound_input(datafile,algorithmFile):
# Find the algorithms from algorithms.json
@@ -251,9 +262,31 @@ def bound_input(datafile,algorithmFile):
for name, data in all_data.items():
for algorithm in algorithms:
algorithm_dict = algorithm_information.get(algorithm, {})
xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
if not algorithm_dict.get('deep_learning',False):
xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
"strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
kwargs = algorithm_dict.get("options", {})
tolerances = algorithm_dict.get("tolerances", {})
requires_matlab = algorithm_dict.get("requires_matlab", False)
yield name, bvals, data, algorithm, xfail, kwargs, tolerances, requires_matlab

def deep_learning_algorithms(datafile,algorithmFile):
# Find the algorithms from algorithms.json
current_folder = pathlib.Path.cwd()
algorithm_path = current_folder / algorithmFile
with algorithm_path.open() as f:
algorithm_information = json.load(f)
# Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
generic = current_folder / datafile
with generic.open() as f:
all_data = json.load(f)
algorithms = algorithm_information["algorithms"]
bvals = all_data.pop('config')
bvals = bvals['bvalues']
for algorithm in algorithms:
algorithm_dict = algorithm_information.get(algorithm, {})
if algorithm_dict.get('deep_learning',False):
            kwargs = algorithm_dict.get("options", {})
            requires_matlab = algorithm_dict.get("requires_matlab", False)
            tolerances = algorithm_dict.get("tolerances", {"atol":{"f": 2e-1, "D": 8e-4, "Dp": 8e-2},"rtol":{"f": 0.2, "D": 0.3, "Dp": 0.4}})
            yield algorithm, all_data, bvals, kwargs, requires_matlab, tolerances
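For context, a minimal sketch (reviewer illustration, not part of this diff) of an algorithms.json entry that the deep_learning branch above would select, written as a Python dict; the algorithm name IVIMNET_example is hypothetical, and the keys simply mirror the .get() lookups in conftest.py. The matching data file can be overridden on the command line through the new --dataFileDL option.

example_algorithms_json = {
    "algorithms": ["IVIMNET_example"],
    "IVIMNET_example": {
        "deep_learning": True,      # picked up by deep_learning_algorithms, skipped by the classic fixtures
        "requires_matlab": False,
        "options": {},              # yielded to the test as kwargs
        "tolerances": {             # optional; the defaults above are used when omitted
            "atol": {"f": 2e-1, "D": 8e-4, "Dp": 8e-2},
            "rtol": {"f": 0.2, "D": 0.3, "Dp": 0.4},
        },
    },
}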
1 change: 1 addition & 0 deletions requirements.txt
@@ -15,3 +15,4 @@ pandas
sphinx
sphinx_rtd_theme
pytest-json-report
ivimnet
63 changes: 63 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_1_simple_map.py
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""
import IVIMNET.simulations as sim
from hyperparams import hyperparams as hp_example_1
import IVIMNET.deep as deep
import time
import torch
import IVIMNET.fitting_algorithms as fit

# Import parameters
arg = hp_example_1()
arg = deep.checkarg(arg)
print(arg.save_name)
for SNR in arg.sim.SNR:
# this simulates the signal
IVIM_signal_noisy, D, f, Dp = sim.sim_signal(SNR, arg.sim.bvalues, sims=arg.sim.sims, Dmin=arg.sim.range[0][0],
Dmax=arg.sim.range[1][0], fmin=arg.sim.range[0][1],
fmax=arg.sim.range[1][1], Dsmin=arg.sim.range[0][2],
Dsmax=arg.sim.range[1][2], rician=arg.sim.rician)

start_time = time.time()
# train network
net = deep.learn_IVIM(IVIM_signal_noisy, arg.sim.bvalues, arg)
elapsed_time = time.time() - start_time
print('\ntime elapsed for training: {}\n'.format(elapsed_time))

# simulate IVIM signal for prediction
[dwi_image_long, Dt_truth, Fp_truth, Dp_truth] = sim.sim_signal_predict(arg, SNR)

# predict
start_time = time.time()
paramsNN = deep.predict_IVIM(dwi_image_long, arg.sim.bvalues, net, arg)
elapsed_time = time.time() - start_time
print('\ntime elapsed for inference: {}\n'.format(elapsed_time))
# remove network to save memory
del net
if arg.train_pars.use_cuda:
torch.cuda.empty_cache()

start_time = time.time()
# all fitting is done in fit.fit_dats for the other fitting algorithms (lsq, segmented and Bayesian)
paramsf = fit.fit_dats(arg.sim.bvalues, dwi_image_long, arg.fit)
elapsed_time = time.time() - start_time
print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time))
print('results for lsqfit')

# plot values predict and truth
sim.plot_example1(paramsNN, paramsf, Dt_truth, Fp_truth, Dp_truth, arg, SNR)
48 changes: 48 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_2_simulations.py
@@ -0,0 +1,48 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# import
import numpy as np
import IVIMNET.simulations as sim
import IVIMNET.deep as deep
from hyperparams import hyperparams as hp_example

# load hyperparameter
arg = hp_example()
arg = deep.checkarg(arg)

matlsq = np.zeros([len(arg.sim.SNR), 3, 3])
matNN = np.zeros([len(arg.sim.SNR), 3, 3])
stability = np.zeros([len(arg.sim.SNR), 3])
a = 0

for SNR in arg.sim.SNR:
print('\n simulation at SNR of {snr}\n'.format(snr=SNR))
if arg.fit.do_fit:
matlsq[a, :, :], matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
print('\nresults from lsq:')
print(matlsq)
else:
matNN[a, :, :], stability[a, :] = sim.sim(SNR, arg)
a = a + 1
print('\nresults from NN: columns show the mean, the RMSE/mean and the Spearman coef [DvDp,Dvf,fvDp] \n'
      'the rows show D, f and D*\n'
      'and the different matrices represent the different SNR levels {}:'.format(arg.sim.SNR))
print(matNN)
# if repeat is higher than 1, then print stability (CVNET)
if arg.sim.repeats > 1:
print('\nstability of NN for D, f and D* at different SNR levels:')
print(stability)
108 changes: 108 additions & 0 deletions src/original/OGC_AUMC_IVIMNET/Example_3_volunteer.py
@@ -0,0 +1,108 @@
"""
September 2020 by Oliver Gurney-Champion & Misha Kaandorp
oliver.gurney.champion@gmail.com / o.j.gurney-champion@amsterdamumc.nl
https://www.github.com/ochampion

Code is uploaded as part of our publication in MRM (Kaandorp et al. Improved unsupervised physics-informed deep learning for intravoxel-incoherent motion modeling and evaluation in pancreatic cancer patients. MRM 2021)

requirements:
numpy
torch
tqdm
matplotlib
scipy
joblib
"""

# this script loads all patient data and evaluates it
import os
import time
import nibabel as nib
import numpy as np
import IVIMNET.deep as deep
import torch
from IVIMNET.fitting_algorithms import fit_dats
from hyperparams import hyperparams as hp

arg = hp()
arg = deep.checkarg(arg)

testdata = False

### folder patient data
folder = 'example_data'

### load patient data
print('Load patient data \n')
# load and init b-values
text_file = np.genfromtxt('{folder}/bvalues.bval'.format(folder=folder))
bvalues = np.array(text_file)
selsb = np.array(bvalues) == 0
# load nifti
data = nib.load('{folder}/data.nii.gz'.format(folder=folder))
datas = data.get_fdata()
# reshape image for fitting
sx, sy, sz, n_b_values = datas.shape
X_dw = np.reshape(datas, (sx * sy * sz, n_b_values))

### select only relevant values, delete background and noise, and normalise data
S0 = np.nanmean(X_dw[:, selsb], axis=1)
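# note (reviewer comment): S0 != S0 is True only for NaN entries, so the next line
# replaces NaN b=0 means with 0 before the background mask is computed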
S0[S0 != S0] = 0
S0 = np.squeeze(S0)
valid_id = (S0 > (0.5 * np.median(S0[S0 > 0])))
datatot = X_dw[valid_id, :]
# normalise data
S0 = np.nanmean(datatot[:, selsb], axis=1).astype('<f')
datatot = datatot / S0[:, None]
print('Patient data loaded\n')

### least square fitting
if arg.fit.do_fit:
print('Conventional fitting\n')
start_time = time.time()
paramslsq = fit_dats(bvalues.copy(), datatot.copy()[:, :len(bvalues)], arg.fit)
elapsed_time1 = time.time() - start_time
print('\ntime elapsed for lsqfit: {}\n'.format(elapsed_time1))
# define names IVIM params
names_lsq = ['D_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
'f_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0'),
'Dp_{}_{}'.format(arg.fit.method, 'fitS0' if not arg.fit.fitS0 else 'freeS0')]

tot = 0
# fill image array
for k in range(len(names_lsq)):
img = np.zeros([sx * sy * sz])
img[valid_id] = paramslsq[k][tot:(tot + sum(valid_id))]
img = np.reshape(img, [sx, sy, sz])
nib.save(nib.Nifti1Image(img, data.affine, data.header),'{folder}/{name}.nii.gz'.format(folder=folder,name=names_lsq[k]))
print('Conventional fitting done\n')

### NN network
if not arg.train_pars.skip_net:
print('NN fitting\n')
res = [i for i, val in enumerate(datatot != datatot) if not val.any()] # Remove NaN data
start_time = time.time()
# train network
net = deep.learn_IVIM(datatot[res], bvalues, arg)
elapsed_time1net = time.time() - start_time
print('\ntime elapsed for Net: {}\n'.format(elapsed_time1net))
start_time = time.time()
# predict parameters
paramsNN = deep.predict_IVIM(datatot, bvalues, net, arg)
elapsed_time1netinf = time.time() - start_time
print('\ntime elapsed for Net inf: {}\n'.format(elapsed_time1netinf))
print('\ndata length: {}\n'.format(len(datatot)))
if arg.train_pars.use_cuda:
torch.cuda.empty_cache()
# define names IVIM params
names = ['D_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
'f_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),
'Dp_NN_{nn}_lr_{lr}'.format(nn=arg.save_name, lr=arg.train_pars.lr),]
tot = 0
# fill image array and make nifti
for k in range(len(names)):
img = np.zeros([sx * sy * sz])
img[valid_id] = paramsNN[k][tot:(tot + sum(valid_id))]
img = np.reshape(img, [sx, sy, sz])
nib.save(nib.Nifti1Image(img, data.affine, data.header),'{folder}/{name}.nii.gz'.format(folder = folder,name=names[k])),
print('NN fitting done\n')