@@ -66,7 +66,7 @@ def order(Dt, Fp, Dp, S0=None):
     return Dt, Fp, Dp, S0


-def fit_segmented_array(bvalues, dw_data, njobs=4, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cutoff=75):
+def fit_segmented_array(bvalues, dw_data, njobs=4, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cutoff=75, p0=[0.001, 0.1, 0.01, 1]):
     """
     This is an implementation of the segmented fit, in which we first estimate D using a curve fit to b-values>cutoff;
     then estimate f from the fitted S0 and the measured S0 and finally estimate D* while fixing D and f. This fit
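For reference, a minimal usage sketch of the extended array fit with an explicit starting guess (the b-values and synthetic signals below are illustrative, not part of this commit):

    import numpy as np

    bvalues = np.array([0, 10, 20, 30, 50, 75, 100, 200, 400, 800])
    # hypothetical bi-exponential test signal, tiled over 100 voxels
    signal = 0.9 * np.exp(-bvalues * 0.0012) + 0.1 * np.exp(-bvalues * 0.05)
    dw_data = np.tile(signal, (100, 1))
    # p0 = [D, f, D*, S0] in physical units; njobs=1 forces the single-core loop
    Dt, Fp, Dp, S0 = fit_segmented_array(bvalues, dw_data, njobs=1, cutoff=75,
                                         p0=[0.0015, 0.15, 0.02, 1])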
@@ -90,7 +90,7 @@ def fit_segmented_array(bvalues, dw_data, njobs=4, bounds=([0, 0, 0.005],[0.005,
         try:
             # define the parallel function
             def parfun(i):
-                return fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff)
+                return fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff, p0=p0)

             output = Parallel(n_jobs=njobs)(delayed(parfun)(i) for i in tqdm(range(len(dw_data)), position=0, leave=True))
             Dt, Fp, Dp = np.transpose(output)
@@ -107,11 +107,11 @@ def parfun(i):
             Fp = np.zeros(len(dw_data))
             for i in tqdm(range(len(dw_data)), position=0, leave=True):
                 # fill arrays with fit results on a per voxel base:
-                Dt[i], Fp[i], Dp[i] = fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff)
+                Dt[i], Fp[i], Dp[i] = fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff, p0=p0)
     return [Dt, Fp, Dp, S0]


-def fit_segmented(bvalues, dw_data, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cutoff=75):
+def fit_segmented(bvalues, dw_data, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cutoff=75, p0=[0.001, 0.1, 0.01, 1]):
     """
     This is an implementation of the segmented fit, in which we first estimate D using a curve fit to b-values>cutoff;
     then estimate f from the fitted S0 and the measured S0 and finally estimate D* while fixing D and f.
@@ -124,23 +124,24 @@ def fit_segmented(bvalues, dw_data, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cu
     :return Dp: Fitted Dp
     :return S0: Fitted S0
     """
+    p0 = [p0[0] * 1000, p0[1] * 10, p0[2] * 10, p0[3]]
     try:
         # determine high b-values and data for D
         high_b = bvalues[bvalues >= cutoff]
         high_dw_data = dw_data[bvalues >= cutoff]
         # correct the bounds. Note that S0 bounds determine the max and min of f
-        bounds1 = ([bounds[0][0] * 1000., 1 - bounds[1][1]], [bounds[1][0] * 1000., 1. - bounds[0][
+        bounds1 = ([bounds[0][0] * 1000., 0.7 - bounds[1][1]], [bounds[1][0] * 1000., 1.3 - bounds[0][
             1]])  # By bounding S0 like this, we effectively insert the boundaries of f
         # fit for S0' and D
         params, _ = curve_fit(lambda b, Dt, int: int * np.exp(-b * Dt / 1000), high_b, high_dw_data,
-                              p0=(1, 1),
+                              p0=(p0[0], p0[3] - p0[1] / 10),
                               bounds=bounds1)
         Dt, Fp = params[0] / 1000, 1 - params[1]
         # remove the diffusion part to only keep the pseudo-diffusion
         dw_data_remaining = dw_data - (1 - Fp) * np.exp(-bvalues * Dt)
-        bounds2 = (bounds[0][2], bounds[1][2])
+        bounds2 = (bounds[0][2] * 10, bounds[1][2] * 10)
         # fit for D*
-        params, _ = curve_fit(lambda b, Dp: Fp * np.exp(-b * Dp), bvalues, dw_data_remaining, p0=(0.1), bounds=bounds2)
+        params, _ = curve_fit(lambda b, Dp: Fp * np.exp(-b * Dp), bvalues, dw_data_remaining, p0=(p0[2]), bounds=bounds2)
         Dp = params[0]
         return Dt, Fp, Dp
     except:
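To make the new initial-guess handling concrete, a short worked sketch of the internal rescaling with the default p0 (derived from the lines above, not part of the commit itself):

    p0 = [0.001, 0.1, 0.01, 1]                          # [D, f, D*, S0] in physical units
    p0 = [p0[0] * 1000, p0[1] * 10, p0[2] * 10, p0[3]]  # -> [1.0, 1.0, 0.1, 1]
    # start for the high-b fit (D_scaled, S0'): (p0[0], p0[3] - p0[1] / 10) -> (1.0, 0.9),
    # i.e. the intercept starts near S0 * (1 - f)
    # start for the D* fit: p0[2] -> 0.1, with bounds2 likewise multiplied by 10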
@@ -150,7 +151,7 @@ def fit_segmented(bvalues, dw_data, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cu
 
 
 def fit_least_squares_array(bvalues, dw_data, S0_output=True, fitS0=True, njobs=4,
-                            bounds=([0, 0, 0.005, 0.7],[0.005, 0.7, 0.2, 1.3])):
+                            bounds=([0, 0, 0.005, 0.7],[0.005, 0.7, 0.2, 1.3]), p0=[0.001, 0.1, 0.01, 1]):
     """
     This is an implementation of the conventional IVIM fit. It is fitted in array form.
     :param bvalues: 1D Array with the b-values
@@ -175,7 +176,7 @@ def fit_least_squares_array(bvalues, dw_data, S0_output=True, fitS0=True, njobs=
             try:
                 # defining parallel function
                 def parfun(i):
-                    return fit_least_squares(bvalues, dw_data[i, :], S0_output=S0_output, fitS0=fitS0, bounds=bounds)
+                    return fit_least_squares(bvalues, dw_data[i, :], S0_output=S0_output, fitS0=fitS0, bounds=bounds, p0=p0)

                 output = Parallel(n_jobs=njobs)(delayed(parfun)(i) for i in tqdm(range(len(dw_data)), position=0, leave=True))
                 Dt, Fp, Dp, S0 = np.transpose(output)
@@ -192,14 +193,14 @@ def parfun(i):
             # running in a single loop and filling arrays
             for i in tqdm(range(len(dw_data)), position=0, leave=True):
                 Dt[i], Fp[i], Dp[i], S0[i] = fit_least_squares(bvalues, dw_data[i, :], S0_output=S0_output, fitS0=fitS0,
-                                                               bounds=bounds)
+                                                               bounds=bounds, p0=p0)
         return [Dt, Fp, Dp, S0]
     else:
         # if S0 is not exported
         if njobs > 1:
             try:
                 def parfun(i):
-                    return fit_least_squares(bvalues, dw_data[i, :], fitS0=fitS0, bounds=bounds)
+                    return fit_least_squares(bvalues, dw_data[i, :], fitS0=fitS0, bounds=bounds, p0=p0)

                 output = Parallel(n_jobs=njobs)(delayed(parfun)(i) for i in tqdm(range(len(dw_data)), position=0, leave=True))
                 Dt, Fp, Dp = np.transpose(output)
@@ -213,12 +214,12 @@ def parfun(i):
             Fp = np.zeros(len(dw_data))
             for i in range(len(dw_data)):
                 Dt[i], Fp[i], Dp[i] = fit_least_squares(bvalues, dw_data[i, :], S0_output=S0_output, fitS0=fitS0,
-                                                        bounds=bounds)
+                                                        bounds=bounds, p0=p0)
         return [Dt, Fp, Dp]


 def fit_least_squares(bvalues, dw_data, S0_output=False, fitS0=True,
-                      bounds=([0, 0, 0.005, 0.7],[0.005, 0.7, 0.2, 1.3])):
+                      bounds=([0, 0, 0.005, 0.7],[0.005, 0.7, 0.2, 1.3]), p0=[0.001, 0.1, 0.01, 1]):
     """
     This is an implementation of the conventional IVIM fit. It fits a single curve
     :param bvalues: Array with the b-values
@@ -236,13 +237,15 @@ def fit_least_squares(bvalues, dw_data, S0_output=False, fitS0=True,
             # bounds are rescaled such that each parameter changes at roughly the same rate to help fitting.
             bounds = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10],
                       [bounds[1][0] * 1000, bounds[1][1] * 10, bounds[1][2] * 10])
-            params, _ = curve_fit(ivimN_noS0, bvalues, dw_data, p0=[1, 1, 0.1], bounds=bounds)
+            p0 = [p0[0] * 1000, p0[1] * 10, p0[2] * 10]
+            params, _ = curve_fit(ivimN_noS0, bvalues, dw_data, p0=p0, bounds=bounds)
             S0 = 1
         else:
             # bounds are rescaled such that each parameter changes at roughly the same rate to help fitting.
             bounds = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10, bounds[0][3]],
                       [bounds[1][0] * 1000, bounds[1][1] * 10, bounds[1][2] * 10, bounds[1][3]])
-            params, _ = curve_fit(ivimN, bvalues, dw_data, p0=[1, 1, 0.1, 1], bounds=bounds)
+            p0 = [p0[0] * 1000, p0[1] * 10, p0[2] * 10, p0[3]]
+            params, _ = curve_fit(ivimN, bvalues, dw_data, p0=p0, bounds=bounds)
             S0 = params[3]
         # correct for the rescaling of parameters
         Dt, Fp, Dp = params[0] / 1000, params[1] / 10, params[2] / 10
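Note that the default p0 reproduces the previous hard-coded start after rescaling ([0.001, 0.1, 0.01, 1] maps to [1, 1, 0.1, 1]). A minimal single-voxel usage sketch with a custom starting guess (the signal values are illustrative, not from this commit):

    import numpy as np

    bvalues = np.array([0, 10, 20, 30, 50, 75, 100, 200, 400, 800])
    # hypothetical normalised single-voxel signal
    signal = 0.9 * np.exp(-bvalues * 0.0012) + 0.1 * np.exp(-bvalues * 0.05)
    # p0 = [D, f, D*, S0] in physical units; rescaled internally together with the bounds
    Dt, Fp, Dp = fit_least_squares(bvalues, signal, p0=[0.0012, 0.1, 0.05, 1])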
@@ -463,6 +466,20 @@ def fit_segmented_tri_exp(bvalues, dw_data, bounds=([0, 0, 0, 0.005, 0, 0.06], [
         return 0., 0., 0., 0., 0., 0.


+def neg_log_likelihood(p, bvalues, dw_data):
+    """
+    This function determines the negative of the log of the likelihood of parameters p, given the data dw_data for the Bayesian fit
+    :param p: 1D Array with the estimates of D, f, D* and (optionally) S0
+    :param bvalues: 1D array with b-values
+    :param dw_data: 1D Array diffusion-weighted data
+    :returns: the log-likelihood of the parameters given the data
+    """
+    if len(p) == 4:
+        return 0.5 * (len(bvalues) + 1) * np.log(
+            np.sum((ivim(bvalues, p[0], p[1], p[2], p[3]) - dw_data) ** 2))  # 0.5*sum simplified
+    else:
+        return 0.5 * (len(bvalues) + 1) * np.log(
+            np.sum((ivim(bvalues, p[0], p[1], p[2], 1) - dw_data) ** 2))  # 0.5*sum simplified
 def empirical_neg_log_prior(Dt0, Fp0, Dp0, S00=None):
     """
     This function determines the negative of the log of the empirical prior probability of the IVIM parameters
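For context (not part of the commit): the expression in neg_log_likelihood is the Gaussian negative log-likelihood with the noise variance profiled out, up to an additive constant,

    -log L(p) ≈ (N / 2) * log( sum_i (ivim(b_i; p) - S_i)^2 ) + const,

where N is the number of b-values; the code uses 0.5 * (len(bvalues) + 1) in place of N / 2.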
@@ -516,23 +533,6 @@ def neg_log_prior(p):
 
     return neg_log_prior
 
-
-def neg_log_likelihood(p, bvalues, dw_data):
-    """
-    This function determines the negative of the log of the likelihood of parameters p, given the data dw_data for the Bayesian fit
-    :param p: 1D Array with the estimates of D, f, D* and (optionally) S0
-    :param bvalues: 1D array with b-values
-    :param dw_data: 1D Array diffusion-weighted data
-    :returns: the log-likelihood of the parameters given the data
-    """
-    if len(p) == 4:
-        return 0.5 * (len(bvalues) + 1) * np.log(
-            np.sum((ivim(bvalues, p[0], p[1], p[2], p[3]) - dw_data) ** 2))  # 0.5*sum simplified
-    else:
-        return 0.5 * (len(bvalues) + 1) * np.log(
-            np.sum((ivim(bvalues, p[0], p[1], p[2], 1) - dw_data) ** 2))  # 0.5*sum simplified
-
-
 def neg_log_posterior(p, bvalues, dw_data, neg_log_prior):
     """
     This function determines the negative of the log of the likelihood of parameters p, given the prior likelihood and the data
@@ -545,6 +545,39 @@ def neg_log_posterior(p, bvalues, dw_data, neg_log_prior):
     return neg_log_likelihood(p, bvalues, dw_data) + neg_log_prior(p)


+def flat_neg_log_prior(Dt_range, Fp_range, Dp_range, S0_range=None):
+    """
+    This function determines the negative of the log of the flat prior probability of the IVIM parameters
+    :param Dt_range: range of the flat D prior
+    :param Fp_range: range of the flat f prior
+    :param Dp_range: range of the flat D* prior
+    :param S0_range: range of the flat S0 prior (optional)
+    """
+    def neg_log_prior(p):
+        # depends on whether S0 is fitted or not
+        if len(p) == 4:
+            Dt, Fp, Dp, S0 = p[0], p[1], p[2], p[3]
+        else:
+            Dt, Fp, Dp = p[0], p[1], p[2]
+        # make D*<D very unlikely
+        if (Dp < Dt):
+            return 1e3
+        else:
+            # determine and return the prior for D, f and D* (and S0)
+            if len(p) == 4:
+                if Dt_range[0] < Dt < Dt_range[1] and Fp_range[0] < Fp < Fp_range[1] and Dp_range[0] < Dp < Dp_range[1]:  # and S0_range[0] < S0 < S0_range[1]: << not sure whether this helps. Technically it should be here
+                    return 0
+                else:
+                    return 1e3
+            else:
+                if Dt_range[0] < Dt < Dt_range[1] and Fp_range[0] < Fp < Fp_range[1] and Dp_range[0] < Dp < Dp_range[1]:
+                    return 0
+                else:
+                    return 1e3
+
+    return neg_log_prior
+
+
 def fit_bayesian_array(bvalues, dw_data, paramslsq, arg):
     """
     This is an implementation of the Bayesian IVIM fit for arrays. The fit is taken from Barbieri et al. which was
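A minimal sketch of how the new flat prior might be combined with the single-voxel Bayesian fit defined further down (the ranges and data below are illustrative assumptions, not taken from this commit):

    import numpy as np

    bvalues = np.array([0, 10, 20, 30, 50, 75, 100, 200, 400, 800])
    signal = 0.9 * np.exp(-bvalues * 0.0012) + 0.1 * np.exp(-bvalues * 0.05)
    # uniform prior over plausible parameter ranges, instead of the empirical prior
    neg_log_prior = flat_neg_log_prior([0, 0.005], [0, 0.7], [0.005, 0.2], [0.7, 1.3])
    # MAP estimate for a single voxel; x0 = [D, f, D*, S0]
    fit_result = fit_bayesian(bvalues, signal, neg_log_prior, x0=[0.001, 0.2, 0.05, 1], fitS0=True)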
@@ -563,7 +596,6 @@ def fit_bayesian_array(bvalues, dw_data, paramslsq, arg):
     :return Dp: Array with Dp in each voxel
     :return S0: Array with S0 in each voxel
     """
-    arg = checkarg_lsq(arg)
     # fill out missing args
     Dt0, Fp0, Dp0, S00 = paramslsq
     # determine prior
@@ -606,7 +638,7 @@ def parfun(i):
     return Dt_pred, Fp_pred, Dp_pred, S0_pred


-def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05], fitS0=True):
+def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05, 1], fitS0=True):
     '''
     This is an implementation of the Bayesian IVIM fit. It returns the Maximum a posterior probability.
     The fit is taken from Barbieri et al. which was initially introduced in http://arxiv.org/10.1002/mrm.25765 and
@@ -623,7 +655,7 @@ def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05], fitS0=T
     '''
     try:
         # define fit bounds
-        bounds = [(0, 0.005), (0, 0.7), (0.005, 0.2), (0, 2.5)]
+        bounds = [(0, 0.005), (0, 1.5), (0, 2), (0, 2.5)]
         # Find the Maximum a posterior probability (MAP) by minimising the negative log of the posterior
         if fitS0:
             params = minimize(neg_log_posterior, x0=x0, args=(bvalues, dw_data, neg_log_prior), bounds=bounds)
@@ -704,7 +736,7 @@ def goodness_of_fit(bvalues, Dt, Fp, Dp, S0, dw_data, Fp2=None, Dp2=None):
         # plt.ion()
         # plt.show()
         # print(R2[vox])
-    return R2, adjust
+    return R2, adjusted_R2


 def MSE(bvalues, Dt, Fp, Dp, S0, dw_data):