
Commit e030c14
MATLAB code contribution
1 parent a36245f commit e030c14

3 files changed: +761 -0 lines changed

src/original/OJ_GU/IVIM_bayes.m

Lines changed: 398 additions & 0 deletions
@@ -0,0 +1,398 @@
function out = IVIM_bayes(Y,f,D,Dstar,S0,b,lim,n,rician,prior,burns,meanonly)
% out = IVIM_bayes(Y,f,D,Dstar,S0,b,lim,n,rician,prior,burns,meanonly)
%
% Input:
%     Y: a VxB matrix containing all image data where V is the number of
%        voxels in the analysis ROI and B is the number of b-values
%     f: a Vx1 vector containing the perfusion fraction parameters for all
%        voxels from a least squares fit
%     D: a Vx1 vector containing the diffusion parameters for all voxels
%        from a least squares fit
%     Dstar: a Vx1 vector containing the pseudo-diffusion parameters for
%        all voxels from a least squares fit
%     S0: a Vx1 vector containing the signal amplitudes at b = 0 for all
%        voxels from a least squares fit
%     b: a 1xB vector containing all b-values
%     lim: a 2x4 or 2x5 matrix with lower (1st row) and upper (2nd row)
%        limits of all parameters in the order f, D, D*, S0, noisevar
%     n: number of iterations after burn-in (default: 10000)
%     rician: scalar boolean. If true, a Rician noise distribution is used,
%        else a Gaussian noise distribution is used (default: false)
%     prior: cell array with one cell per estimated parameter;
%        each cell should contain a string equal to 'flat', 'reci' or
%        'lognorm' to define the prior distribution for the
%        corresponding parameter (default: {'flat','flat','flat','flat','reci'})
%        'flat' = uniform, 'reci' = reciprocal, 'lognorm' = lognormal
%        lognormal is only available for D and D*
%     burns: number of burn-in steps (default: 1000)
%     meanonly: scalar boolean. If true, only the posterior mean and std
%        are estimated (substantially more memory efficient and
%        therefore faster) (default: false)
%
% Output:
%     out: a struct with fields f, D, Dstar and S0, each containing the
%        voxelwise (Vx1) mean, median, mode and standard deviation of the
%        parameter (only mean and standard deviation if meanonly is true)
%
% By Oscar Jalnefjord 2018-08-27
%
% If you use this function in research, please cite:
% Gustafsson et al. 2017 Impact of prior distributions and central tendency
% measures on Bayesian intravoxel incoherent motion model fitting, MRM
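%
% Example (an illustrative sketch only; the b-values, limits and variable
% names below are hypothetical and not part of the original code):
%
%     b   = [0 10 20 50 100 200 400 800];       % b-values in s/mm^2
%     lim = [0  0     0    0;                   % lower limits: f, D, D*, S0
%            1  5e-3  0.1  2*max(Y(:))];        % upper limits: f, D, D*, S0
%     % f0, D0, Dstar0, S00 are Vx1 least squares estimates used as start values
%     out  = IVIM_bayes(Y,f0,D0,Dstar0,S00,b,lim,10000);
%     Dmap = out.D.mean;                        % posterior mean of D per voxel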

%%%%%%%%%%%%%%%%%%
% Error handling %
%%%%%%%%%%%%%%%%%%
if ~isrow(b)
    b = b';
    if ~isrow(b)
        error('b must be a row vector');
    end
end
B = length(b);

if size(Y,2) ~= B
    Y = Y';
    if size(Y,2) ~= B
        error('Y must have same number of columns as b');
    end
end
V = size(Y,1);

if isvector(f) && length(f) > 1
    if ~isequal([V 1],size(f))
        f = f';
        if ~isequal([V 1],size(f))
            error('f must be a column vector with the same number of rows as Y or a scalar');
        end
    end
elseif isscalar(f)
    f = f*ones(V,1);
else
    error('f must be a column vector or a scalar');
end

if isvector(D) && length(D) > 1
    if ~isequal([V 1],size(D))
        D = D';
        if ~isequal([V 1],size(D))
            error('D must be a column vector with the same number of rows as Y or a scalar');
        end
    end
elseif isscalar(D)
    D = D*ones(V,1);
else
    error('D must be a column vector or a scalar');
end

if isvector(Dstar) && length(Dstar) > 1
    if ~isequal([V 1],size(Dstar))
        Dstar = Dstar';
        if ~isequal([V 1],size(Dstar))
            error('Dstar must be a column vector with the same number of rows as Y or a scalar');
        end
    end
elseif isscalar(Dstar)
    Dstar = Dstar*ones(V,1);
else
    error('Dstar must be a column vector or a scalar');
end

if isvector(S0) && length(S0) > 1
    if ~isequal([V 1],size(S0))
        S0 = S0';
        if ~isequal([V 1],size(S0))
            error('S0 must be a column vector with the same number of rows as Y or a scalar');
        end
    end
elseif isscalar(S0)
    S0 = S0*ones(V,1);
else
    error('S0 must be a column vector or a scalar');
end

% n = number of iterations after burn-in
if nargin < 8
    n = 10000;
else
    if ~(isscalar(n) && n > 0 && (round(n) == n) && isreal(n))
        error('n must be a positive scalar integer');
    end
end

if nargin < 9
    rician = false; % use Gaussian noise distribution by default
end

if nargin < 10
    prior = {'flat','flat','flat','flat','reci'}; % flat priors for f, D, D* and S0, reciprocal prior for the noise variance
end

% lim
if rician
    limsz = [2,5];
else
    limsz = [2,4];
end
if ~isequal(size(lim),limsz)
    error('lim must be 2x%d',limsz(2));
end
if rician && ~isequal(size(lim),[2,5])
    error('noise variance must be estimated for Rician noise distribution');
end

% burn-in steps
if nargin < 11
    burns = 1000;
end

% mean only
if nargin < 12
    meanonly = false;
end

%%%%%%%%%%%%%%%%%%%%%%%%%
% Parameter preparation %
%%%%%%%%%%%%%%%%%%%%%%%%%
M = length(f);
nbs = length(b);

if rician
    % start value for the inverse noise variance (1/s2)
    is2 = nbs./sum((Y-repmat(S0,1,nbs).*((1-repmat(f,1,nbs)).*exp(-D*b) + ...
        repmat(f,1,nbs).*exp(-(D+Dstar)*b))).^2,2); %1/s2
end

if meanonly
    voxelsPerRun = V;
else
    voxelsPerRun = min(V,1500); % to avoid filling the memory for n > 40,000
end

if meanonly
    thetasum = zeros(M,4+rician);
    theta2sum = zeros(M,4+rician);
end

% burn-in parameters
burnUpdateInterval = 100;
burnUpdateFraction = 1/2;

%%%%%%%%%%%%%%%%%%%%%%%%
% Parameter estimation %
%%%%%%%%%%%%%%%%%%%%%%%%
for i = 1:ceil(M/voxelsPerRun)
    % partition voxels
    usedVoxels = (i-1)*voxelsPerRun + 1:min(i*voxelsPerRun,M);

    fpart = f(usedVoxels);
    Dpart = D(usedVoxels);
    Dstarpart = Dstar(usedVoxels);
    S0part = S0(usedVoxels);
    if rician
        is2part = is2(usedVoxels);
    end
    Ypart = Y(usedVoxels,:);
    Mpart = min(i*voxelsPerRun,M) - (i-1)*voxelsPerRun;

    % initialize parameter vector
    if meanonly
        theta = zeros(Mpart,4+rician,2);
    else
        theta = zeros(Mpart,4+rician,n+burns);
    end
    theta(:,1:4,1) = [fpart, Dpart, Dstarpart, S0part];
    if rician
        theta(:,5,1) = is2part;
    end

    % step length parameter
    w = zeros(Mpart,4+rician);
    w(:,1:4) = [fpart Dpart Dstarpart S0part]/10;
    if rician
        w(:,5) = 0.01*ones(Mpart,1);
    end

    N = zeros(Mpart,4+rician); % number of accepted samples

    % iterate for j = 2,3,...,n+burns
    for j = 2:n + burns
        % initialize theta(j)
        if meanonly
            theta(:,:,2) = theta(:,:,1);
            thetanew = theta(:,:,2);
            thetaold = theta(:,:,1);
        else
            theta(:,:,j) = theta(:,:,j-1);
            thetanew = theta(:,:,j);
            thetaold = theta(:,:,j-1);
        end

        % sample each parameter
        for k = 1:4+rician
            % sample s and r and update
            s = thetaold(:,k) + randn(Mpart,1).*w(:,k);
            r = rand(Mpart,1);

            thetas = thetanew;
            thetas(:,k) = s;
            alpha = acc_MH(thetas,thetanew,Ypart,b,lim,rician,prior{k});
            sample_ok = r < alpha;
            thetanew(sample_ok,k) = thetas(sample_ok,k);
            thetanew(~sample_ok,k) = thetaold(~sample_ok,k); % reject samples
            N(:,k) = N(:,k) + sample_ok;
        end

        % prepare for next iteration
        if meanonly
            theta(:,:,1) = thetanew;
        else
            theta(:,:,j) = thetanew;
        end

        % save parameter value after burn-in phase
        if meanonly && j > burns
            thetasum = thetasum + thetanew;
            theta2sum = theta2sum + thetanew.^2;
        end

        % adapt step length
        if j <= burns*burnUpdateFraction && mod(j,burnUpdateInterval) == 0
            w = w*(burnUpdateInterval+1)./(2*((burnUpdateInterval+1)-N));
            N = zeros(Mpart,4+rician);
        end

        % display progress every 500th iteration (every 100th during burn-in)
        if ~mod(j,500) && j > burns
            disp(['Iterations: ' num2str(j-burns)]);
        elseif ~mod(j,100) && j < burns
            disp(['Burn in-steps: ' num2str(j)]);
        elseif j == burns
            disp(['Burn in complete: ' num2str(j)]);
        end
    end

    % save distribution measures
    if meanonly
        % mean
        out.f.mean(usedVoxels) = thetasum(:,1)/n;
        out.D.mean(usedVoxels) = thetasum(:,2)/n;
        out.Dstar.mean(usedVoxels) = thetasum(:,3)/n;
        out.S0.mean(usedVoxels) = thetasum(:,4)/n;

        % standard deviation
        out.f.std(usedVoxels) = sqrt(theta2sum(:,1)/n-(thetasum(:,1)/n).^2);
        out.D.std(usedVoxels) = sqrt(theta2sum(:,2)/n-(thetasum(:,2)/n).^2);
        out.Dstar.std(usedVoxels) = sqrt(theta2sum(:,3)/n-(thetasum(:,3)/n).^2);
        out.S0.std(usedVoxels) = sqrt(theta2sum(:,4)/n-(thetasum(:,4)/n).^2);
    else
        % mean
        out.f.mean(usedVoxels) = mean(squeeze(theta(:,1,burns + 1:n+burns)),2);
        out.D.mean(usedVoxels) = mean(squeeze(theta(:,2,burns + 1:n+burns)),2);
        out.Dstar.mean(usedVoxels) = mean(squeeze(theta(:,3,burns + 1:n+burns)),2);
        out.S0.mean(usedVoxels) = mean(squeeze(theta(:,4,burns + 1:n+burns)),2);

        % median
        out.f.median(usedVoxels) = median(squeeze(theta(:,1,burns + 1:n+burns)),2);
        out.D.median(usedVoxels) = median(squeeze(theta(:,2,burns + 1:n+burns)),2);
        out.Dstar.median(usedVoxels) = median(squeeze(theta(:,3,burns + 1:n+burns)),2);
        out.S0.median(usedVoxels) = median(squeeze(theta(:,4,burns + 1:n+burns)),2);

        % mode
        out.f.mode(usedVoxels) = halfSampleMode(squeeze(theta(:,1,burns + 1:n+burns)));
        out.D.mode(usedVoxels) = halfSampleMode(squeeze(theta(:,2,burns + 1:n+burns)));
        out.Dstar.mode(usedVoxels) = halfSampleMode(squeeze(theta(:,3,burns + 1:n+burns)));
        out.S0.mode(usedVoxels) = halfSampleMode(squeeze(theta(:,4,burns + 1:n+burns)));

        % standard deviation
        out.f.std(usedVoxels) = std(squeeze(theta(:,1,burns + 1:n+burns)),1,2);
        out.D.std(usedVoxels) = std(squeeze(theta(:,2,burns + 1:n+burns)),1,2);
        out.Dstar.std(usedVoxels) = std(squeeze(theta(:,3,burns + 1:n+burns)),1,2);
        out.S0.std(usedVoxels) = std(squeeze(theta(:,4,burns + 1:n+burns)),1,2);
    end
end

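% ------------------------------------------------------------------------
% Added note (descriptive comment, not part of the original commit):
% acc_MH returns the Metropolis-Hastings acceptance probability
%     alpha = min(1, q)
% for a candidate parameter vector thetas against the current sample
% thetaj. q is the ratio of the IVIM signal model likelihoods under the
% chosen noise distribution, multiplied by the prior ratio ptsptj
% ('flat', 'reci' or 'lognorm'). Candidates that fall outside lim, or that
% violate D < D*, get q = 0 and are therefore always rejected.
% ------------------------------------------------------------------------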

function alpha = acc_MH(thetas,thetaj,Y,b,lim,rician,prior)
% theta = [f, D, Dstar, S0, 1/s2];
M = size(thetas,1);
N = length(b);

q = zeros(M,1);

% p(theta|lim)
pts = min((thetas >= repmat(lim(1,:),M,1)) & (thetas <= repmat(lim(2,:),M,1)),[],2);

% D < D*
pts = pts & (thetas(:,2) < thetas(:,3));

% signal model
Ss = repmat(thetas(pts,4),1,N).*((1-repmat(thetas(pts,1),1,N)).*exp(-thetas(pts,2)*b) + repmat(thetas(pts,1),1,N).*exp(-thetas(pts,3)*b));
Sj = repmat(thetaj(pts,4),1,N).*((1-repmat(thetaj(pts,1),1,N)).*exp(-thetaj(pts,2)*b) + repmat(thetaj(pts,1),1,N).*exp(-thetaj(pts,3)*b));
ptsptj = double(pts);

if strcmp(prior,'reci')
    diffpar = find(thetas(1,:) ~= thetaj(1,:));
    ptsptj(pts) = thetaj(pts,diffpar)./thetas(pts,diffpar); % rejects samples outside the limits % ~pts already == 0

elseif strcmp(prior,'lognorm')
    diffpar = find(thetas(1,:) ~= thetaj(1,:));

    % ptsptj = pts;
    if diffpar == 2
        mu = -6;
        s = 1;
    elseif diffpar == 3
        mu = -3.5;
        s = 1;
    else
        error('lognorm prior not available'); % only for D and D*
    end
    ptsptj(pts) = lognormprior(thetas(pts,diffpar),mu,s)./lognormprior(thetaj(pts,diffpar),mu,s); % ~pts already == 0
elseif ~strcmp(prior,'flat')
    error('unknown prior');
end

if rician % Rician noise distribution
    q(pts) = exp(-N*log(thetaj(pts,5))+sum(Y(pts,:).^2,2).*0.5.*thetaj(pts,5)+sum(Sj.^2,2).*0.5.*thetaj(pts,5)-sum(logIo(Y(pts,:).*Sj.*repmat(thetaj(pts,5),1,N)),2)...
        +N*log(thetas(pts,5))-sum(Y(pts,:).^2,2).*0.5.*thetas(pts,5)-sum(Ss.^2,2).*0.5.*thetas(pts,5)+sum(logIo(Y(pts,:).*Ss.*repmat(thetas(pts,5),1,N)),2)) .* ptsptj(pts);
else % Gaussian noise distribution
    q(pts) = (sum((Y(pts,:)-Sj).^2,2)./sum((Y(pts,:)-Ss).^2,2)).^(N/2) .* ptsptj(pts);
end

alpha = min(1,q);

function p = lognormprior(x,mu,s)
p = 1./(s*sqrt(2*pi)*x).*exp(-(log(x)-mu).^2/(2*s^2));

function y = logIo(x)
% Returns the natural log of the 0th order modified Bessel function of the first kind for an argument x
% Follows the exponential implementation of the Bessel function in Numerical Recipes, Ch. 6
%
% Translated to MATLAB from C++ from the FSL source code and vectorized for
% faster computations in MATLAB
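%
% Sanity check (added, hypothetical example; logIo is a local function, so
% it would have to be copied to its own logIo.m file to run this from the
% command line): for moderate arguments the approximation can be compared
% against MATLAB's built-in besseli, e.g.
%     x = linspace(0.1,20,100);
%     max(abs(logIo(x) - log(besseli(0,x))))  % on the order of 1e-7 (Numerical Recipes accuracy)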

b = abs(x);

y = zeros(size(x));

a1 = (x(b < 3.75)/3.75).^2;
a2 = 3.75./b(b >= 3.75);
y(b < 3.75) = log(1.0 + a1.*(3.5156229 + a1.*(3.0899424 + a1.*(1.2067492 + a1.*(0.2659732 + a1.*(0.0360768 + a1.*0.0045813))))));
y(b >= 3.75) = b(b >= 3.75) + log((0.39894228+a2.*(0.01328592+a2.*(0.00225319+a2.*(-0.00157565+a2.*(0.00916281+a2.*(-0.02057706+a2.*(0.02635537+a2.*(-0.01647633+a2.*0.00392377))))))))./sqrt(b(b>=3.75)));
