Differentiating mvnormal #1554
base: master
Changes from 13 commits
@@ -253,7 +253,7 @@ Base.show(io::IO, d::MvNormal) =
 length(d::MvNormal) = length(d.μ)
 mean(d::MvNormal) = d.μ
 params(d::MvNormal) = (d.μ, d.Σ)
-@inline partype(d::MvNormal{T}) where {T<:Real} = T
+@inline partype(::MvNormal{T}) where {T<:Real} = T

 var(d::MvNormal) = diag(d.Σ)
 cov(d::MvNormal) = Matrix(d.Σ)

@@ -372,7 +372,7 @@ struct MvNormalStats <: SufficientStats
     tw::Float64    # total sample weight
 end

-function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64})
+function suffstats(::Type{MvNormal}, x::AbstractMatrix{Float64})
Review comment: Can you revert non-CR changes? It seems not only unused names were removed but also some types and dispatches changed, creating slight inconsistencies, and related to the open issue about type parameters in …
     d = size(x, 1)
     n = size(x, 2)
     s = vec(sum(x, dims=2))

@@ -382,7 +382,7 @@ function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64})
     MvNormalStats(s, m, s2, Float64(n))
 end

-function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
+function suffstats(::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
     d = size(x, 1)
     n = size(x, 2)
     length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions."))

@@ -410,13 +410,13 @@ end
 # each kind of covariance
 #

-fit_mle(D::Type{MvNormal}, ss::MvNormalStats) = fit_mle(FullNormal, ss)
-fit_mle(D::Type{MvNormal}, x::AbstractMatrix{Float64}) = fit_mle(FullNormal, x)
-fit_mle(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractArray{Float64}) = fit_mle(FullNormal, x, w)
+fit_mle(::Type{MvNormal}, ss::MvNormalStats) = fit_mle(FullNormal, ss)
+fit_mle(::Type{MvNormal}, x::AbstractMatrix{Float64}) = fit_mle(FullNormal, x)
+fit_mle(::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractArray{Float64}) = fit_mle(FullNormal, x, w)

-fit_mle(D::Type{FullNormal}, ss::MvNormalStats) = MvNormal(ss.m, ss.s2 * inv(ss.tw))
+fit_mle(::Type{<:FullNormal}, ss::MvNormalStats) = MvNormal(ss.m, ss.s2 * inv(ss.tw))

-function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64})
+function fit_mle(::Type{FullNormal}, x::AbstractMatrix{Float64})
     n = size(x, 2)
     mu = vec(mean(x, dims=2))
     z = x .- mu

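(Aside, not part of the diff, on the dispatch point in the review comment above: in Julia, a `::Type{T}` argument matches only the type object `T` itself, while `::Type{<:T}` also matches its subtypes, so switching between the two styles can change which calls a method accepts. A minimal sketch with hypothetical types, not from this package:)

abstract type AbstractFit end        # hypothetical types, for illustration only
struct ExactFit <: AbstractFit end

f(::Type{AbstractFit})   = "matches only the type object AbstractFit"
g(::Type{<:AbstractFit}) = "matches AbstractFit and any of its subtypes"

f(AbstractFit)    # ok
g(ExactFit)       # ok
# f(ExactFit)     # MethodError: no method matching f(::Type{ExactFit})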
@@ -425,7 +425,7 @@ function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64})
     MvNormal(mu, PDMat(C))
 end

-function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
+function fit_mle(::Type{<:FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
     m = size(x, 1)
     n = size(x, 2)
     length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions"))

@@ -445,7 +445,7 @@ function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVec
     MvNormal(mu, PDMat(C))
 end

-function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64})
+function fit_mle(::Type{DiagNormal}, x::AbstractMatrix{Float64})
     m = size(x, 1)
     n = size(x, 2)

@@ -460,7 +460,7 @@ function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64})
     MvNormal(mu, PDiagMat(va))
 end

-function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
+function fit_mle(::Type{<:DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
     m = size(x, 1)
     n = size(x, 2)
     length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions"))

@@ -479,7 +479,7 @@ function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVec
     MvNormal(mu, PDiagMat(va))
 end

-function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64})
+function fit_mle(::Type{IsoNormal}, x::AbstractMatrix{Float64})
     m = size(x, 1)
     n = size(x, 2)

@@ -495,7 +495,7 @@ function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64})
     MvNormal(mu, ScalMat(m, va / (m * n)))
 end

-function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
+function fit_mle(::Type{<:IsoNormal}, x::AbstractMatrix{Float64}, w::AbstractVector)
     m = size(x, 1)
     n = size(x, 2)
     length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions"))

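(Aside, not part of the diff: these are the methods behind the public `fit_mle` API for multivariate normals; for example, fitting from column-wise samples, optionally weighted:)

using Distributions, Random

Random.seed!(1)
x = randn(3, 1000)                  # 3-dimensional samples, one per column
w = rand(1000)                      # optional per-sample weights

d_full = fit_mle(MvNormal, x)       # dispatches to the FullNormal fit
d_diag = fit_mle(DiagNormal, x, w)  # weighted fit with diagonal covariance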
@@ -515,3 +515,57 @@ function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}, w::AbstractVect
     end
     MvNormal(mu, ScalMat(m, va / (m * sw)))
 end
+
+## Differentiation
+
+function ChainRulesCore.frule((_, Δd)::Tuple{Any,Any}, ::typeof(mvnormal_c0), d::MvNormal)
+    y = mvnormal_c0(d)
+    Δd = ChainRulesCore.unthunk(Δd)
+    Δy = -dot(Δd.Σ, invcov(d)) / 2
+    return y, Δy
+end
+
+function ChainRulesCore.rrule(::typeof(mvnormal_c0), d::MvNormal)
+    y = mvnormal_c0(d)
+    function mvnormal_c0_pullback(dy)
+        dy = ChainRulesCore.unthunk(dy)
+        ∂Σ = -dy/2 * invcov(d)
+        ∂d = ChainRulesCore.Tangent{typeof(d)}(μ = ChainRulesCore.ZeroTangent(), Σ = ∂Σ)
+        return ChainRulesCore.NoTangent(), ∂d
+    end
+    return y, mvnormal_c0_pullback
+end
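(Aside, not part of the diff: a sketch of where these tangents come from, assuming `mvnormal_c0(d)` is the log normalization constant `-(length(d) * log2π + logdet(Σ)) / 2` as defined earlier in this file. With the identity $\mathrm{d}\,\log\det\Sigma = \operatorname{tr}(\Sigma^{-1}\,\mathrm{d}\Sigma)$,

$$\dot y \;=\; -\tfrac12\,\operatorname{tr}\!\left(\Sigma^{-1}\dot\Sigma\right) \;=\; -\tfrac12\,\bigl\langle \Sigma^{-1},\,\dot\Sigma \bigr\rangle, \qquad \bar\Sigma \;=\; -\tfrac{\bar y}{2}\,\Sigma^{-1},$$

which matches `Δy = -dot(Δd.Σ, invcov(d)) / 2` in the frule and `∂Σ = -dy/2 * invcov(d)` in the pullback; the μ cotangent is zero because the constant does not depend on μ.)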
+
+function ChainRulesCore.frule(dargs::Tuple{Any,Any,Any}, ::typeof(sqmahal), d::MvNormal, x::AbstractVector)
+    y = sqmahal(d, x)
+    (_, Δd, Δx) = dargs
+    Δd = ChainRulesCore.unthunk(Δd)
+    Δx = ChainRulesCore.unthunk(Δx)
+    Σinv = invcov(d)
Review comment: Could we avoid computing the inverse?
+    # TODO optimize
+    dΣ = -dot(Σinv * Δd.Σ * Σinv, x * x' - d.μ * x' - x * d.μ' + d.μ * d.μ')
+    dx = 2 * dot(Σinv * (x - d.μ), Δx)
+    dμ = 2 * dot(Σinv * (d.μ - x), Δd.μ)
+    Δy = dΣ + dx + dμ
+    return (y, Δy)
+end
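(Aside, not part of the diff, on the review question above about avoiding the inverse: assuming PDMats' left-division `d.Σ \ v` solves against the stored factorization, the `invcov(d) * v` products could be replaced by solves, and the Σ cotangent collapses to an outer product. A rough sketch of the gradient pieces only, with a hypothetical helper name, not a drop-in replacement for the rules here:)

# Hypothetical helper, not part of the PR: gradient pieces of sqmahal
# without forming invcov(d), assuming `d.Σ \ v` (PDMats) performs a solve.
function sqmahal_grads_noinv(d::MvNormal, x::AbstractVector, dy)
    cx = x - d.μ
    z = d.Σ \ cx            # Σ⁻¹ (x - μ) via a solve, no explicit inverse
    ∂x = 2dy * z
    ∂μ = -∂x                # gradient w.r.t. μ is the negative of ∂x
    ∂Σ = -dy * z * z'       # equals -Σ⁻¹ (x-μ)(x-μ)' Σ⁻¹ since Σ is symmetric
    return ∂μ, ∂Σ, ∂x
end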
+
+function ChainRulesCore.rrule(::typeof(sqmahal), d::MvNormal, x::AbstractVector)
+    y = sqmahal(d, x)
+    function sqmahal_pullback(dy)
+        Σinv = invcov(d)
+        dy = ChainRulesCore.unthunk(dy)
+        ∂x = ChainRulesCore.@thunk(begin
+            2dy * Σinv * (x - d.μ)
+        end)
+        ∂d = ChainRulesCore.@thunk(begin
+            cx = x - d.μ
+            ∂μ = -2dy * Σinv * cx
+            ∂J = dy * cx * cx'
+            ∂Σ = -Σinv * ∂J * Σinv
+            ChainRulesCore.Tangent{typeof(d)}(μ = ∂μ, Σ = ∂Σ)
+        end)
+        return (ChainRulesCore.NoTangent(), ∂d, ∂x)
+    end
+    return y, sqmahal_pullback
+end
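(Aside, not part of the diff: a quick consistency check one could run, assuming Distributions with this PR's rules is loaded; the x-gradient returned by the pullback is compared against central finite differences. ChainRulesTestUtils would be the more thorough choice for the actual test suite.)

# Sketch: assumes Distributions with this PR's ChainRulesCore rules is loaded.
using Distributions, LinearAlgebra
import ChainRulesCore

d = MvNormal([1.0, -1.0], [2.0 0.5; 0.5 1.0])
x = [0.3, 0.7]

y, back = ChainRulesCore.rrule(sqmahal, d, x)
_, ∂d, ∂x = back(1.0)

# Central finite differences in x for comparison
h = 1e-6
fd = [(sqmahal(d, x + h * e) - sqmahal(d, x - h * e)) / (2h)
      for e in eachcol(Matrix{Float64}(I, 2, 2))]

isapprox(ChainRulesCore.unthunk(∂x), fd; atol=1e-6)   # expected: true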
Review comment: Maybe move unrelated changes to a separate PR?
Reply: these were all relatively minor things (unused variables).