From 572590d5a7b752edd8279c034893a128c8d961b0 Mon Sep 17 00:00:00 2001
From: Mikhail Mikhasenko
Date: Mon, 23 Jun 2025 16:37:03 +0200
Subject: [PATCH 1/8] executed JuliaFormatter.format(".")

---
 docs/make.jl | 10 +-
 .../eachvariate.jl | 5 +-
 .../multivariate/dirichlet.jl | 58 +-
 .../univariate/continuous/uniform.jl | 4 +-
 .../univariate/discrete/negativebinomial.jl | 15 +-
 .../univariate/discrete/poissonbinomial.jl | 4 +-
 ext/DistributionsDensityInterfaceExt.jl | 23 +-
 ext/DistributionsTestExt.jl | 26 +-
 perf/mixtures.jl | 47 +-
 perf/samplers.jl | 34 +-
 src/Distributions.jl | 31 +-
 src/censored.jl | 75 +-
 src/cholesky/lkjcholesky.jl | 42 +-
 src/common.jl | 114 +-
 src/convolution.jl | 18 +-
 src/deprecates.jl | 81 +-
 src/eachvariate.jl | 11 +-
 src/edgeworth.jl | 96 +-
 src/functionals.jl | 17 +-
 src/genericfit.jl | 19 +-
 src/genericrand.jl | 17 +-
 src/matrix/inversewishart.jl | 24 +-
 src/matrix/lkj.jl | 69 +-
 src/matrix/matrixbeta.jl | 38 +-
 src/matrix/matrixfdist.jl | 47 +-
 src/matrix/matrixnormal.jl | 74 +-
 src/matrix/matrixtdist.jl | 94 +-
 src/matrix/wishart.jl | 43 +-
 src/matrixvariates.jl | 18 +-
 src/mixtures/mixturemodel.jl | 117 +-
 src/mixtures/unigmm.jl | 20 +-
 src/multivariate/dirichlet.jl | 119 +-
 src/multivariate/dirichletmultinomial.jl | 46 +-
 src/multivariate/jointorderstatistics.jl | 12 +-
 src/multivariate/multinomial.jl | 80 +-
 src/multivariate/mvlogitnormal.jl | 8 +-
 src/multivariate/mvlognormal.jl | 174 ++-
 src/multivariate/mvnormal.jl | 121 +-
 src/multivariate/mvnormalcanon.jl | 108 +-
 src/multivariate/mvtdist.jl | 87 +-
 src/multivariate/product.jl | 26 +-
 src/multivariate/vonmisesfisher.jl | 24 +-
 src/multivariates.jl | 39 +-
 src/namedtuple/productnamedtuple.jl | 11 +-
 src/pdfnorm.jl | 10 +-
 src/product.jl | 75 +-
 src/qq.jl | 6 +-
 src/quantilealgs.jl | 94 +-
 src/reshaped.jl | 29 +-
 src/samplers.jl | 27 +-
 src/samplers/aliastable.jl | 2 +-
 src/samplers/binomial.jl | 116 +-
 src/samplers/categorical.jl | 8 +-
 src/samplers/discretenonparametric.jl | 14 +-
 src/samplers/gamma.jl | 102 +-
 src/samplers/multinomial.jl | 13 +-
 src/samplers/obsoleted.jl | 57 +-
 src/samplers/poisson.jl | 30 +-
 src/samplers/productnamedtuple.jl | 4 +-
 src/samplers/vonmisesfisher.jl | 6 +-
 src/show.jl | 2 +-
 src/statsapi.jl | 16 +-
 src/test_utils.jl | 2 +-
 src/truncate.jl | 63 +-
 src/truncated/discrete_uniform.jl | 2 +-
 src/truncated/exponential.jl | 6 +-
 src/truncated/loguniform.jl | 3 +-
 src/truncated/normal.jl | 11 +-
 src/truncated/uniform.jl | 2 +-
 src/univariate/continuous/arcsine.jl | 26 +-
 src/univariate/continuous/beta.jl | 101 +-
 src/univariate/continuous/betaprime.jl | 35 +-
 src/univariate/continuous/biweight.jl | 43 +-
 src/univariate/continuous/cauchy.jl | 24 +-
 src/univariate/continuous/chernoff.jl | 306 ++---
 src/univariate/continuous/chi.jl | 32 +-
 src/univariate/continuous/chisq.jl | 19 +-
 src/univariate/continuous/cosine.jl | 22 +-
 src/univariate/continuous/epanechnikov.jl | 40 +-
 src/univariate/continuous/erlang.jl | 25 +-
 src/univariate/continuous/exponential.jl | 22 +-
 src/univariate/continuous/fdist.jl | 42 +-
 src/univariate/continuous/frechet.jl | 37 +-
 src/univariate/continuous/gamma.jl | 40 +-
 .../continuous/generalizedextremevalue.jl | 85 +-
 .../continuous/generalizedpareto.jl | 33 +-
 src/univariate/continuous/gumbel.jl | 20 +-
 src/univariate/continuous/inversegamma.jl | 35 +-
 src/univariate/continuous/inversegaussian.jl | 33 +-
 src/univariate/continuous/johnsonsu.jl | 21 +-
 src/univariate/continuous/kolmogorov.jl | 77 +-
 src/univariate/continuous/ksdist.jl | 80 +-
 src/univariate/continuous/ksonesided.jl | 10 +-
 src/univariate/continuous/kumaraswamy.jl | 5 +-
 src/univariate/continuous/laplace.jl | 36 +-
 src/univariate/continuous/levy.jl | 30 +-
 src/univariate/continuous/lindley.jl | 12 +-
 src/univariate/continuous/logistic.jl | 20 +-
 src/univariate/continuous/logitnormal.jl | 22 +-
 src/univariate/continuous/lognormal.jl | 29 +-
 src/univariate/continuous/loguniform.jl | 28 +-
 src/univariate/continuous/noncentralbeta.jl | 8 +-
 src/univariate/continuous/noncentralchisq.jl | 22 +-
 src/univariate/continuous/noncentralf.jl | 21 +-
 src/univariate/continuous/noncentralt.jl | 25 +-
 src/univariate/continuous/normal.jl | 57 +-
 src/univariate/continuous/normalcanon.jl | 34 +-
 .../continuous/normalinversegaussian.jl | 40 +-
 src/univariate/continuous/pareto.jl | 36 +-
 .../continuous/pgeneralizedgaussian.jl | 18 +-
 src/univariate/continuous/rayleigh.jl | 27 +-
 src/univariate/continuous/rician.jl | 33 +-
 src/univariate/continuous/semicircle.jl | 7 +-
 .../continuous/skewedexponentialpower.jl | 80 +-
 src/univariate/continuous/skewnormal.jl | 45 +-
 src/univariate/continuous/studentizedrange.jl | 14 +-
 src/univariate/continuous/symtriangular.jl | 45 +-
 src/univariate/continuous/tdist.jl | 37 +-
 src/univariate/continuous/triangular.jl | 44 +-
 src/univariate/continuous/triweight.jl | 51 +-
 src/univariate/continuous/uniform.jl | 32 +-
 src/univariate/continuous/vonmises.jl | 21 +-
 src/univariate/continuous/weibull.jl | 55 +-
 src/univariate/discrete/bernoulli.jl | 40 +-
 src/univariate/discrete/bernoullilogit.jl | 29 +-
 src/univariate/discrete/betabinomial.jl | 33 +-
 src/univariate/discrete/binomial.jl | 70 +-
 src/univariate/discrete/categorical.jl | 102 +-
 src/univariate/discrete/dirac.jl | 2 +-
 .../discrete/discretenonparametric.jl | 106 +-
 src/univariate/discrete/discreteuniform.jl | 9 +-
 src/univariate/discrete/geometric.jl | 33 +-
 src/univariate/discrete/hypergeometric.jl | 19 +-
 src/univariate/discrete/negativebinomial.jl | 42 +-
 .../discrete/noncentralhypergeometric.jl | 144 ++-
 src/univariate/discrete/poisson.jl | 29 +-
 src/univariate/discrete/poissonbinomial.jl | 62 +-
 src/univariate/discrete/skellam.jl | 31 +-
 src/univariate/discrete/soliton.jl | 13 +-
 src/univariate/locationscale.jl | 69 +-
 src/univariate/orderstatistic.jl | 5 +-
 src/univariates.jl | 148 ++-
 src/utils.jl | 33 +-
 test/censored.jl | 132 ++-
 test/cholesky/lkjcholesky.jl | 49 +-
 test/convolution.jl | 59 +-
 test/density_interface.jl | 9 +-
 test/eachvariate.jl | 2 +-
 test/edgecases.jl | 6 +-
 test/edgeworth.jl | 57 +-
 test/fit.jl | 120 +-
 test/functionals.jl | 46 +-
 test/gradlogpdf.jl | 40 +-
 test/matrixvariates.jl | 1004 +++++++++--------
 test/mixture.jl | 136 ++-
 test/multivariate/dirichlet.jl | 59 +-
 test/multivariate/dirichletmultinomial.jl | 161 +--
 test/multivariate/jointorderstatistics.jl | 42 +-
 test/multivariate/multinomial.jl | 325 +++---
 test/multivariate/mvlogitnormal.jl | 40 +-
 test/multivariate/mvlognormal.jl | 159 ++-
 test/multivariate/mvnormal.jl | 111 +-
 test/multivariate/mvtdist.jl | 148 +--
 test/multivariate/product.jl | 152 +--
 test/multivariate/vonmisesfisher.jl | 89 +-
 test/multivariate_stats.jl | 19 +-
 test/namedtuple/productnamedtuple.jl | 123 +-
 test/pdfnorm.jl | 26 +-
 test/product.jl | 97 +-
 test/qq.jl | 16 +-
 test/reshaped.jl | 18 +-
 test/runtests.jl | 2 +-
 test/samplers.jl | 85 +-
 test/statsapi.jl | 10 +-
 test/testutils.jl | 300 +++--
 test/truncate.jl | 161 +--
 test/truncated/exponential.jl | 21 +-
 test/truncated/normal.jl | 55 +-
 test/truncated/uniform.jl | 12 +-
 test/types.jl | 4 +-
 test/univariate/continuous.jl | 39 +-
 test/univariate/continuous/arcsine.jl | 2 +-
 test/univariate/continuous/chernoff.jl | 290 ++--
 test/univariate/continuous/chi.jl | 4 +-
 test/univariate/continuous/erlang.jl | 4 +-
 test/univariate/continuous/exponential.jl | 18 +-
 test/univariate/continuous/gamma.jl | 2 +-
 test/univariate/continuous/gumbel.jl | 12 +-
 test/univariate/continuous/inversegaussian.jl | 7 +-
 test/univariate/continuous/johnsonsu.jl | 16 +-
 test/univariate/continuous/kolmogorov.jl | 20 +-
 test/univariate/continuous/kumaraswamy.jl | 12 +-
 test/univariate/continuous/laplace.jl | 2 +-
 test/univariate/continuous/lindley.jl | 18 +-
 test/univariate/continuous/logistic.jl | 4 +-
 test/univariate/continuous/logitnormal.jl | 44 +-
 test/univariate/continuous/lognormal.jl | 46 +-
 test/univariate/continuous/loguniform.jl | 75 +-
 test/univariate/continuous/noncentralchisq.jl | 2 +-
 test/univariate/continuous/noncentralt.jl | 2 +-
 test/univariate/continuous/normal.jl | 154 +--
 test/univariate/continuous/pareto.jl | 10 +-
 .../continuous/pgeneralizedgaussian.jl | 8 +-
 test/univariate/continuous/rician.jl | 15 +-
 test/univariate/continuous/semicircle.jl | 38 +-
 .../continuous/skewedexponentialpower.jl | 60 +-
 test/univariate/continuous/skewnormal.jl | 2 +-
 test/univariate/continuous/triangular.jl | 11 +-
 test/univariate/continuous/uniform.jl | 40 +-
 test/univariate/continuous/weibull.jl | 3 +-
 test/univariate/discrete/bernoulli.jl | 6 +-
 test/univariate/discrete/bernoullilogit.jl | 16 +-
 test/univariate/discrete/betabinomial.jl | 14 +-
 test/univariate/discrete/binomial.jl | 79 +-
 test/univariate/discrete/categorical.jl | 243 ++--
 test/univariate/discrete/dirac.jl | 4 +-
 .../discrete/discretenonparametric.jl | 339 +++---
 test/univariate/discrete/discreteuniform.jl | 8 +-
 test/univariate/discrete/geometric.jl | 4 +-
 test/univariate/discrete/negativebinomial.jl | 47 +-
 test/univariate/discrete/poisson.jl | 8 +-
 test/univariate/discrete/poissonbinomial.jl | 278 ++--
 test/univariate/discrete/soliton.jl | 6 +-
 test/univariate/locationscale.jl | 84 +-
 test/univariate/orderstatistic.jl | 30 +-
 test/univariate_bounds.jl | 15 +-
 test/univariates.jl | 62 +-
 test/utils.jl | 23 +-
 test/vonmises.jl | 14 +-
 229 files changed, 7020 insertions(+), 5122 deletions(-)

diff --git a/docs/make.jl b/docs/make.jl
index e6b67f3ea5..23363175df 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -3,10 +3,12 @@ import Random: AbstractRNG, rand!
makedocs(; sitename = "Distributions.jl", - modules = [Distributions], - format = Documenter.HTML(; prettyurls = get(ENV, "CI", nothing) == "true", - assets = ["assets/favicon.ico"]), - pages = [ + modules = [Distributions], + format = Documenter.HTML(; + prettyurls = get(ENV, "CI", nothing) == "true", + assets = ["assets/favicon.ico"], + ), + pages = [ "index.md", "starting.md", "types.md", diff --git a/ext/DistributionsChainRulesCoreExt/eachvariate.jl b/ext/DistributionsChainRulesCoreExt/eachvariate.jl index 359a6703eb..4ac0b55956 100644 --- a/ext/DistributionsChainRulesCoreExt/eachvariate.jl +++ b/ext/DistributionsChainRulesCoreExt/eachvariate.jl @@ -1,4 +1,7 @@ -function ChainRulesCore.rrule(::Type{Distributions.EachVariate{V}}, x::AbstractArray{<:Real}) where {V} +function ChainRulesCore.rrule( + ::Type{Distributions.EachVariate{V}}, + x::AbstractArray{<:Real}, +) where {V} y = Distributions.EachVariate{V}(x) size_x = size(x) function EachVariate_pullback(Δ) diff --git a/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl b/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl index 5aa0d727d5..09ae1a5477 100644 --- a/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl +++ b/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl @@ -1,30 +1,54 @@ -function ChainRulesCore.frule((_, Δalpha)::Tuple{Any,Any}, ::Type{DT}, alpha::AbstractVector{T}; check_args::Bool = true) where {T <: Real, DT <: Union{Dirichlet{T}, Dirichlet}} - d = DT(alpha; check_args=check_args) +function ChainRulesCore.frule( + (_, Δalpha)::Tuple{Any,Any}, + ::Type{DT}, + alpha::AbstractVector{T}; + check_args::Bool = true, +) where {T<:Real,DT<:Union{Dirichlet{T},Dirichlet}} + d = DT(alpha; check_args = check_args) ∂alpha0 = sum(Δalpha) digamma_alpha0 = SpecialFunctions.digamma(d.alpha0) - ∂lmnB = sum(Broadcast.instantiate(Broadcast.broadcasted(Δalpha, alpha) do Δalphai, alphai - Δalphai * (SpecialFunctions.digamma(alphai) - digamma_alpha0) - end)) - Δd = ChainRulesCore.Tangent{typeof(d)}(; alpha=Δalpha, alpha0=∂alpha0, lmnB=∂lmnB) + ∂lmnB = sum( + Broadcast.instantiate( + Broadcast.broadcasted(Δalpha, alpha) do Δalphai, alphai + Δalphai * (SpecialFunctions.digamma(alphai) - digamma_alpha0) + end, + ), + ) + Δd = ChainRulesCore.Tangent{typeof(d)}(; alpha = Δalpha, alpha0 = ∂alpha0, lmnB = ∂lmnB) return d, Δd end -function ChainRulesCore.rrule(::Type{DT}, alpha::AbstractVector{T}; check_args::Bool = true) where {T <: Real, DT <: Union{Dirichlet{T}, Dirichlet}} - d = DT(alpha; check_args=check_args) +function ChainRulesCore.rrule( + ::Type{DT}, + alpha::AbstractVector{T}; + check_args::Bool = true, +) where {T<:Real,DT<:Union{Dirichlet{T},Dirichlet}} + d = DT(alpha; check_args = check_args) digamma_alpha0 = SpecialFunctions.digamma(d.alpha0) function Dirichlet_pullback(_Δd) Δd = ChainRulesCore.unthunk(_Δd) - Δalpha = Δd.alpha .+ Δd.alpha0 .+ Δd.lmnB .* (SpecialFunctions.digamma.(alpha) .- digamma_alpha0) + Δalpha = + Δd.alpha .+ Δd.alpha0 .+ + Δd.lmnB .* (SpecialFunctions.digamma.(alpha) .- digamma_alpha0) return ChainRulesCore.NoTangent(), Δalpha end return d, Dirichlet_pullback end -function ChainRulesCore.frule((_, Δd, Δx)::Tuple{Any,Any,Any}, ::typeof(Distributions._logpdf), d::Dirichlet, x::AbstractVector{<:Real}) +function ChainRulesCore.frule( + (_, Δd, Δx)::Tuple{Any,Any,Any}, + ::typeof(Distributions._logpdf), + d::Dirichlet, + x::AbstractVector{<:Real}, +) Ω = Distributions._logpdf(d, x) - ∂alpha = sum(Broadcast.instantiate(Broadcast.broadcasted(Δd.alpha, Δx, d.alpha, x) do 
Δalphai, Δxi, alphai, xi - StatsFuns.xlogy(Δalphai, xi) + (alphai - 1) * Δxi / xi - end)) + ∂alpha = sum( + Broadcast.instantiate( + Broadcast.broadcasted(Δd.alpha, Δx, d.alpha, x) do Δalphai, Δxi, alphai, xi + StatsFuns.xlogy(Δalphai, xi) + (alphai - 1) * Δxi / xi + end, + ), + ) ∂lmnB = -Δd.lmnB ΔΩ = ∂alpha + ∂lmnB if !isfinite(Ω) @@ -33,7 +57,11 @@ function ChainRulesCore.frule((_, Δd, Δx)::Tuple{Any,Any,Any}, ::typeof(Distri return Ω, ΔΩ end -function ChainRulesCore.rrule(::typeof(Distributions._logpdf), d::T, x::AbstractVector{<:Real}) where {T<:Dirichlet} +function ChainRulesCore.rrule( + ::typeof(Distributions._logpdf), + d::T, + x::AbstractVector{<:Real}, +) where {T<:Dirichlet} Ω = Distributions._logpdf(d, x) isfinite_Ω = isfinite(Ω) alpha = d.alpha @@ -41,7 +69,7 @@ function ChainRulesCore.rrule(::typeof(Distributions._logpdf), d::T, x::Abstract ΔΩ = ChainRulesCore.unthunk(_ΔΩ) ∂alpha = _logpdf_Dirichlet_∂alphai.(x, ΔΩ, isfinite_Ω) ∂lmnB = isfinite_Ω ? -float(ΔΩ) : oftype(float(ΔΩ), NaN) - Δd = ChainRulesCore.Tangent{T}(; alpha=∂alpha, lmnB=∂lmnB) + Δd = ChainRulesCore.Tangent{T}(; alpha = ∂alpha, lmnB = ∂lmnB) Δx = _logpdf_Dirichlet_Δxi.(ΔΩ, alpha, x, isfinite_Ω) return ChainRulesCore.NoTangent(), Δd, Δx end diff --git a/ext/DistributionsChainRulesCoreExt/univariate/continuous/uniform.jl b/ext/DistributionsChainRulesCoreExt/univariate/continuous/uniform.jl index 0461329577..1f8bc9fdec 100644 --- a/ext/DistributionsChainRulesCoreExt/univariate/continuous/uniform.jl +++ b/ext/DistributionsChainRulesCoreExt/univariate/continuous/uniform.jl @@ -23,9 +23,9 @@ function ChainRulesCore.rrule(::typeof(logpdf), d::Uniform, x::Real) function logpdf_Uniform_pullback(Δ) Δa = Δ / diff Δd = if insupport - ChainRulesCore.Tangent{typeof(d)}(; a=Δa, b=-Δa) + ChainRulesCore.Tangent{typeof(d)}(; a = Δa, b = -Δa) else - ChainRulesCore.Tangent{typeof(d)}(; a=zero(Δa), b=zero(Δa)) + ChainRulesCore.Tangent{typeof(d)}(; a = zero(Δa), b = zero(Δa)) end return ChainRulesCore.NoTangent(), Δd, ChainRulesCore.ZeroTangent() end diff --git a/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl b/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl index 06ee0294f7..80949f6e15 100644 --- a/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl +++ b/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl @@ -7,7 +7,7 @@ end function (f::LogPDFNegativeBinomialPullback{D})(Δ) where {D} Δr = Δ * f.∂r Δp = Δ * f.∂p - Δd = ChainRulesCore.Tangent{D}(; r=Δr, p=Δp) + Δd = ChainRulesCore.Tangent{D}(; r = Δr, p = Δp) return ChainRulesCore.NoTangent(), Δd, ChainRulesCore.NoTangent() end @@ -18,11 +18,15 @@ function ChainRulesCore.rrule(::typeof(logpdf), d::NegativeBinomial, k::Real) if iszero(k) Ω = z ∂r = oftype(z, log(p)) - ∂p = oftype(z, r/p) + ∂p = oftype(z, r / p) elseif insupport(d, k) Ω = z - log(k + r) - SpecialFunctions.logbeta(r, k + 1) - ∂r = oftype(z, log(p) - inv(k + r) - SpecialFunctions.digamma(r) + SpecialFunctions.digamma(r + k + 1)) - ∂p = oftype(z, r/p - k / (1 - p)) + ∂r = oftype( + z, + log(p) - inv(k + r) - SpecialFunctions.digamma(r) + + SpecialFunctions.digamma(r + k + 1), + ) + ∂p = oftype(z, r / p - k / (1 - p)) else Ω = oftype(z, -Inf) ∂r = oftype(z, NaN) @@ -30,7 +34,8 @@ function ChainRulesCore.rrule(::typeof(logpdf), d::NegativeBinomial, k::Real) end # Define pullback - logpdf_NegativeBinomial_pullback = LogPDFNegativeBinomialPullback{typeof(d),typeof(z)}(∂r, ∂p) + logpdf_NegativeBinomial_pullback = + 
LogPDFNegativeBinomialPullback{typeof(d),typeof(z)}(∂r, ∂p) return Ω, logpdf_NegativeBinomial_pullback end diff --git a/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl b/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl index aa27a9fd94..1d8449a03e 100644 --- a/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl +++ b/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl @@ -2,7 +2,9 @@ for f in (:poissonbinomial_pdf, :poissonbinomial_pdf_fft) pullback = Symbol(f, :_pullback) @eval begin function ChainRulesCore.frule( - (_, Δp)::Tuple{<:Any,<:AbstractVector{<:Real}}, ::typeof(Distributions.$f), p::AbstractVector{<:Real} + (_, Δp)::Tuple{<:Any,<:AbstractVector{<:Real}}, + ::typeof(Distributions.$f), + p::AbstractVector{<:Real}, ) y = Distributions.$f(p) A = Distributions.poissonbinomial_pdf_partialderivatives(p) diff --git a/ext/DistributionsDensityInterfaceExt.jl b/ext/DistributionsDensityInterfaceExt.jl index 4617c923ec..ce8d143b47 100644 --- a/ext/DistributionsDensityInterfaceExt.jl +++ b/ext/DistributionsDensityInterfaceExt.jl @@ -10,15 +10,30 @@ for (di_func, d_func) in ((:logdensityof, :logpdf), (:densityof, :pdf)) DensityInterface.$di_func(d::Distribution, x) = $d_func(d, x) function DensityInterface.$di_func(d::UnivariateDistribution, x::AbstractArray) - throw(ArgumentError("$(DensityInterface.$di_func) doesn't support multiple samples as an argument")) + throw( + ArgumentError( + "$(DensityInterface.$di_func) doesn't support multiple samples as an argument", + ), + ) end function DensityInterface.$di_func(d::MultivariateDistribution, x::AbstractMatrix) - throw(ArgumentError("$(DensityInterface.$di_func) doesn't support multiple samples as an argument")) + throw( + ArgumentError( + "$(DensityInterface.$di_func) doesn't support multiple samples as an argument", + ), + ) end - function DensityInterface.$di_func(d::MatrixDistribution, x::AbstractArray{<:AbstractMatrix{<:Real}}) - throw(ArgumentError("$(DensityInterface.$di_func) doesn't support multiple samples as an argument")) + function DensityInterface.$di_func( + d::MatrixDistribution, + x::AbstractArray{<:AbstractMatrix{<:Real}}, + ) + throw( + ArgumentError( + "$(DensityInterface.$di_func) doesn't support multiple samples as an argument", + ), + ) end end end diff --git a/ext/DistributionsTestExt.jl b/ext/DistributionsTestExt.jl index fa0a753e5a..ed684be633 100644 --- a/ext/DistributionsTestExt.jl +++ b/ext/DistributionsTestExt.jl @@ -25,7 +25,9 @@ Test that `AbstractMvNormal` implements the expected API. this function. 
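A minimal usage sketch (the two-dimensional covariance matrix and the sample count below are made-up values, and loading `Test` is assumed to activate the `DistributionsTestExt` extension that provides this method):

    using Distributions, Test
    using Distributions.TestUtils: test_mvnormal
    test_mvnormal(MvNormal(zeros(2), [1.0 0.3; 0.3 1.0]), 10_000)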
""" function Distributions.TestUtils.test_mvnormal( - g::AbstractMvNormal, n_tsamples::Int=10^6, rng::Union{AbstractRNG, Nothing}=nothing + g::AbstractMvNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG,Nothing} = nothing, ) d = length(g) μ = mean(g) @@ -52,51 +54,51 @@ function Distributions.TestUtils.test_mvnormal( # sampling @test isa(__rand(rng, g), Vector{Float64}) X = __rand(rng, g, n_tsamples) - emp_mu = vec(mean(X, dims=2)) + emp_mu = vec(mean(X, dims = 2)) Z = X .- emp_mu emp_cov = (Z * Z') * inv(n_tsamples) mean_atols = 8 .* sqrt.(vs ./ n_tsamples) cov_atols = 10 .* sqrt.(vs .* vs') ./ sqrt.(n_tsamples) for i = 1:d - @test isapprox(emp_mu[i], μ[i], atol=mean_atols[i]) + @test isapprox(emp_mu[i], μ[i], atol = mean_atols[i]) end for i = 1:d, j = 1:d - @test isapprox(emp_cov[i,j], Σ[i,j], atol=cov_atols[i,j]) + @test isapprox(emp_cov[i, j], Σ[i, j], atol = cov_atols[i, j]) end X = rand(MersenneTwister(14), g, n_tsamples) Y = rand(MersenneTwister(14), g, n_tsamples) @test X == Y - emp_mu = vec(mean(X, dims=2)) + emp_mu = vec(mean(X, dims = 2)) Z = X .- emp_mu emp_cov = (Z * Z') * inv(n_tsamples) for i = 1:d - @test isapprox(emp_mu[i] , μ[i] , atol=mean_atols[i]) + @test isapprox(emp_mu[i], μ[i], atol = mean_atols[i]) end for i = 1:d, j = 1:d - @test isapprox(emp_cov[i,j], Σ[i,j], atol=cov_atols[i,j]) + @test isapprox(emp_cov[i, j], Σ[i, j], atol = cov_atols[i, j]) end # evaluation of sqmahal & logpdf U = X .- μ - sqm = vec(sum(U .* (Σ \ U), dims=1)) + sqm = vec(sum(U .* (Σ \ U), dims = 1)) for i = 1:min(100, n_tsamples) - @test sqmahal(g, X[:,i]) ≈ sqm[i] + @test sqmahal(g, X[:, i]) ≈ sqm[i] end @test sqmahal(g, X) ≈ sqm lp = -0.5 .* sqm .- 0.5 * (d * log(2.0 * pi) + ldcov) for i = 1:min(100, n_tsamples) - @test logpdf(g, X[:,i]) ≈ lp[i] + @test logpdf(g, X[:, i]) ≈ lp[i] end @test logpdf(g, X) ≈ lp # log likelihood - @test loglikelihood(g, X) ≈ sum(i -> Distributions._logpdf(g, X[:,i]), 1:n_tsamples) + @test loglikelihood(g, X) ≈ sum(i -> Distributions._logpdf(g, X[:, i]), 1:n_tsamples) @test loglikelihood(g, X[:, 1]) ≈ logpdf(g, X[:, 1]) @test loglikelihood(g, [X[:, i] for i in axes(X, 2)]) ≈ loglikelihood(g, X) end -end # module \ No newline at end of file +end # module diff --git a/perf/mixtures.jl b/perf/mixtures.jl index eafe9acae5..5bc1d03b41 100644 --- a/perf/mixtures.jl +++ b/perf/mixtures.jl @@ -1,6 +1,16 @@ using BenchmarkTools: @btime import Random -using Distributions: AbstractMixtureModel, MixtureModel, LogNormal, Normal, pdf, ncomponents, probs, component, components, ContinuousUnivariateDistribution +using Distributions: + AbstractMixtureModel, + MixtureModel, + LogNormal, + Normal, + pdf, + ncomponents, + probs, + component, + components, + ContinuousUnivariateDistribution using Test # v0.22.1 @@ -40,8 +50,8 @@ function improved_no_inbound(d, x) p = probs(d) return sum(enumerate(p)) do (i, pi) if pi > 0 - c = component(d, i) - pdf(c, x) * pi + c = component(d, i) + pdf(c, x) * pi else zero(eltype(p)) end @@ -92,11 +102,7 @@ function sumcomp_cond(d, x) @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0) end -distributions = [ - Normal(-1.0, 0.3), - Normal(0.0, 0.5), - Normal(3.0, 1.0), -] +distributions = [Normal(-1.0, 0.3), Normal(0.0, 0.5), Normal(3.0, 1.0)] priors = [0.25, 0.25, 0.5] @@ -110,7 +116,7 @@ for x in rand(5) @info "evaluate_manual_pdf" vman = @btime evaluate_manual_pdf($distributions, $priors, $x) @info "current_master" - vmaster = @btime current_master($gmm_normal, $x) + vmaster = @btime current_master($gmm_normal, $x) 
@info "improved_version" v1 = @btime improved_version($gmm_normal, $x) @info "improved_no_inbound" @@ -131,8 +137,8 @@ for x in rand(5) @info "===================" end -large_normals = [Normal(rand(), rand()) for _ in 1:1000] -large_probs = [rand() for _ in 1:1000] +large_normals = [Normal(rand(), rand()) for _ = 1:1000] +large_probs = [rand() for _ = 1:1000] large_probs .= large_probs ./ sum(large_probs) gmm_large = MixtureModel(large_normals, large_probs) @@ -145,7 +151,7 @@ for x in rand(5) @info "evaluate_manual_pdf" vman = @btime evaluate_manual_pdf($large_normals, $large_probs, $x) @info "current_master" - vmaster = @btime current_master($gmm_large, $x) + vmaster = @btime current_master($gmm_large, $x) @info "improved_version" v1 = @btime improved_version($gmm_large, $x) @info "improved_no_inbound" @@ -167,11 +173,11 @@ for x in rand(5) end large_het = append!( - ContinuousUnivariateDistribution[Normal(rand(), rand()) for _ in 1:1000], - ContinuousUnivariateDistribution[LogNormal(rand(), rand()) for _ in 1:1000], + ContinuousUnivariateDistribution[Normal(rand(), rand()) for _ = 1:1000], + ContinuousUnivariateDistribution[LogNormal(rand(), rand()) for _ = 1:1000], ) -large_het_probs = [rand() for _ in 1:2000] +large_het_probs = [rand() for _ = 1:2000] large_het_probs .= large_het_probs ./ sum(large_het_probs) gmm_het = MixtureModel(large_het, large_het_probs) @@ -184,7 +190,7 @@ for x in rand(5) @info "evaluate_manual_pdf" vman = @btime evaluate_manual_pdf($large_het, $large_het_probs, $x) @info "current_master" - vmaster = @btime current_master($gmm_het, $x) + vmaster = @btime current_master($gmm_het, $x) @info "improved_version" v1 = @btime improved_version($gmm_het, $x) @info "improved_no_inbound" @@ -208,12 +214,7 @@ end @info "Test with one NaN" -distributions = [ - Normal(-1.0, 0.3), - Normal(0.0, 0.5), - Normal(3.0, 1.0), - Normal(NaN, 1.0), -] +distributions = [Normal(-1.0, 0.3), Normal(0.0, 0.5), Normal(3.0, 1.0), Normal(NaN, 1.0)] priors = [0.25, 0.25, 0.5, 0.0] @@ -223,7 +224,7 @@ Random.seed!(42) for x in rand(1) @info "sampling $x" @info "current_master" - vmaster = @btime current_master($gmm_normal, $x) + vmaster = @btime current_master($gmm_normal, $x) @info "improved_version" v1 = @btime improved_version($gmm_normal, $x) @info "improved_no_inbound" diff --git a/perf/samplers.jl b/perf/samplers.jl index 5dc5fb2692..d390e55067 100644 --- a/perf/samplers.jl +++ b/perf/samplers.jl @@ -13,12 +13,13 @@ end import Distributions: AliasTable, CategoricalDirectSampler -make_sampler(::Type{<:CategoricalDirectSampler}, k::Integer) = CategoricalDirectSampler(fill(1/k, k)) -make_sampler(::Type{<:AliasTable}, k::Integer) = AliasTable(fill(1/k, k)) +make_sampler(::Type{<:CategoricalDirectSampler}, k::Integer) = + CategoricalDirectSampler(fill(1 / k, k)) +make_sampler(::Type{<:AliasTable}, k::Integer) = AliasTable(fill(1 / k, k)) if haskey(ENV, "categorical") && ENV["categorical"] != "skip" @info "Categorical" - + for ST in [CategoricalDirectSampler, AliasTable] mt = Random.MersenneTwister(33) @info string(ST) @@ -34,13 +35,12 @@ end if haskey(ENV, "binomial") && ENV["binomial"] != "skip" @info "Binomial" - - import Distributions: BinomialAliasSampler, BinomialGeomSampler, BinomialTPESampler, BinomialPolySampler - - for ST in [BinomialAliasSampler, - BinomialGeomSampler, - BinomialTPESampler, - BinomialPolySampler] + + import Distributions: + BinomialAliasSampler, BinomialGeomSampler, BinomialTPESampler, BinomialPolySampler + + for ST in + [BinomialAliasSampler, BinomialGeomSampler, 
BinomialTPESampler, BinomialPolySampler] mt = Random.MersenneTwister(33) @info string(ST) nvals = haskey(ENV, "CI") ? [2] : 2 .^ (1:12) @@ -55,9 +55,9 @@ end if haskey(ENV, "poisson") && ENV["poisson"] != "skip" @info "Poisson samplers" - + import Distributions: PoissonCountSampler, PoissonADSampler - + for ST in [PoissonCountSampler, PoissonADSampler] @info string(ST) mt = Random.MersenneTwister(33) @@ -72,9 +72,9 @@ end if haskey(ENV, "exponential") && ENV["exponential"] != "skip" @info "Exponential" - + import Distributions: ExponentialSampler, ExponentialLogUSampler - + for ST in (ExponentialSampler, ExponentialLogUSampler) @info string(ST) mt = Random.MersenneTwister(33) @@ -82,14 +82,14 @@ if haskey(ENV, "exponential") && ENV["exponential"] != "skip" for scale in scale_values s = ST(scale) b = @benchmark rand($mt, $s) - @info "scale: $scale, result: $b" + @info "scale: $scale, result: $b" end end end if haskey(ENV, "gamma") && ENV["gamma"] != "skip" @info "Gamma" - + import Distributions: GammaGDSampler, GammaGSSampler, GammaMTSampler, GammaIPSampler @info "Low" for ST in [GammaGSSampler, GammaIPSampler] @@ -103,7 +103,7 @@ if haskey(ENV, "gamma") && ENV["gamma"] != "skip" @info "α: $α, result: $b" end end - @info "High" + @info "High" for ST in [GammaMTSampler, GammaGDSampler] @info string(ST) mt = Random.MersenneTwister(33) diff --git a/src/Distributions.jl b/src/Distributions.jl index fcf6994347..250a039c4a 100644 --- a/src/Distributions.jl +++ b/src/Distributions.jl @@ -18,9 +18,19 @@ import Random: default_rng, rand!, SamplerRangeInt import Statistics: mean, median, quantile, std, var, cov, cor import StatsAPI -import StatsBase: kurtosis, skewness, entropy, mode, modes, - fit, kldivergence, loglikelihood, dof, span, - params, params! +import StatsBase: + kurtosis, + skewness, + entropy, + mode, + modes, + fit, + kldivergence, + loglikelihood, + dof, + span, + params, + params! import PDMats: dim, PDMat, invquad @@ -31,7 +41,13 @@ import AliasTables export # re-export Statistics - mean, median, quantile, std, var, cov, cor, + mean, + median, + quantile, + std, + var, + cov, + cor, # generic types VariateForm, @@ -224,7 +240,6 @@ export loglikelihood, # log probability of array of IID draws logpdf, # log probability density logpdf!, # evaluate log pdf to provided storage - invscale, # Inverse scale parameter sqmahal, # squared Mahalanobis distance to Gaussian center sqmahal!, # in-place evaluation of sqmahal @@ -273,8 +288,10 @@ export gradlogpdf, # gradient (or derivative) of logpdf(d,x) wrt x # reexport from StatsBase - sample, sample!, # sample from a source array - wsample, wsample! # weighted sampling from a source array + sample, + sample!, # sample from a source array + wsample, + wsample! 
# weighted sampling from a source array ### source files diff --git a/src/censored.jl b/src/censored.jl index 5b8cfac5ee..0db920f8b9 100644 --- a/src/censored.jl +++ b/src/censored.jl @@ -74,20 +74,37 @@ struct Censored{ uncensored::D # the original distribution (uncensored) lower::TL # lower bound upper::TU # upper bound - function Censored(d0::UnivariateDistribution, lower::T, upper::T; check_args::Bool=true) where {T<:Real} - @check_args(Censored, lower ≤ upper) - new{typeof(d0), value_support(typeof(d0)), T, T, T}(d0, lower, upper) + function Censored( + d0::UnivariateDistribution, + lower::T, + upper::T; + check_args::Bool = true, + ) where {T<:Real} + @check_args(Censored, lower ≤ upper) + new{typeof(d0),value_support(typeof(d0)),T,T,T}(d0, lower, upper) end - function Censored(d0::UnivariateDistribution, l::Nothing, u::Real; check_args::Bool=true) - new{typeof(d0), value_support(typeof(d0)), typeof(u), Nothing, typeof(u)}(d0, l, u) + function Censored( + d0::UnivariateDistribution, + l::Nothing, + u::Real; + check_args::Bool = true, + ) + new{typeof(d0),value_support(typeof(d0)),typeof(u),Nothing,typeof(u)}(d0, l, u) end - function Censored(d0::UnivariateDistribution, l::Real, u::Nothing; check_args::Bool=true) - new{typeof(d0), value_support(typeof(d0)), typeof(l), typeof(l), Nothing}(d0, l, u) + function Censored( + d0::UnivariateDistribution, + l::Real, + u::Nothing; + check_args::Bool = true, + ) + new{typeof(d0),value_support(typeof(d0)),typeof(l),typeof(l),Nothing}(d0, l, u) end end -const LeftCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = Censored{D,S,T,T,Nothing} -const RightCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = Censored{D,S,T,Nothing,T} +const LeftCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = + Censored{D,S,T,T,Nothing} +const RightCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = + Censored{D,S,T,Nothing,T} function censored(d::Censored, l::T, u::T) where {T<:Real} return censored( @@ -117,7 +134,8 @@ Base.eltype(::Type{<:Censored{D,S,T}}) where {D,S,T} = promote_type(T, eltype(D) #### Range and Support isupperbounded(d::LeftCensored) = isupperbounded(d.uncensored) -isupperbounded(d::Censored) = isupperbounded(d.uncensored) || _ccdf_inclusive(d.uncensored, d.upper) > 0 +isupperbounded(d::Censored) = + isupperbounded(d.uncensored) || _ccdf_inclusive(d.uncensored, d.upper) > 0 islowerbounded(d::RightCensored) = islowerbounded(d.uncensored) islowerbounded(d::Censored) = islowerbounded(d.uncensored) || cdf(d.uncensored, d.lower) > 0 @@ -145,7 +163,7 @@ function show(io::IO, ::MIME"text/plain", d::Censored) print(io, "Censored(") d0 = d.uncensored uml, namevals = _use_multline_show(d0) - uml ? show_multline(io, d0, namevals; newline=false) : show_oneline(io, d0, namevals) + uml ? 
show_multline(io, d0, namevals; newline = false) : show_oneline(io, d0, namevals) if d.lower === nothing print(io, "; upper=$(d.upper))") elseif d.upper === nothing @@ -191,8 +209,11 @@ function mean(d::Censored) log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) - μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + - xexpy(mean(_to_truncated(d)), log_prob_interval)) + μ = ( + xexpy(lower, log_prob_lower) + + xexpy(upper, log_prob_upper) + + xexpy(mean(_to_truncated(d)), log_prob_interval) + ) return μ end @@ -227,11 +248,17 @@ function var(d::Censored) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) - μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + - xexpy(μ_interval, log_prob_interval)) + μ = ( + xexpy(lower, log_prob_lower) + + xexpy(upper, log_prob_upper) + + xexpy(μ_interval, log_prob_interval) + ) v_interval = var(dtrunc) + abs2(μ_interval - μ) - v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + - xexpy(v_interval, log_prob_interval)) + v = ( + xexpy(abs2(lower - μ), log_prob_lower) + + xexpy(abs2(upper - μ), log_prob_upper) + + xexpy(v_interval, log_prob_interval) + ) return v end @@ -257,7 +284,8 @@ function entropy(d::LeftCensored) log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) - entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + entropy_interval = + xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) @@ -275,7 +303,8 @@ function entropy(d::RightCensored) log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) - entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu + entropy_interval = + xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) @@ -299,7 +328,10 @@ function entropy(d::Censored) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) - entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu + entropy_interval = + xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + + xlogx_pl + + xlogx_pu return entropy_interval + entropy_bound end @@ -352,7 +384,8 @@ function loglikelihood(d::Censored, x::AbstractArray{<:Real}) upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) - log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) + log_prob_upper = + upper === nothing ? 
zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) diff --git a/src/cholesky/lkjcholesky.jl b/src/cholesky/lkjcholesky.jl index 3455bf8bb1..bf6c478964 100644 --- a/src/cholesky/lkjcholesky.jl +++ b/src/cholesky/lkjcholesky.jl @@ -29,7 +29,7 @@ External links Journal of Multivariate Analysis (2009), 100(9): 1989-2001 doi: [10.1016/j.jmva.2009.04.008](https://doi.org/10.1016/j.jmva.2009.04.008) """ -struct LKJCholesky{T <: Real} <: Distribution{CholeskyVariate,Continuous} +struct LKJCholesky{T<:Real} <: Distribution{CholeskyVariate,Continuous} d::Int η::T uplo::Char @@ -40,7 +40,12 @@ end # Constructors # ----------------------------------------------------------------------------- -function LKJCholesky(d::Int, η::Real, _uplo::Union{Char,Symbol} = 'L'; check_args::Bool=true) +function LKJCholesky( + d::Int, + η::Real, + _uplo::Union{Char,Symbol} = 'L'; + check_args::Bool = true, +) @check_args( LKJCholesky, (d, d > 0, "matrix dimension must be positive"), @@ -69,12 +74,18 @@ Base.show(io::IO, d::LKJCholesky) = show(io, d, (:d, :η, :uplo)) # Conversion # ----------------------------------------------------------------------------- -function Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky) where T <: Real +function Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky) where {T<:Real} return LKJCholesky{T}(d.d, T(d.η), d.uplo, T(d.logc0)) end -Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky{T}) where T <: Real = d +Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky{T}) where {T<:Real} = d -function convert(::Type{LKJCholesky{T}}, d::Integer, η::Real, uplo::Char, logc0::Real) where T <: Real +function convert( + ::Type{LKJCholesky{T}}, + d::Integer, + η::Real, + uplo::Char, + logc0::Real, +) where {T<:Real} return LKJCholesky{T}(Int(d), T(η), uplo, T(logc0)) end @@ -109,7 +120,7 @@ function insupport(d::LKJCholesky, R::LinearAlgebra.Cholesky) return true end -function mode(d::LKJCholesky; check_args::Bool=true) +function mode(d::LKJCholesky; check_args::Bool = true) @check_args( LKJCholesky, @setup(η = d.η), @@ -121,7 +132,7 @@ end StatsBase.params(d::LKJCholesky) = (d.d, d.η, d.uplo) -@inline partype(::LKJCholesky{T}) where {T <: Real} = T +@inline partype(::LKJCholesky{T}) where {T<:Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -134,7 +145,7 @@ function logkernel(d::LKJCholesky, R::LinearAlgebra.Cholesky) p == 1 && return c * log(first(factors)) # assuming D = diag(factors) with length(D) = p, # logp = sum(i -> (c - i) * log(D[i]), 2:p) - logp = sum(Iterators.drop(enumerate(diagind(factors)), 1)) do (i, di) + logp = sum(Iterators.drop(enumerate(diagind(factors)), 1)) do (i, di) return (c - i) * log(factors[di]) end return logp @@ -210,7 +221,8 @@ function Random.rand!( d::LKJCholesky, Rs::AbstractArray{<:LinearAlgebra.Cholesky{<:Real}}, ) - allocate = any(!isassigned(Rs, i) for i in eachindex(Rs)) || any(R -> size(R, 1) != d.d, Rs) + allocate = + any(!isassigned(Rs, i) for i in eachindex(Rs)) || any(R -> size(R, 1) != d.d, Rs) return Random.rand!(rng, d, Rs, allocate) end @@ -244,7 +256,7 @@ function _lkj_cholesky_onion_tri!( @assert size(A) == (d, d) A[1, 1] = 1 d > 1 || return A - β = η + (d - 2)//2 + β = η + (d - 2) // 2 # 1. Initialization w0 = 2 * rand(rng, Beta(β, β)) - 1 @inbounds if uplo === :L @@ -254,18 +266,18 @@ function _lkj_cholesky_onion_tri!( end @inbounds A[2, 2] = sqrt(1 - w0^2) # 2. 
Loop, each iteration k adds row/column k+1 - for k in 2:(d - 1) + for k = 2:(d-1) # (a) - β -= 1//2 + β -= 1 // 2 # (b) - y = rand(rng, Beta(k//2, β)) + y = rand(rng, Beta(k // 2, β)) # (c)-(e) # w is directionally uniform vector of length √y - @inbounds w = @views uplo === :L ? A[k + 1, 1:k] : A[1:k, k + 1] + @inbounds w = @views uplo === :L ? A[k+1, 1:k] : A[1:k, k+1] Random.randn!(rng, w) rmul!(w, sqrt(y) / norm(w)) # normalize so new row/column has unit norm - @inbounds A[k + 1, k + 1] = sqrt(1 - y) + @inbounds A[k+1, k+1] = sqrt(1 - y) end # 3. return A diff --git a/src/common.jl b/src/common.jl index 899cca41d2..4921a595dc 100644 --- a/src/common.jl +++ b/src/common.jl @@ -12,8 +12,8 @@ multivariate (vector, `N == 1`) or matrix-variate (matrix, `N == 2`). """ abstract type ArrayLikeVariate{N} <: VariateForm end -const Univariate = ArrayLikeVariate{0} -const Multivariate = ArrayLikeVariate{1} +const Univariate = ArrayLikeVariate{0} +const Multivariate = ArrayLikeVariate{1} const Matrixvariate = ArrayLikeVariate{2} """ @@ -47,7 +47,7 @@ the natural numbers. See also: [`Continuous`](@ref), [`ValueSupport`](@ref) """ -struct Discrete <: ValueSupport end +struct Discrete <: ValueSupport end """ Continuous <: ValueSupport @@ -121,10 +121,11 @@ nsamples(::Type{D}, x::AbstractArray) where {D<:Sampleable{Univariate}} = length nsamples(::Type{D}, x::AbstractVector) where {D<:Sampleable{Multivariate}} = 1 nsamples(::Type{D}, x::AbstractMatrix) where {D<:Sampleable{Multivariate}} = size(x, 2) nsamples(::Type{D}, x::Number) where {D<:Sampleable{Matrixvariate}} = 1 -nsamples(::Type{D}, x::Array{Matrix{T}}) where {D<:Sampleable{Matrixvariate},T<:Number} = length(x) +nsamples(::Type{D}, x::Array{Matrix{T}}) where {D<:Sampleable{Matrixvariate},T<:Number} = + length(x) for func in (:(==), :isequal, :isapprox) - @eval function Base.$func(s1::A, s2::B; kwargs...) where {A<:Sampleable, B<:Sampleable} + @eval function Base.$func(s1::A, s2::B; kwargs...) where {A<:Sampleable,B<:Sampleable} nameof(A) === nameof(B) || return false fields = fieldnames(A) fields === fieldnames(B) || return false @@ -133,14 +134,16 @@ for func in (:(==), :isequal, :isapprox) isdefined(s1, f) && isdefined(s2, f) || return false # perform equivalence check to support types that have no defined equality, such # as `missing` - getfield(s1, f) === getfield(s2, f) || $func(getfield(s1, f), getfield(s2, f); kwargs...) || return false + getfield(s1, f) === getfield(s2, f) || + $func(getfield(s1, f), getfield(s2, f); kwargs...) || + return false end return true end end -function Base.hash(s::S, h::UInt) where S <: Sampleable +function Base.hash(s::S, h::UInt) where {S<:Sampleable} hashed = hash(Sampleable, h) hashed = hash(nameof(S), hashed) @@ -161,20 +164,20 @@ with `cdf`. 
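As a quick orientation, a sketch of how concrete distributions slot into the two type parameters (all names are standard exports; expected results shown as comments):

    using Distributions
    Normal()   isa Distribution{Univariate,Continuous}  # true
    Poisson(3) isa DiscreteUnivariateDistribution       # true (alias defined below)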
""" abstract type Distribution{F<:VariateForm,S<:ValueSupport} <: Sampleable{F,S} end -const UnivariateDistribution{S<:ValueSupport} = Distribution{Univariate,S} +const UnivariateDistribution{S<:ValueSupport} = Distribution{Univariate,S} const MultivariateDistribution{S<:ValueSupport} = Distribution{Multivariate,S} -const MatrixDistribution{S<:ValueSupport} = Distribution{Matrixvariate,S} -const NonMatrixDistribution = Union{UnivariateDistribution, MultivariateDistribution} +const MatrixDistribution{S<:ValueSupport} = Distribution{Matrixvariate,S} +const NonMatrixDistribution = Union{UnivariateDistribution,MultivariateDistribution} -const DiscreteDistribution{F<:VariateForm} = Distribution{F,Discrete} +const DiscreteDistribution{F<:VariateForm} = Distribution{F,Discrete} const ContinuousDistribution{F<:VariateForm} = Distribution{F,Continuous} -const DiscreteUnivariateDistribution = Distribution{Univariate, Discrete} -const ContinuousUnivariateDistribution = Distribution{Univariate, Continuous} -const DiscreteMultivariateDistribution = Distribution{Multivariate, Discrete} -const ContinuousMultivariateDistribution = Distribution{Multivariate, Continuous} -const DiscreteMatrixDistribution = Distribution{Matrixvariate, Discrete} -const ContinuousMatrixDistribution = Distribution{Matrixvariate, Continuous} +const DiscreteUnivariateDistribution = Distribution{Univariate,Discrete} +const ContinuousUnivariateDistribution = Distribution{Univariate,Continuous} +const DiscreteMultivariateDistribution = Distribution{Multivariate,Discrete} +const ContinuousMultivariateDistribution = Distribution{Multivariate,Continuous} +const DiscreteMatrixDistribution = Distribution{Matrixvariate,Discrete} +const ContinuousMatrixDistribution = Distribution{Matrixvariate,Continuous} # allow broadcasting over distribution objects # to be decided: how to handle multivariate/matrixvariate distributions? @@ -218,20 +221,21 @@ usually it is sufficient to implement `logpdf`. See also: [`logpdf`](@ref). """ @inline function pdf( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real,M}, ) where {N,M} if M == N @boundscheck begin - size(x) == size(d) || - throw(DimensionMismatch("inconsistent array dimensions")) + size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end return _pdf(d, x) else @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -259,20 +263,21 @@ size of `x`. See also: [`pdf`](@ref), [`gradlogpdf`](@ref). 
""" @inline function logpdf( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real,M}, ) where {N,M} if M == N @boundscheck begin - size(x) == size(d) || - throw(DimensionMismatch("inconsistent array dimensions")) + size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end return _logpdf(d, x) else @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -308,7 +313,8 @@ Here, `x` can be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds function pdf( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} return map(Base.Fix1(pdf, d), x) end @@ -327,7 +333,8 @@ Here, `x` can be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds function logpdf( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} return map(Base.Fix1(logpdf, d), x) end @@ -357,7 +364,7 @@ See also: [`logpdf!`](@ref). Base.@propagate_inbounds function pdf!( out::AbstractArray{<:Real}, d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N},M} + x::AbstractArray{<:AbstractArray{<:Real,N},M}, ) where {N,M} return map!(Base.Fix1(pdf, d), out, x) end @@ -365,7 +372,7 @@ end Base.@propagate_inbounds function logpdf!( out::AbstractArray{<:Real}, d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N},M} + x::AbstractArray{<:AbstractArray{<:Real,N},M}, ) where {N,M} return map!(Base.Fix1(logpdf, d), out, x) end @@ -376,13 +383,14 @@ end x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of the variates ($M) must be greater than the dimension of the distribution ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of the variates ($M) must be greater than the dimension of the distribution ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) - length(out) == prod(i -> size(x, i), (N + 1):M) || + length(out) == prod(i -> size(x, i), (N+1):M) || throw(DimensionMismatch("inconsistent array dimensions")) end return _pdf!(out, d, x) @@ -425,13 +433,14 @@ See also: [`pdf!`](@ref). 
x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of the variates ($M) must be greater than the dimension of the distribution ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of the variates ($M) must be greater than the dimension of the distribution ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) - length(out) == prod(i -> size(x, i), (N + 1):M) || + length(out) == prod(i -> size(x, i), (N+1):M) || throw(DimensionMismatch("inconsistent array dimensions")) end return _logpdf!(out, d, x) @@ -459,16 +468,18 @@ be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds @inline function loglikelihood( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real,M}, ) where {N,M} if M == N return logpdf(d, x) else @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -476,7 +487,8 @@ Base.@propagate_inbounds @inline function loglikelihood( end end Base.@propagate_inbounds function loglikelihood( - d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} return sum(Base.Fix1(logpdf, d), x) end diff --git a/src/convolution.jl b/src/convolution.jl index cd429e8b42..a083c33669 100644 --- a/src/convolution.jl +++ b/src/convolution.jl @@ -45,7 +45,7 @@ function convolve(d1::Geometric, d2::Geometric) return NegativeBinomial(2, d1.p) end -convolve(d1::Poisson, d2::Poisson) = Poisson(d1.λ + d2.λ) +convolve(d1::Poisson, d2::Poisson) = Poisson(d1.λ + d2.λ) function convolve(d1::DiscreteNonParametric, d2::DiscreteNonParametric) @@ -53,12 +53,12 @@ function convolve(d1::DiscreteNonParametric, d2::DiscreteNonParametric) sort!(support_conv) #for fast index finding below probs1 = probs(d1) probs2 = probs(d2) - p_conv = zeros(Base.promote_eltype(probs1, probs2), length(support_conv)) + p_conv = zeros(Base.promote_eltype(probs1, probs2), length(support_conv)) for (s1, p1) in zip(support(d1), probs(d1)), (s2, p2) in zip(support(d2), probs(d2)) - idx = searchsortedfirst(support_conv, s1+s2) - p_conv[idx] += p1*p2 + idx = searchsortedfirst(support_conv, s1 + s2) + p_conv[idx] += p1 * p2 end - DiscreteNonParametric(support_conv, p_conv,check_args=false) + DiscreteNonParametric(support_conv, p_conv, check_args = false) end # continuous univariate @@ -83,9 +83,11 @@ function convolve(d1::MvNormal, d2::MvNormal) end function _check_convolution_args(p1, p2) - p1 ≈ p2 || throw(ArgumentError( - "$(p1) !≈ $(p2): distribution parameters must be approximately equal", - )) + p1 ≈ p2 || throw( + ArgumentError( + "$(p1) !≈ $(p2): distribution parameters must be approximately equal", + ), + ) end function _check_convolution_shape(d1, d2) diff --git a/src/deprecates.jl b/src/deprecates.jl index a360c977c8..75df5fe5e0 100644 --- a/src/deprecates.jl +++ b/src/deprecates.jl @@ -1,6 +1,6 @@ #### Deprecate on 0.6 (to be removed on 
0.7) -@Base.deprecate expected_logdet meanlogdet +Base.@deprecate expected_logdet meanlogdet function probs(d::DiscreteUnivariateDistribution) Base.depwarn("probs(d::$(typeof(d))) is deprecated. Please use pdf(d) instead.", :probs) @@ -8,35 +8,62 @@ function probs(d::DiscreteUnivariateDistribution) end function Binomial(n::Real, p::Real) - Base.depwarn("Binomial(n::Real, p) is deprecated. Please use Binomial(n::Integer, p) instead.", :Binomial) + Base.depwarn( + "Binomial(n::Real, p) is deprecated. Please use Binomial(n::Integer, p) instead.", + :Binomial, + ) Binomial(Int(n), p) end function Binomial(n::Real) - Base.depwarn("Binomial(n::Real) is deprecated. Please use Binomial(n::Integer) instead.", :Binomial) + Base.depwarn( + "Binomial(n::Real) is deprecated. Please use Binomial(n::Integer) instead.", + :Binomial, + ) Binomial(Int(n)) end function BetaBinomial(n::Real, α::Real, β::Real) - Base.depwarn("BetaBinomial(n::Real, α, β) is deprecated. Please use BetaBinomial(n::Integer, α, β) instead.", :BetaBinomial) + Base.depwarn( + "BetaBinomial(n::Real, α, β) is deprecated. Please use BetaBinomial(n::Integer, α, β) instead.", + :BetaBinomial, + ) BetaBinomial(Int(n), α, β) end # vectorized versions -for fun in [:pdf, :logpdf, - :cdf, :logcdf, - :ccdf, :logccdf, - :invlogcdf, :invlogccdf, - :quantile, :cquantile] +for fun in [ + :pdf, + :logpdf, + :cdf, + :logcdf, + :ccdf, + :logccdf, + :invlogcdf, + :invlogccdf, + :quantile, + :cquantile, +] _fun! = Symbol('_', fun, '!') fun! = Symbol(fun, '!') @eval begin - @deprecate ($_fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= Base.Fix1($fun, d).(X) false - @deprecate ($fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= Base.Fix1($fun, d).(X) false - @deprecate ($fun)(d::UnivariateDistribution, X::AbstractArray{<:Real}) map(Base.Fix1($fun, d), X) + @deprecate ($_fun!)( + r::AbstractArray{<:Real}, + d::UnivariateDistribution, + X::AbstractArray{<:Real}, + ) r .= Base.Fix1($fun, d).(X) false + @deprecate ($fun!)( + r::AbstractArray{<:Real}, + d::UnivariateDistribution, + X::AbstractArray{<:Real}, + ) r .= Base.Fix1($fun, d).(X) false + @deprecate ($fun)(d::UnivariateDistribution, X::AbstractArray{<:Real}) map( + Base.Fix1($fun, d), + X, + ) end end @@ -48,23 +75,39 @@ end @deprecate Wishart(df::Real, S::Cholesky, warn::Bool) Wishart(df, S) # Deprecate 3 arguments expectation and once with function in second place -@deprecate expectation(distr::DiscreteUnivariateDistribution, g::Function, epsilon::Real) expectation(g, distr; epsilon=epsilon) false -@deprecate expectation(distr::ContinuousUnivariateDistribution, g::Function, epsilon::Real) expectation(g, distr) false -@deprecate expectation(distr::Union{UnivariateDistribution,MultivariateDistribution}, g::Function; kwargs...) expectation(g, distr; kwargs...) false +@deprecate expectation(distr::DiscreteUnivariateDistribution, g::Function, epsilon::Real) expectation( + g, + distr; + epsilon = epsilon, +) false +@deprecate expectation(distr::ContinuousUnivariateDistribution, g::Function, epsilon::Real) expectation( + g, + distr, +) false +@deprecate expectation( + distr::Union{UnivariateDistribution,MultivariateDistribution}, + g::Function; + kwargs..., +) expectation(g, distr; kwargs...) 
false # Deprecate `MatrixReshaped` # This is very similar to `Base.@deprecate_binding MatrixReshaped{...} ReshapedDistribution{...}` # However, `Base.@deprecate_binding` does not support type parameters export MatrixReshaped -const MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = ReshapedDistribution{2,S,D} +const MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = + ReshapedDistribution{2,S,D} Base.deprecate(@__MODULE__, :MatrixReshaped) # This is very similar to `Base.@deprecate MatrixReshaped(...) reshape(...)` # We use another (unexported!) alias here to not throw a deprecation warning/error # Unexported aliases do not affect the type printing # In Julia >= 1.6, instead of a new alias we could have defined a method for (ReshapedDistribution{2,S,D} where {S<:ValueSupport,D<:MultivariateDistribution{S}}) -const _MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = ReshapedDistribution{2,S,D} -function _MatrixReshaped(d::MultivariateDistribution, n::Integer, p::Integer=n) - Base.depwarn("`MatrixReshaped(d, n, p)` is deprecated, use `reshape(d, (n, p))` instead.", :MatrixReshaped) +const _MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = + ReshapedDistribution{2,S,D} +function _MatrixReshaped(d::MultivariateDistribution, n::Integer, p::Integer = n) + Base.depwarn( + "`MatrixReshaped(d, n, p)` is deprecated, use `reshape(d, (n, p))` instead.", + :MatrixReshaped, + ) return reshape(d, (n, p)) end diff --git a/src/eachvariate.jl b/src/eachvariate.jl index 583600f36d..37f7f35bf2 100644 --- a/src/eachvariate.jl +++ b/src/eachvariate.jl @@ -7,8 +7,12 @@ end function EachVariate{V}(x::AbstractArray{<:Real,M}) where {V,M} ax = ntuple(i -> axes(x, i + V), Val(M - V)) - T = Base.promote_op(view, typeof(x), ntuple(i -> i <= V ? Colon : eltype(axes(x, i)), Val(M))...) - return EachVariate{V,typeof(x),typeof(ax),T,M-V}(x, ax) + T = Base.promote_op( + view, + typeof(x), + ntuple(i -> i <= V ? Colon : eltype(axes(x, i)), Val(M))..., + ) + return EachVariate{V,typeof(x),typeof(ax),T,M - V}(x, ax) end Base.IteratorSize(::Type{EachVariate{V,P,A,T,N}}) where {V,P,A,T,N} = Base.HasShape{N}() @@ -20,7 +24,8 @@ Base.size(x::EachVariate, d::Int) = 1 <= ndims(x) ? length(axes(x)[d]) : 1 # We don't need `setindex!` (currently), therefore only `getindex` is implemented Base.@propagate_inbounds function Base.getindex( - x::EachVariate{V,P,A,T,N}, I::Vararg{Int,N}, + x::EachVariate{V,P,A,T,N}, + I::Vararg{Int,N}, ) where {V,P,A,T,N} return view(x.parent, ntuple(_ -> Colon(), Val(V))..., I...) 
end diff --git a/src/edgeworth.jl b/src/edgeworth.jl index 8ee6ef2bcc..fa41fbb3cf 100644 --- a/src/edgeworth.jl +++ b/src/edgeworth.jl @@ -13,13 +13,17 @@ struct EdgeworthZ{D<:UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 - function EdgeworthZ{D}(d::UnivariateDistribution, n::Real; check_args::Bool=true) where {D<:UnivariateDistribution} + function EdgeworthZ{D}( + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D<:UnivariateDistribution} @check_args EdgeworthZ (n, n > zero(n)) new{D}(d, n) end end -function EdgeworthZ(d::UnivariateDistribution, n::Real; check_args::Bool=true) - return EdgeworthZ{typeof(d)}(d, n; check_args=check_args) +function EdgeworthZ(d::UnivariateDistribution, n::Real; check_args::Bool = true) + return EdgeworthZ{typeof(d)}(d, n; check_args = check_args) end mean(d::EdgeworthZ) = 0.0 @@ -29,30 +33,35 @@ var(d::EdgeworthZ) = 1.0 function pdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) - x2 = x*x - pdf(Normal(0,1),x)*(1 + s*x*(x2-3.0)/6.0 + - k*(x2*(x2-6.0)+3.0)/24.0 - + s*s*(x2*(x2*(x2-15.0)+45.0)-15.0)/72.0) + x2 = x * x + pdf(Normal(0, 1), x) * ( + 1 + + s * x * (x2 - 3.0) / 6.0 + + k * (x2 * (x2 - 6.0) + 3.0) / 24.0 + + s * s * (x2 * (x2 * (x2 - 15.0) + 45.0) - 15.0) / 72.0 + ) end function cdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) - x2 = x*x - cdf(Normal(0,1),x) - - pdf(Normal(0,1),x)*( - s*(x2-1.0)/6.0 + - k*x*(x2-3.0)/24.0 - + s*s*x*(x2*(x2-10.0)+15.0)/72.0) + x2 = x * x + cdf(Normal(0, 1), x) - + pdf(Normal(0, 1), x) * ( + s * (x2 - 1.0) / 6.0 + + k * x * (x2 - 3.0) / 24.0 + + s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 + ) end function ccdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) - x2 = x*x - ccdf(Normal(0,1),x) + - pdf(Normal(0,1),x)*( - s*(x2-1.0)/6.0 + - k*x*(x2-3.0)/24.0 - + s*s*x*(x2*(x2-10.0)+15.0)/72.0) + x2 = x * x + ccdf(Normal(0, 1), x) + + pdf(Normal(0, 1), x) * ( + s * (x2 - 1.0) / 6.0 + + k * x * (x2 - 3.0) / 24.0 + + s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 + ) end @@ -60,17 +69,17 @@ end function quantile(d::EdgeworthZ, p::Float64) s = skewness(d) k = kurtosis(d) - z = quantile(Normal(0,1),p) - z2 = z*z - z + s*(z2-1)/6.0 + k*z*(z2-3)/24.0 - s*s/36.0*z*(2.0*z2-5.0) + z = quantile(Normal(0, 1), p) + z2 = z * z + z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) end function cquantile(d::EdgeworthZ, p::Float64) s = skewness(d) k = kurtosis(d) - z = cquantile(Normal(0,1),p) - z2 = z*z - z + s*(z2-1)/6.0 + k*z*(z2-3)/24.0 - s*s/36.0*z*(2.0*z2-5.0) + z = cquantile(Normal(0, 1), p) + z2 = z * z + z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) end @@ -79,30 +88,38 @@ end struct EdgeworthSum{D<:UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 - function EdgeworthSum{D}(d::UnivariateDistribution, n::Real; check_args::Bool=true) where {D<:UnivariateDistribution} + function EdgeworthSum{D}( + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D<:UnivariateDistribution} @check_args EdgeworthSum (n, n > zero(n)) new{D}(d, n) end end -function EdgeworthSum(d::UnivariateDistribution, n::Real; check_args::Bool=true) - return EdgeworthSum{typeof(d)}(d, n; check_args=check_args) +function EdgeworthSum(d::UnivariateDistribution, n::Real; check_args::Bool = true) + return EdgeworthSum{typeof(d)}(d, n; check_args = check_args) end -mean(d::EdgeworthSum) = d.n*mean(d.dist) -var(d::EdgeworthSum) = d.n*var(d.dist) +mean(d::EdgeworthSum) = d.n * 
mean(d.dist) +var(d::EdgeworthSum) = d.n * var(d.dist) # Edgeworth approximation of the mean struct EdgeworthMean{D<:UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 - function EdgeworthMean{D}(d::UnivariateDistribution, n::Real; check_args::Bool=true) where {D<:UnivariateDistribution} + function EdgeworthMean{D}( + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D<:UnivariateDistribution} # although n would usually be an integer, no methods require this @check_args EdgeworthMean (n, n > zero(n), "n must be positive") new{D}(d, Float64(n)) end end -function EdgeworthMean(d::UnivariateDistribution, n::Real; check_args::Bool=true) - return EdgeworthMean{typeof(d)}(d, n; check_args=check_args) +function EdgeworthMean(d::UnivariateDistribution, n::Real; check_args::Bool = true) - return EdgeworthMean{typeof(d)}(d, n; check_args=check_args) + return EdgeworthMean{typeof(d)}(d, n; check_args = check_args) end mean(d::EdgeworthMean) = mean(d.dist) @@ -110,12 +127,15 @@ var(d::EdgeworthMean) = var(d.dist) / d.n function pdf(d::EdgeworthAbstract, x::Float64) m, s = mean(d), std(d) - pdf(EdgeworthZ(d.dist,d.n),(x-m)/s)/s + pdf(EdgeworthZ(d.dist, d.n), (x - m) / s) / s end -cdf(d::EdgeworthAbstract, x::Float64) = cdf(EdgeworthZ(d.dist,d.n), (x-mean(d))/std(d)) +cdf(d::EdgeworthAbstract, x::Float64) = cdf(EdgeworthZ(d.dist, d.n), (x - mean(d)) / std(d)) -ccdf(d::EdgeworthAbstract, x::Float64) = ccdf(EdgeworthZ(d.dist,d.n), (x-mean(d))/std(d)) +ccdf(d::EdgeworthAbstract, x::Float64) = + ccdf(EdgeworthZ(d.dist, d.n), (x - mean(d)) / std(d)) -quantile(d::EdgeworthAbstract, p::Float64) = mean(d) + std(d)*quantile(EdgeworthZ(d.dist,d.n), p) -cquantile(d::EdgeworthAbstract, p::Float64) = mean(d) + std(d)*cquantile(EdgeworthZ(d.dist,d.n), p) +quantile(d::EdgeworthAbstract, p::Float64) = + mean(d) + std(d) * quantile(EdgeworthZ(d.dist, d.n), p) +cquantile(d::EdgeworthAbstract, p::Float64) = + mean(d) + std(d) * cquantile(EdgeworthZ(d.dist, d.n), p) diff --git a/src/functionals.jl b/src/functionals.jl index 7ccc80c0e6..37e1250e61 100644 --- a/src/functionals.jl +++ b/src/functionals.jl @@ -3,7 +3,7 @@ function expectation(g, distr::ContinuousUnivariateDistribution; kwargs...) end ## Assuming that discrete distributions only take integer values. -function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real=1e-10) +function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real = 1e-10) mindist, maxdist = extrema(distr) # We want to avoid taking values up to infinity minval = isfinite(mindist) ? 
mindist : quantile(distr, epsilon) @@ -11,13 +11,18 @@ function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real=1e- return sum(x -> pdf(distr, x) * g(x), minval:maxval) end -function expectation(g, distr::MultivariateDistribution; nsamples::Int=100, rng::AbstractRNG=default_rng()) +function expectation( + g, + distr::MultivariateDistribution; + nsamples::Int = 100, + rng::AbstractRNG = default_rng(), +) nsamples > 0 || throw(ArgumentError("number of samples should be > 0")) # We use a function barrier to work around type instability of `sampler(dist)` return mcexpectation(rng, g, sampler(distr), nsamples) end -mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ in 1:n) / n +mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ = 1:n) / n ## Leave undefined until we've implemented a numerical integration procedure # function entropy(distr::UnivariateDistribution) @@ -26,7 +31,11 @@ mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ in 1:n) / n # expectation(distr, x -> -log(f(x))) # end -function kldivergence(p::Distribution{V}, q::Distribution{V}; kwargs...) where {V<:VariateForm} +function kldivergence( + p::Distribution{V}, + q::Distribution{V}; + kwargs..., +) where {V<:VariateForm} return expectation(p; kwargs...) do x logp = logpdf(p, x) return (logp > oftype(logp, -Inf)) * (logp - logpdf(q, x)) diff --git a/src/genericfit.jl b/src/genericfit.jl index b6bce07a6a..42ac0901b5 100644 --- a/src/genericfit.jl +++ b/src/genericfit.jl @@ -1,6 +1,6 @@ # generic functions for distribution fitting -function suffstats(dt::Type{D}, xs...) where D<:Distribution +function suffstats(dt::Type{D}, xs...) where {D<:Distribution} argtypes = tuple(D, map(typeof, xs)...) error("suffstats is not implemented for $argtypes.") end @@ -24,11 +24,18 @@ Here, `w` should be an array with length `n`, where `n` is the number of samples """ fit_mle(D, x, w) -fit_mle(dt::Type{D}, x::AbstractArray) where {D<:UnivariateDistribution} = fit_mle(D, suffstats(D, x)) -fit_mle(dt::Type{D}, x::AbstractArray, w::AbstractArray) where {D<:UnivariateDistribution} = fit_mle(D, suffstats(D, x, w)) - -fit_mle(dt::Type{D}, x::AbstractMatrix) where {D<:MultivariateDistribution} = fit_mle(D, suffstats(D, x)) -fit_mle(dt::Type{D}, x::AbstractMatrix, w::AbstractArray) where {D<:MultivariateDistribution} = fit_mle(D, suffstats(D, x, w)) +fit_mle(dt::Type{D}, x::AbstractArray) where {D<:UnivariateDistribution} = + fit_mle(D, suffstats(D, x)) +fit_mle(dt::Type{D}, x::AbstractArray, w::AbstractArray) where {D<:UnivariateDistribution} = + fit_mle(D, suffstats(D, x, w)) + +fit_mle(dt::Type{D}, x::AbstractMatrix) where {D<:MultivariateDistribution} = + fit_mle(D, suffstats(D, x)) +fit_mle( + dt::Type{D}, + x::AbstractMatrix, + w::AbstractArray, +) where {D<:MultivariateDistribution} = fit_mle(D, suffstats(D, x, w)) """ fit(D, args...) 
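# Illustrative aside (an editorial sketch, not part of the original patch): the
# weighted `fit_mle` form documented above can be exercised as, e.g.,
#     x = rand(Normal(), 100)   # 100 samples
#     w = rand(100)             # one weight per sample
#     fit_mle(Normal, x, w)     # weighted maximum likelihood fit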
diff --git a/src/genericrand.jl b/src/genericrand.jl index 6b3b213f16..25db1a7d69 100644 --- a/src/genericrand.jl +++ b/src/genericrand.jl @@ -34,9 +34,7 @@ function rand(rng::AbstractRNG, s::Sampleable{Univariate}, dims::Dims) out = Array{eltype(s)}(undef, dims) return @inbounds rand!(rng, sampler(s), out) end -function rand( - rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims, -) +function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims) sz = size(s) ax = map(Base.OneTo, dims) out = [Array{eltype(s)}(undef, sz) for _ in Iterators.product(ax...)] @@ -51,9 +49,7 @@ function rand(rng::AbstractRNG, s::Sampleable{Univariate,Continuous}, dims::Dims out = Array{float(eltype(s))}(undef, dims) return @inbounds rand!(rng, sampler(s), out) end -function rand( - rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims, -) +function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims) sz = size(s) ax = map(Base.OneTo, dims) out = [Array{float(eltype(s))}(undef, sz) for _ in Iterators.product(ax...)] @@ -97,10 +93,11 @@ end x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of `x` ($M) must be greater than number of dimensions of `s` ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of `x` ($M) must be greater than number of dimensions of `s` ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(s) || throw(DimensionMismatch("inconsistent array dimensions")) end diff --git a/src/matrix/inversewishart.jl b/src/matrix/inversewishart.jl index 4c605caf25..9794beadab 100644 --- a/src/matrix/inversewishart.jl +++ b/src/matrix/inversewishart.jl @@ -18,7 +18,7 @@ f(\\boldsymbol{\\Sigma}; \\nu,\\boldsymbol{\\Psi}) = ``\\mathbf{H}\\sim \\textrm{W}_p(\\nu, \\mathbf{S})`` if and only if ``\\mathbf{H}^{-1}\\sim \\textrm{IW}_p(\\nu, \\mathbf{S}^{-1})``. 
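A minimal usage sketch (an editorial illustration, not part of the original patch; it assumes the standard matrix constructor of `InverseWishart`):

```julia
using Distributions, LinearAlgebra

d = InverseWishart(5.0, Matrix{Float64}(I, 2, 2))  # df = 5, scale Ψ = I
Σ = rand(d)  # one 2×2 positive definite draw
mean(d)      # Ψ / (df - p - 1), defined for df > p + 1
```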
""" -struct InverseWishart{T<:Real, ST<:AbstractPDMat} <: ContinuousMatrixDistribution +struct InverseWishart{T<:Real,ST<:AbstractPDMat} <: ContinuousMatrixDistribution df::T # degree of freedom Ψ::ST # scale matrix logc0::T # log of normalizing constant @@ -28,13 +28,13 @@ end # Constructors # ----------------------------------------------------------------------------- -function InverseWishart(df::T, Ψ::AbstractPDMat{T}) where T<:Real +function InverseWishart(df::T, Ψ::AbstractPDMat{T}) where {T<:Real} p = size(Ψ, 1) df > p - 1 || throw(ArgumentError("df should be greater than dim - 1.")) logc0 = invwishart_logc0(df, Ψ) R = Base.promote_eltype(T, logc0) prom_Ψ = convert(AbstractArray{R}, Ψ) - InverseWishart{R, typeof(prom_Ψ)}(R(df), prom_Ψ, R(logc0)) + InverseWishart{R,typeof(prom_Ψ)}(R(df), prom_Ψ, R(logc0)) end function InverseWishart(df::Real, Ψ::AbstractPDMat) @@ -56,15 +56,15 @@ show(io::IO, d::InverseWishart) = show_multline(io, d, [(:df, d.df), (:Ψ, Matri # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{InverseWishart{T}}, d::InverseWishart) where T<:Real +function convert(::Type{InverseWishart{T}}, d::InverseWishart) where {T<:Real} P = convert(AbstractArray{T}, d.Ψ) - InverseWishart{T, typeof(P)}(T(d.df), P, T(d.logc0)) + InverseWishart{T,typeof(P)}(T(d.df), P, T(d.logc0)) end Base.convert(::Type{InverseWishart{T}}, d::InverseWishart{T}) where {T<:Real} = d -function convert(::Type{InverseWishart{T}}, df, Ψ::AbstractPDMat, logc0) where T<:Real +function convert(::Type{InverseWishart{T}}, df, Ψ::AbstractPDMat, logc0) where {T<:Real} P = convert(AbstractArray{T}, Ψ) - InverseWishart{T, typeof(P)}(T(df), P, T(logc0)) + InverseWishart{T,typeof(P)}(T(df), P, T(logc0)) end # ----------------------------------------------------------------------------- @@ -94,13 +94,15 @@ mode(d::InverseWishart) = d.Ψ * inv(d.df + size(d, 1) + 1.0) function cov(d::InverseWishart, i::Integer, j::Integer, k::Integer, l::Integer) p, ν, Ψ = (size(d, 1), d.df, Matrix(d.Ψ)) ν > p + 3 || throw(ArgumentError("cov only defined for df > dim + 3")) - inv((ν - p)*(ν - p - 3)*(ν - p - 1)^2)*(2Ψ[i,j]*Ψ[k,l] + (ν-p-1)*(Ψ[i,k]*Ψ[j,l] + Ψ[i,l]*Ψ[k,j])) + inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * + (2Ψ[i, j] * Ψ[k, l] + (ν - p - 1) * (Ψ[i, k] * Ψ[j, l] + Ψ[i, l] * Ψ[k, j])) end function var(d::InverseWishart, i::Integer, j::Integer) p, ν, Ψ = (size(d, 1), d.df, Matrix(d.Ψ)) ν > p + 3 || throw(ArgumentError("var only defined for df > dim + 3")) - inv((ν - p)*(ν - p - 3)*(ν - p - 1)^2)*((ν - p + 1)*Ψ[i,j]^2 + (ν - p - 1)*Ψ[i,i]*Ψ[j,j]) + inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * + ((ν - p + 1) * Ψ[i, j]^2 + (ν - p - 1) * Ψ[i, i] * Ψ[j, j]) end # ----------------------------------------------------------------------------- @@ -144,7 +146,7 @@ end function _rand_params(::Type{InverseWishart}, elty, n::Int, p::Int) n == p || throw(ArgumentError("dims must be equal for InverseWishart")) - ν = elty( n + 3 + abs(10randn()) ) - Ψ = (X = 2rand(elty, n, n) .- 1 ; X * X') + ν = elty(n + 3 + abs(10randn())) + Ψ = (X = 2rand(elty, n, n) .- 1; X * X') return ν, Ψ end diff --git a/src/matrix/lkj.jl b/src/matrix/lkj.jl index e56b7888e0..34672ee3a5 100644 --- a/src/matrix/lkj.jl +++ b/src/matrix/lkj.jl @@ -22,7 +22,7 @@ If ``\\eta = 1``, then the LKJ distribution is uniform over if a Cholesky factor of the correlation matrix is desired, it is more efficient to use [`LKJCholesky`](@ref), which avoids factorizing the matrix. 
""" -struct LKJ{T <: Real, D <: Integer} <: ContinuousMatrixDistribution +struct LKJ{T<:Real,D<:Integer} <: ContinuousMatrixDistribution d::D η::T logc0::T @@ -32,7 +32,7 @@ end # Constructors # ----------------------------------------------------------------------------- -function LKJ(d::Integer, η::Real; check_args::Bool=true) +function LKJ(d::Integer, η::Real; check_args::Bool = true) @check_args( LKJ, (d, d > 0, "matrix dimension must be positive."), @@ -40,7 +40,7 @@ function LKJ(d::Integer, η::Real; check_args::Bool=true) ) logc0 = lkj_logc0(d, η) T = Base.promote_eltype(η, logc0) - LKJ{T, typeof(d)}(d, T(η), T(logc0)) + LKJ{T,typeof(d)}(d, T(η), T(logc0)) end # ----------------------------------------------------------------------------- @@ -53,13 +53,13 @@ show(io::IO, d::LKJ) = show_multline(io, d, [(:d, d.d), (:η, d.η)]) # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{LKJ{T}}, d::LKJ) where T <: Real - LKJ{T, typeof(d.d)}(d.d, T(d.η), T(d.logc0)) +function convert(::Type{LKJ{T}}, d::LKJ) where {T<:Real} + LKJ{T,typeof(d.d)}(d.d, T(d.η), T(d.logc0)) end Base.convert(::Type{LKJ{T}}, d::LKJ{T}) where {T<:Real} = d -function convert(::Type{LKJ{T}}, d::Integer, η, logc0) where T <: Real - LKJ{T, typeof(d)}(d, T(η), T(logc0)) +function convert(::Type{LKJ{T}}, d::Integer, η, logc0) where {T<:Real} + LKJ{T,typeof(d)}(d, T(η), T(logc0)) end # ----------------------------------------------------------------------------- @@ -71,11 +71,12 @@ size(d::LKJ) = (d.d, d.d) rank(d::LKJ) = d.d -insupport(d::LKJ, R::AbstractMatrix) = isreal(R) && size(R) == size(d) && isone(Diagonal(R)) && isposdef(R) +insupport(d::LKJ, R::AbstractMatrix) = + isreal(R) && size(R) == size(d) && isone(Diagonal(R)) && isposdef(R) mean(d::LKJ) = Matrix{partype(d)}(I, d.d, d.d) -function mode(d::LKJ; check_args::Bool=true) +function mode(d::LKJ; check_args::Bool = true) @check_args( LKJ, @setup((_, η) = params(d)), @@ -93,7 +94,7 @@ end params(d::LKJ) = (d.d, d.η) -@inline partype(d::LKJ{T}) where {T <: Real} = T +@inline partype(d::LKJ{T}) where {T<:Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -134,7 +135,7 @@ function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.defa R[1, 2] = 2u - 1 R[2, 1] = R[1, 2] # 2. - for k in 2:d - 1 + for k = 2:d-1 # (a) β -= 0.5 # (b) @@ -147,8 +148,8 @@ function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.defa A = cholesky(R[1:k, 1:k]).L z = A * w # (e) - R[1:k, k + 1] = z - R[k + 1, 1:k] = z' + R[1:k, k+1] = z + R[k+1, 1:k] = z' end # 3. 
return R @@ -190,30 +191,36 @@ end function lkj_onion_loginvconst(d::Integer, η::Real) # Equation (17) in LKJ (2009 JMA) T = float(Base.promote_typeof(d, η)) - h = T(1//2) + h = T(1 // 2) α = η + h * d - 1 - loginvconst = (2*η + d - 3)*T(logtwo) + (T(logπ) / 4) * (d * (d - 1) - 2) + logbeta(α, α) - (d - 2) * loggamma(η + h * (d - 1)) - for k in 2:(d - 1) + loginvconst = + (2 * η + d - 3) * T(logtwo) + (T(logπ) / 4) * (d * (d - 1) - 2) + logbeta(α, α) - + (d - 2) * loggamma(η + h * (d - 1)) + for k = 2:(d-1) loginvconst += loggamma(η + h * (d - 1 - k)) end return loginvconst end -function lkj_onion_loginvconst_uniform_odd(d::Integer, ::Type{T}) where {T <: Real} +function lkj_onion_loginvconst_uniform_odd(d::Integer, ::Type{T}) where {T<:Real} # Theorem 5 in LKJ (2009 JMA) - h = T(1//2) - loginvconst = (d - 1) * ((d + 1) * (T(logπ) / 4) - (d - 1) * (T(logtwo) / 4) - loggamma(h * (d + 1))) - for k in 2:2:(d - 1) + h = T(1 // 2) + loginvconst = + (d - 1) * + ((d + 1) * (T(logπ) / 4) - (d - 1) * (T(logtwo) / 4) - loggamma(h * (d + 1))) + for k = 2:2:(d-1) loginvconst += loggamma(T(k)) end return loginvconst end -function lkj_onion_loginvconst_uniform_even(d::Integer, ::Type{T}) where {T <: Real} +function lkj_onion_loginvconst_uniform_even(d::Integer, ::Type{T}) where {T<:Real} # Theorem 5 in LKJ (2009 JMA) - h = T(1//2) - loginvconst = d * ((d - 2) * (T(logπ) / 4) + (3 * d - 4) * (T(logtwo) / 4) + loggamma(h * d)) - (d - 1) * loggamma(T(d)) - for k in 2:2:(d - 2) + h = T(1 // 2) + loginvconst = + d * ((d - 2) * (T(logπ) / 4) + (3 * d - 4) * (T(logtwo) / 4) + loggamma(h * d)) - + (d - 1) * loggamma(T(d)) + for k = 2:2:(d-2) loginvconst += loggamma(k) end return loginvconst @@ -223,7 +230,7 @@ function lkj_vine_loginvconst(d::Integer, η::Real) # Equation (16) in LKJ (2009 JMA) expsum = zero(η) betasum = zero(η) - for k in 1:d - 1 + for k = 1:d-1 α = η + 0.5(d - k - 1) expsum += (2η - 2 + d - k) * (d - k) betasum += (d - k) * logbeta(α, α) @@ -236,9 +243,9 @@ function lkj_vine_loginvconst_uniform(d::Integer) # Equation after (16) in LKJ (2009 JMA) expsum = 0.0 betasum = 0.0 - for k in 1:d - 1 + for k = 1:d-1 α = (k + 1) / 2 - expsum += k ^ 2 + expsum += k^2 betasum += k * logbeta(α, α) end loginvconst = expsum * logtwo + betasum @@ -248,8 +255,8 @@ end function lkj_loginvconst_alt(d::Integer, η::Real) # Third line in first proof of Section 3.3 in LKJ (2009 JMA) loginvconst = zero(η) - for k in 1:d - 1 - loginvconst += 0.5k*logπ + loggamma(η + 0.5(d - 1 - k)) - loggamma(η + 0.5(d - 1)) + for k = 1:d-1 + loginvconst += 0.5k * logπ + loggamma(η + 0.5(d - 1 - k)) - loggamma(η + 0.5(d - 1)) end return loginvconst end @@ -257,8 +264,8 @@ end function corr_logvolume(n::Integer) # https://doi.org/10.4169/amer.math.monthly.123.9.909 logvol = 0.0 - for k in 1:n - 1 - logvol += 0.5k*logπ + k*loggamma((k+1)/2) - k*loggamma((k+2)/2) + for k = 1:n-1 + logvol += 0.5k * logπ + k * loggamma((k + 1) / 2) - k * loggamma((k + 2) / 2) end return logvol end diff --git a/src/matrix/matrixbeta.jl b/src/matrix/matrixbeta.jl index 588c818909..1c694c1be9 100644 --- a/src/matrix/matrixbeta.jl +++ b/src/matrix/matrixbeta.jl @@ -25,7 +25,7 @@ are independent, and we use ``\\mathcal{L}(\\cdot)`` to denote the lower Cholesk has ``\\mathbf{U}\\sim \\textrm{MB}_p(n_1/2, n_2/2)``. 
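A minimal usage sketch (an editorial illustration, not part of the original patch):

```julia
using Distributions

d = MatrixBeta(3, 6.0, 6.0)  # p = 3; n1 and n2 must both exceed p - 1
U = rand(d)                  # U and I - U are both positive definite
mean(d)                      # (n1 / (n1 + n2)) * I
```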
""" -struct MatrixBeta{T <: Real, TW} <: ContinuousMatrixDistribution +struct MatrixBeta{T<:Real,TW} <: ContinuousMatrixDistribution W1::TW W2::TW logc0::T @@ -42,30 +42,30 @@ function MatrixBeta(p::Int, n1::Real, n2::Real) Ip = ScalMat(p, one(T)) W1 = Wishart(T(n1), Ip) W2 = Wishart(T(n2), Ip) - MatrixBeta{T, typeof(W1)}(W1, W2, T(logc0)) + MatrixBeta{T,typeof(W1)}(W1, W2, T(logc0)) end # ----------------------------------------------------------------------------- # REPL display # ----------------------------------------------------------------------------- - show(io::IO, d::MatrixBeta) = show_multline(io, d, [(:n1, d.W1.df), (:n2, d.W2.df)]) +show(io::IO, d::MatrixBeta) = show_multline(io, d, [(:n1, d.W1.df), (:n2, d.W2.df)]) # ----------------------------------------------------------------------------- # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixBeta{T}}, d::MatrixBeta) where T <: Real +function convert(::Type{MatrixBeta{T}}, d::MatrixBeta) where {T<:Real} W1 = convert(Wishart{T}, d.W1) W2 = convert(Wishart{T}, d.W2) - MatrixBeta{T, typeof(W1)}(W1, W2, T(d.logc0)) + MatrixBeta{T,typeof(W1)}(W1, W2, T(d.logc0)) end Base.convert(::Type{MatrixBeta{T}}, d::MatrixBeta{T}) where {T<:Real} = d -function convert(::Type{MatrixBeta{T}}, W1::Wishart, W2::Wishart, logc0) where T <: Real +function convert(::Type{MatrixBeta{T}}, W1::Wishart, W2::Wishart, logc0) where {T<:Real} WW1 = convert(Wishart{T}, W1) WW2 = convert(Wishart{T}, W2) - MatrixBeta{T, typeof(WW1)}(WW1, WW2, T(logc0)) + MatrixBeta{T,typeof(WW1)}(WW1, WW2, T(logc0)) end # ----------------------------------------------------------------------------- @@ -76,27 +76,31 @@ size(d::MatrixBeta) = size(d.W1) rank(d::MatrixBeta) = size(d, 1) -insupport(d::MatrixBeta, U::AbstractMatrix) = isreal(U) && size(U) == size(d) && isposdef(U) && isposdef(I - U) +insupport(d::MatrixBeta, U::AbstractMatrix) = + isreal(U) && size(U) == size(d) && isposdef(U) && isposdef(I - U) params(d::MatrixBeta) = (size(d, 1), d.W1.df, d.W2.df) mean(d::MatrixBeta) = ((p, n1, n2) = params(d); Matrix((n1 / (n1 + n2)) * I, p, p)) -@inline partype(d::MatrixBeta{T}) where {T <: Real} = T +@inline partype(d::MatrixBeta{T}) where {T<:Real} = T # Konno (1988 JJSS) Corollary 3.3.i function cov(d::MatrixBeta, i::Integer, j::Integer, k::Integer, l::Integer) p, n1, n2 = params(d) n = n1 + n2 Ω = Matrix{partype(d)}(I, p, p) - n1*n2*inv(n*(n - 1)*(n + 2))*(-(2/n)*Ω[i,j]*Ω[k,l] + Ω[j,l]*Ω[i,k] + Ω[i,l]*Ω[k,j]) + n1 * + n2 * + inv(n * (n - 1) * (n + 2)) * + (-(2 / n) * Ω[i, j] * Ω[k, l] + Ω[j, l] * Ω[i, k] + Ω[i, l] * Ω[k, j]) end function var(d::MatrixBeta, i::Integer, j::Integer) p, n1, n2 = params(d) n = n1 + n2 Ω = Matrix{partype(d)}(I, p, p) - n1*n2*inv(n*(n - 1)*(n + 2))*((1 - (2/n))*Ω[i,j]^2 + Ω[j,j]*Ω[i,i]) + n1 * n2 * inv(n * (n - 1) * (n + 2)) * ((1 - (2 / n)) * Ω[i, j]^2 + Ω[j, j] * Ω[i, i]) end # ----------------------------------------------------------------------------- @@ -120,10 +124,10 @@ end # Mitra (1970 Sankhyā) function _rand!(rng::AbstractRNG, d::MatrixBeta, A::AbstractMatrix) - S1 = PDMat( rand(rng, d.W1) ) - S2 = PDMat( rand(rng, d.W2) ) - S = S1 + S2 - invL = Matrix( inv(S.chol.L) ) + S1 = PDMat(rand(rng, d.W1)) + S2 = PDMat(rand(rng, d.W2)) + S = S1 + S2 + invL = Matrix(inv(S.chol.L)) A .= X_A_Xt(S1, invL) end @@ -139,7 +143,7 @@ end function _rand_params(::Type{MatrixBeta}, elty, n::Int, p::Int) n == p || throw(ArgumentError("dims must be equal for MatrixBeta")) - n1 = elty( n 
+ 1 + abs(10randn()) ) - n2 = elty( n + 1 + abs(10randn()) ) + n1 = elty(n + 1 + abs(10randn())) + n2 = elty(n + 1 + abs(10randn())) return n, n1, n2 end diff --git a/src/matrix/matrixfdist.jl b/src/matrix/matrixfdist.jl index 5a7e18efb7..6fa9f64616 100644 --- a/src/matrix/matrixfdist.jl +++ b/src/matrix/matrixfdist.jl @@ -30,7 +30,7 @@ is given by then the marginal distribution of ``\\boldsymbol{\\Sigma}`` is ``\\textrm{MF}_{p}(n_1/2,n_2/2,\\mathbf{B})``. """ -struct MatrixFDist{T <: Real, TW <: Wishart} <: ContinuousMatrixDistribution +struct MatrixFDist{T<:Real,TW<:Wishart} <: ContinuousMatrixDistribution W::TW n2::T logc0::T @@ -42,36 +42,40 @@ end function MatrixFDist(n1::Real, n2::Real, B::AbstractPDMat) p = size(B, 1) - n1 > p - 1 || throw(ArgumentError("first degrees of freedom must be larger than $(p - 1)")) - n2 > p - 1 || throw(ArgumentError("second degrees of freedom must be larger than $(p - 1)")) + n1 > p - 1 || + throw(ArgumentError("first degrees of freedom must be larger than $(p - 1)")) + n2 > p - 1 || + throw(ArgumentError("second degrees of freedom must be larger than $(p - 1)")) logc0 = matrixfdist_logc0(n1, n2, B) T = Base.promote_eltype(n1, n2, logc0, B) prom_B = convert(AbstractArray{T}, B) W = Wishart(T(n1), prom_B) - MatrixFDist{T, typeof(W)}(W, T(n2), T(logc0)) + MatrixFDist{T,typeof(W)}(W, T(n2), T(logc0)) end -MatrixFDist(n1::Real, n2::Real, B::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixFDist(n1, n2, PDMat(B)) +MatrixFDist(n1::Real, n2::Real, B::Union{AbstractMatrix,LinearAlgebra.Cholesky}) = + MatrixFDist(n1, n2, PDMat(B)) # ----------------------------------------------------------------------------- # REPL display # ----------------------------------------------------------------------------- - show(io::IO, d::MatrixFDist) = show_multline(io, d, [(:n1, d.W.df), (:n2, d.n2), (:B, Matrix(d.W.S))]) +show(io::IO, d::MatrixFDist) = + show_multline(io, d, [(:n1, d.W.df), (:n2, d.n2), (:B, Matrix(d.W.S))]) # ----------------------------------------------------------------------------- # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixFDist{T}}, d::MatrixFDist) where T <: Real +function convert(::Type{MatrixFDist{T}}, d::MatrixFDist) where {T<:Real} W = convert(Wishart{T}, d.W) - MatrixFDist{T, typeof(W)}(W, T(d.n2), T(d.logc0)) + MatrixFDist{T,typeof(W)}(W, T(d.n2), T(d.logc0)) end Base.convert(::Type{MatrixFDist{T}}, d::MatrixFDist{T}) where {T<:Real} = d -function convert(::Type{MatrixFDist{T}}, W::Wishart, n2, logc0) where T <: Real +function convert(::Type{MatrixFDist{T}}, W::Wishart, n2, logc0) where {T<:Real} WW = convert(Wishart{T}, W) - MatrixFDist{T, typeof(WW)}(WW, T(n2), T(logc0)) + MatrixFDist{T,typeof(WW)}(WW, T(n2), T(logc0)) end # ----------------------------------------------------------------------------- @@ -82,7 +86,8 @@ size(d::MatrixFDist) = size(d.W) rank(d::MatrixFDist) = size(d, 1) -insupport(d::MatrixFDist, Σ::AbstractMatrix) = isreal(Σ) && size(Σ) == size(d) && isposdef(Σ) +insupport(d::MatrixFDist, Σ::AbstractMatrix) = + isreal(Σ) && size(Σ) == size(d) && isposdef(Σ) params(d::MatrixFDist) = (d.W.df, d.n2, d.W.S) @@ -93,7 +98,7 @@ function mean(d::MatrixFDist) return (n1 / (n2 - p - 1)) * Matrix(B) end -@inline partype(d::MatrixFDist{T}) where {T <: Real} = T +@inline partype(d::MatrixFDist{T}) where {T<:Real} = T # Konno (1988 JJSS) Corollary 2.4.i function cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) @@ -102,7 +107,10 @@ function 
cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) n2 > p + 3 || throw(ArgumentError("cov only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) - n1*(n - p - 1)*inv((n2 - p)*(n2 - p - 1)*(n2 - p - 3))*(2inv(n2 - p - 1)*B[i,j]*B[k,l] + B[j,l]*B[i,k] + B[i,l]*B[k,j]) + n1 * + (n - p - 1) * + inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * + (2inv(n2 - p - 1) * B[i, j] * B[k, l] + B[j, l] * B[i, k] + B[i, l] * B[k, j]) end function var(d::MatrixFDist, i::Integer, j::Integer) @@ -111,7 +119,10 @@ function var(d::MatrixFDist, i::Integer, j::Integer) n2 > p + 3 || throw(ArgumentError("var only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) - n1*(n - p - 1)*inv((n2 - p)*(n2 - p - 1)*(n2 - p - 3))*((2inv(n2 - p - 1) + 1)*B[i,j]^2 + B[j,j]*B[i,i]) + n1 * + (n - p - 1) * + inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * + ((2inv(n2 - p - 1) + 1) * B[i, j]^2 + B[j, j] * B[i, i]) end # ----------------------------------------------------------------------------- @@ -138,14 +149,14 @@ end function _rand!(rng::AbstractRNG, d::MatrixFDist, A::AbstractMatrix) Ψ = rand(rng, d.W) - A .= rand(rng, InverseWishart(d.n2, Ψ) ) + A .= rand(rng, InverseWishart(d.n2, Ψ)) end # ----------------------------------------------------------------------------- # Transformation # ----------------------------------------------------------------------------- -inv(d::MatrixFDist) = ( (n1, n2, B) = params(d); MatrixFDist(n2, n1, inv(B)) ) +inv(d::MatrixFDist) = ((n1, n2, B) = params(d); MatrixFDist(n2, n1, inv(B))) # ----------------------------------------------------------------------------- # Test utils @@ -161,8 +172,8 @@ end function _rand_params(::Type{MatrixFDist}, elty, n::Int, p::Int) n == p || throw(ArgumentError("dims must be equal for MatrixFDist")) - n1 = elty( n + 1 + abs(10randn()) ) - n2 = elty( n + 3 + abs(10randn()) ) + n1 = elty(n + 1 + abs(10randn())) + n2 = elty(n + 3 + abs(10randn())) B = (X = 2rand(elty, n, n) .- 1; X * X') return n1, n2, B end diff --git a/src/matrix/matrixnormal.jl b/src/matrix/matrixnormal.jl index 9d480bd69d..38e9c421d0 100644 --- a/src/matrix/matrixnormal.jl +++ b/src/matrix/matrixnormal.jl @@ -17,7 +17,8 @@ f(\\mathbf{X};\\mathbf{M}, \\mathbf{U}, \\mathbf{V}) = \\frac{\\exp\\left( -\\fr ``\\mathbf{X}\\sim \\textrm{MN}_{n,p}(\\mathbf{M},\\mathbf{U},\\mathbf{V})`` if and only if ``\\text{vec}(\\mathbf{X})\\sim \\textrm{N}(\\text{vec}(\\mathbf{M}),\\mathbf{V}\\otimes\\mathbf{U})``. 
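A minimal usage sketch (an editorial illustration, not part of the original patch):

```julia
using Distributions

d = MatrixNormal(2, 3)  # zero mean with identity row and column covariances
X = rand(d)             # one 2×3 draw
logpdf(d, X)
```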
""" -struct MatrixNormal{T <: Real, TM <: AbstractMatrix, TU <: AbstractPDMat, TV <: AbstractPDMat} <: ContinuousMatrixDistribution +struct MatrixNormal{T<:Real,TM<:AbstractMatrix,TU<:AbstractPDMat,TV<:AbstractPDMat} <: + ContinuousMatrixDistribution M::TM U::TU V::TV @@ -28,7 +29,11 @@ end # Constructors # ----------------------------------------------------------------------------- -function MatrixNormal(M::AbstractMatrix{T}, U::AbstractPDMat{T}, V::AbstractPDMat{T}) where T <: Real +function MatrixNormal( + M::AbstractMatrix{T}, + U::AbstractPDMat{T}, + V::AbstractPDMat{T}, +) where {T<:Real} n, p = size(M) n == size(U, 1) || throw(ArgumentError("Number of rows of M must equal dim of U.")) p == size(V, 1) || throw(ArgumentError("Number of columns of M must equal dim of V.")) @@ -37,43 +42,72 @@ function MatrixNormal(M::AbstractMatrix{T}, U::AbstractPDMat{T}, V::AbstractPDMa prom_M = convert(AbstractArray{R}, M) prom_U = convert(AbstractArray{R}, U) prom_V = convert(AbstractArray{R}, V) - MatrixNormal{R, typeof(prom_M), typeof(prom_U), typeof(prom_V)}(prom_M, prom_U, prom_V, R(logc0)) + MatrixNormal{R,typeof(prom_M),typeof(prom_U),typeof(prom_V)}( + prom_M, + prom_U, + prom_V, + R(logc0), + ) end function MatrixNormal(M::AbstractMatrix, U::AbstractPDMat, V::AbstractPDMat) T = Base.promote_eltype(M, U, V) - MatrixNormal(convert(AbstractArray{T}, M), convert(AbstractArray{T}, U), convert(AbstractArray{T}, V)) + MatrixNormal( + convert(AbstractArray{T}, M), + convert(AbstractArray{T}, U), + convert(AbstractArray{T}, V), + ) end -MatrixNormal(M::AbstractMatrix, U::Union{AbstractMatrix, LinearAlgebra.Cholesky}, V::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixNormal(M, PDMat(U), PDMat(V)) -MatrixNormal(M::AbstractMatrix, U::Union{AbstractMatrix, LinearAlgebra.Cholesky}, V::AbstractPDMat) = MatrixNormal(M, PDMat(U), V) -MatrixNormal(M::AbstractMatrix, U::AbstractPDMat, V::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixNormal(M, U, PDMat(V)) - -MatrixNormal(m::Int, n::Int) = MatrixNormal(zeros(m, n), Matrix(1.0I, m, m), Matrix(1.0I, n, n)) +MatrixNormal( + M::AbstractMatrix, + U::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + V::Union{AbstractMatrix,LinearAlgebra.Cholesky}, +) = MatrixNormal(M, PDMat(U), PDMat(V)) +MatrixNormal( + M::AbstractMatrix, + U::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + V::AbstractPDMat, +) = MatrixNormal(M, PDMat(U), V) +MatrixNormal( + M::AbstractMatrix, + U::AbstractPDMat, + V::Union{AbstractMatrix,LinearAlgebra.Cholesky}, +) = MatrixNormal(M, U, PDMat(V)) + +MatrixNormal(m::Int, n::Int) = + MatrixNormal(zeros(m, n), Matrix(1.0I, m, m), Matrix(1.0I, n, n)) # ----------------------------------------------------------------------------- # REPL display # ----------------------------------------------------------------------------- -show(io::IO, d::MatrixNormal) = show_multline(io, d, [(:M, d.M), (:U, Matrix(d.U)), (:V, Matrix(d.V))]) +show(io::IO, d::MatrixNormal) = + show_multline(io, d, [(:M, d.M), (:U, Matrix(d.U)), (:V, Matrix(d.V))]) # ----------------------------------------------------------------------------- # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixNormal{T}}, d::MatrixNormal) where T <: Real +function convert(::Type{MatrixNormal{T}}, d::MatrixNormal) where {T<:Real} MM = convert(AbstractArray{T}, d.M) UU = convert(AbstractArray{T}, d.U) VV = convert(AbstractArray{T}, d.V) - MatrixNormal{T, typeof(MM), typeof(UU), typeof(VV)}(MM, UU, VV, T(d.logc0)) + 
MatrixNormal{T,typeof(MM),typeof(UU),typeof(VV)}(MM, UU, VV, T(d.logc0)) end Base.convert(::Type{MatrixNormal{T}}, d::MatrixNormal{T}) where {T<:Real} = d -function convert(::Type{MatrixNormal{T}}, M::AbstractMatrix, U::AbstractPDMat, V::AbstractPDMat, logc0) where T <: Real +function convert( + ::Type{MatrixNormal{T}}, + M::AbstractMatrix, + U::AbstractPDMat, + V::AbstractPDMat, + logc0, +) where {T<:Real} MM = convert(AbstractArray{T}, M) UU = convert(AbstractArray{T}, U) VV = convert(AbstractArray{T}, V) - MatrixNormal{T, typeof(MM), typeof(UU), typeof(VV)}(MM, UU, VV, T(logc0)) + MatrixNormal{T,typeof(MM),typeof(UU),typeof(VV)}(MM, UU, VV, T(logc0)) end # ----------------------------------------------------------------------------- @@ -82,7 +116,7 @@ end size(d::MatrixNormal) = size(d.M) -rank(d::MatrixNormal) = minimum( size(d) ) +rank(d::MatrixNormal) = minimum(size(d)) insupport(d::MatrixNormal, X::AbstractMatrix) = isreal(X) && size(X) == size(d) @@ -111,8 +145,8 @@ function matrixnormal_logc0(U::AbstractPDMat, V::AbstractPDMat) end function logkernel(d::MatrixNormal, X::AbstractMatrix) - A = X - d.M - -0.5 * tr( (d.V \ A') * (d.U \ A) ) + A = X - d.M + -0.5 * tr((d.V \ A') * (d.U \ A)) end # ----------------------------------------------------------------------------- @@ -137,13 +171,15 @@ function _univariate(d::MatrixNormal) check_univariate(d) M, U, V = params(d) μ = M[1] - σ = sqrt( Matrix(U)[1] * Matrix(V)[1] ) + σ = sqrt(Matrix(U)[1] * Matrix(V)[1]) return Normal(μ, σ) end function _multivariate(d::MatrixNormal) n, p = size(d) - all([n, p] .> 1) && throw(ArgumentError("Row or col dim of `MatrixNormal` must be 1 to coerce to `MvNormal`")) + all([n, p] .> 1) && throw( + ArgumentError("Row or col dim of `MatrixNormal` must be 1 to coerce to `MvNormal`"), + ) return vec(d) end diff --git a/src/matrix/matrixtdist.jl b/src/matrix/matrixtdist.jl index e37119603c..85beb7fc1b 100644 --- a/src/matrix/matrixtdist.jl +++ b/src/matrix/matrixtdist.jl @@ -35,7 +35,8 @@ is given by then the marginal distribution of ``\\mathbf{X}`` is ``\\textrm{MT}_{n,p}(\\nu,\\mathbf{M},\\boldsymbol{\\Sigma},\\boldsymbol{\\Omega})``. 
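A minimal usage sketch (an editorial illustration, not part of the original patch):

```julia
using Distributions, LinearAlgebra

d = MatrixTDist(5.0, zeros(2, 3), Matrix(1.0I, 2, 2), Matrix(1.0I, 3, 3))
X = rand(d)  # one heavy-tailed 2×3 draw
mode(d)      # equals M
```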
""" -struct MatrixTDist{T <: Real, TM <: AbstractMatrix, TΣ <: AbstractPDMat, TΩ <: AbstractPDMat} <: ContinuousMatrixDistribution +struct MatrixTDist{T<:Real,TM<:AbstractMatrix,TΣ<:AbstractPDMat,TΩ<:AbstractPDMat} <: + ContinuousMatrixDistribution ν::T M::TM Σ::TΣ @@ -47,7 +48,12 @@ end # Constructors # ----------------------------------------------------------------------------- -function MatrixTDist(ν::T, M::AbstractMatrix{T}, Σ::AbstractPDMat{T}, Ω::AbstractPDMat{T}) where T <: Real +function MatrixTDist( + ν::T, + M::AbstractMatrix{T}, + Σ::AbstractPDMat{T}, + Ω::AbstractPDMat{T}, +) where {T<:Real} n, p = size(M) 0 < ν < Inf || throw(ArgumentError("degrees of freedom must be positive and finite.")) n == size(Σ, 1) || throw(ArgumentError("Number of rows of M must equal dim of Σ.")) @@ -57,41 +63,75 @@ function MatrixTDist(ν::T, M::AbstractMatrix{T}, Σ::AbstractPDMat{T}, Ω::Abst prom_M = convert(AbstractArray{R}, M) prom_Σ = convert(AbstractArray{R}, Σ) prom_Ω = convert(AbstractArray{R}, Ω) - MatrixTDist{R, typeof(prom_M), typeof(prom_Σ), typeof(prom_Ω)}(R(ν), prom_M, prom_Σ, prom_Ω, R(logc0)) + MatrixTDist{R,typeof(prom_M),typeof(prom_Σ),typeof(prom_Ω)}( + R(ν), + prom_M, + prom_Σ, + prom_Ω, + R(logc0), + ) end function MatrixTDist(ν::Real, M::AbstractMatrix, Σ::AbstractPDMat, Ω::AbstractPDMat) T = Base.promote_eltype(ν, M, Σ, Ω) - MatrixTDist(convert(T, ν), convert(AbstractArray{T}, M), convert(AbstractArray{T}, Σ), convert(AbstractArray{T}, Ω)) + MatrixTDist( + convert(T, ν), + convert(AbstractArray{T}, M), + convert(AbstractArray{T}, Σ), + convert(AbstractArray{T}, Ω), + ) end -MatrixTDist(ν::Real, M::AbstractMatrix, Σ::Union{AbstractMatrix, LinearAlgebra.Cholesky}, Ω::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixTDist(ν, M, PDMat(Σ), PDMat(Ω)) -MatrixTDist(ν::Real, M::AbstractMatrix, Σ::AbstractPDMat, Ω::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixTDist(ν, M, Σ, PDMat(Ω)) -MatrixTDist(ν::Real, M::AbstractMatrix, Σ::Union{AbstractMatrix, LinearAlgebra.Cholesky}, Ω::AbstractPDMat) = MatrixTDist(ν, M, PDMat(Σ), Ω) +MatrixTDist( + ν::Real, + M::AbstractMatrix, + Σ::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + Ω::Union{AbstractMatrix,LinearAlgebra.Cholesky}, +) = MatrixTDist(ν, M, PDMat(Σ), PDMat(Ω)) +MatrixTDist( + ν::Real, + M::AbstractMatrix, + Σ::AbstractPDMat, + Ω::Union{AbstractMatrix,LinearAlgebra.Cholesky}, +) = MatrixTDist(ν, M, Σ, PDMat(Ω)) +MatrixTDist( + ν::Real, + M::AbstractMatrix, + Σ::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + Ω::AbstractPDMat, +) = MatrixTDist(ν, M, PDMat(Σ), Ω) # ----------------------------------------------------------------------------- # REPL display # ----------------------------------------------------------------------------- - show(io::IO, d::MatrixTDist) = show_multline(io, d, [(:ν, d.ν), (:M, d.M), (:Σ, Matrix(d.Σ)), (:Ω, Matrix(d.Ω))]) +show(io::IO, d::MatrixTDist) = + show_multline(io, d, [(:ν, d.ν), (:M, d.M), (:Σ, Matrix(d.Σ)), (:Ω, Matrix(d.Ω))]) # ----------------------------------------------------------------------------- # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixTDist{T}}, d::MatrixTDist) where T <: Real +function convert(::Type{MatrixTDist{T}}, d::MatrixTDist) where {T<:Real} MM = convert(AbstractArray{T}, d.M) ΣΣ = convert(AbstractArray{T}, d.Σ) ΩΩ = convert(AbstractArray{T}, d.Ω) - MatrixTDist{T, typeof(MM), typeof(ΣΣ), typeof(ΩΩ)}(T(d.ν), MM, ΣΣ, ΩΩ, T(d.logc0)) + MatrixTDist{T,typeof(MM),typeof(ΣΣ),typeof(ΩΩ)}(T(d.ν), 
MM, ΣΣ, ΩΩ, T(d.logc0)) end Base.convert(::Type{MatrixTDist{T}}, d::MatrixTDist{T}) where {T<:Real} = d -function convert(::Type{MatrixTDist{T}}, ν, M::AbstractMatrix, Σ::AbstractPDMat, Ω::AbstractPDMat, logc0) where T <: Real +function convert( + ::Type{MatrixTDist{T}}, + ν, + M::AbstractMatrix, + Σ::AbstractPDMat, + Ω::AbstractPDMat, + logc0, +) where {T<:Real} MM = convert(AbstractArray{T}, M) ΣΣ = convert(AbstractArray{T}, Σ) ΩΩ = convert(AbstractArray{T}, Ω) - MatrixTDist{T, typeof(MM), typeof(ΣΣ), typeof(ΩΩ)}(T(ν), MM, ΣΣ, ΩΩ, T(logc0)) + MatrixTDist{T,typeof(MM),typeof(ΣΣ),typeof(ΩΩ)}(T(ν), MM, ΣΣ, ΩΩ, T(logc0)) end # ----------------------------------------------------------------------------- @@ -100,7 +140,7 @@ end size(d::MatrixTDist) = size(d.M) -rank(d::MatrixTDist) = minimum( size(d) ) +rank(d::MatrixTDist) = minimum(size(d)) insupport(d::MatrixTDist, X::Matrix) = isreal(X) && size(X) == size(d) @@ -112,15 +152,19 @@ end mode(d::MatrixTDist) = d.M -cov(d::MatrixTDist) = d.ν <= 2 ? throw(ArgumentError("cov only defined for df > 2")) : Matrix(kron(d.Ω, d.Σ)) ./ (d.ν - 2) +cov(d::MatrixTDist) = + d.ν <= 2 ? throw(ArgumentError("cov only defined for df > 2")) : + Matrix(kron(d.Ω, d.Σ)) ./ (d.ν - 2) cov(d::MatrixTDist, ::Val{false}) = ((n, p) = size(d); reshape(cov(d), n, p, n, p)) -var(d::MatrixTDist) = d.ν <= 2 ? throw(ArgumentError("var only defined for df > 2")) : reshape(diag(cov(d)), size(d)) +var(d::MatrixTDist) = + d.ν <= 2 ? throw(ArgumentError("var only defined for df > 2")) : + reshape(diag(cov(d)), size(d)) params(d::MatrixTDist) = (d.ν, d.M, d.Σ, d.Ω) -@inline partype(d::MatrixTDist{T}) where {T <: Real} = T +@inline partype(d::MatrixTDist{T}) where {T<:Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -131,8 +175,8 @@ function matrixtdist_logc0(Σ::AbstractPDMat, Ω::AbstractPDMat, ν::Real) n = size(Σ, 1) p = size(Ω, 1) term1 = logmvgamma(p, (ν + n + p - 1) / 2) - term2 = - (n * p / 2) * logπ - term3 = - logmvgamma(p, (ν + p - 1) / 2) + term2 = -(n * p / 2) * logπ + term3 = -logmvgamma(p, (ν + p - 1) / 2) term4 = (-n / 2) * logdet(Ω) term5 = (-p / 2) * logdet(Σ) term1 + term2 + term3 + term4 + term5 @@ -141,7 +185,7 @@ end function logkernel(d::MatrixTDist, X::AbstractMatrix) n, p = size(d) A = X - d.M - (-(d.ν + n + p - 1) / 2) * logdet( I + (d.Σ \ A) * (d.Ω \ A') ) + (-(d.ν + n + p - 1) / 2) * logdet(I + (d.Σ \ A) * (d.Ω \ A')) end # ----------------------------------------------------------------------------- @@ -152,8 +196,8 @@ end function _rand!(rng::AbstractRNG, d::MatrixTDist, A::AbstractMatrix) n, p = size(d) - S = rand(rng, InverseWishart(d.ν + n - 1, d.Σ) ) - A .= rand(rng, MatrixNormal(d.M, S, d.Ω) ) + S = rand(rng, InverseWishart(d.ν + n - 1, d.Σ)) + A .= rand(rng, MatrixNormal(d.M, S, d.Ω)) end # ----------------------------------------------------------------------------- @@ -165,7 +209,9 @@ end function MvTDist(MT::MatrixTDist) n, p = size(MT) - all([n, p] .> 1) && throw(ArgumentError("Row or col dim of `MatrixTDist` must be 1 to coerce to `MvTDist`")) + all([n, p] .> 1) && throw( + ArgumentError("Row or col dim of `MatrixTDist` must be 1 to coerce to `MvTDist`"), + ) ν, M, Σ, Ω = params(MT) MvTDist(ν, vec(M), (1 / ν) * kron(Σ, Ω)) end @@ -178,14 +224,14 @@ function _univariate(d::MatrixTDist) check_univariate(d) ν, M, Σ, Ω = params(d) μ = M[1] - σ = sqrt( Matrix(Σ)[1] * Matrix(Ω)[1] / ν ) + σ = sqrt(Matrix(Σ)[1] * Matrix(Ω)[1] / ν) return AffineDistribution(μ, σ, TDist(ν)) end 
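# Illustrative aside (an editorial sketch, not part of the original patch): with
# a singleton row dimension, the `MvTDist` coercion defined above applies, e.g.
#     d  = MatrixTDist(5.0, zeros(1, 3), Matrix(1.0I, 1, 1), Matrix(1.0I, 3, 3))
#     mv = MvTDist(d)  # 3-dimensional t with scale matrix (1/ν) * kron(Σ, Ω)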
_multivariate(d::MatrixTDist) = MvTDist(d) function _rand_params(::Type{MatrixTDist}, elty, n::Int, p::Int) - ν = elty( n + p + 1 + abs(10randn()) ) + ν = elty(n + p + 1 + abs(10randn())) M = randn(elty, n, p) Σ = (X = 2rand(elty, n, n) .- 1; X * X') Ω = (Y = 2rand(elty, p, p) .- 1; Y * Y') diff --git a/src/matrix/wishart.jl b/src/matrix/wishart.jl index e8da450060..68c1127802 100644 --- a/src/matrix/wishart.jl +++ b/src/matrix/wishart.jl @@ -30,7 +30,7 @@ has ``\\mathbf{H}\\sim \\textrm{W}_p(\\nu, \\mathbf{S})``. For non-integer ``\\nu``, Wishart matrices can be generated via the [Bartlett decomposition](https://en.wikipedia.org/wiki/Wishart_distribution#Bartlett_decomposition). """ -struct Wishart{T<:Real, ST<:AbstractPDMat, R<:Integer} <: ContinuousMatrixDistribution +struct Wishart{T<:Real,ST<:AbstractPDMat,R<:Integer} <: ContinuousMatrixDistribution df::T # degree of freedom S::ST # the scale matrix logc0::T # the logarithm of normalizing constant in pdf @@ -42,19 +42,21 @@ end # Constructors # ----------------------------------------------------------------------------- -function Wishart(df::T, S::AbstractPDMat{T}) where T<:Real +function Wishart(df::T, S::AbstractPDMat{T}) where {T<:Real} df > 0 || throw(ArgumentError("df must be positive. got $(df).")) p = size(S, 1) singular = df <= p - 1 if singular isinteger(df) || throw( - ArgumentError("df of a singular Wishart distribution must be an integer (got $df)") + ArgumentError( + "df of a singular Wishart distribution must be an integer (got $df)", + ), ) end rnk::Integer = ifelse(singular, df, p) logc0 = wishart_logc0(df, S, rnk) _df, _logc0 = promote(df, logc0) - Wishart{typeof(_df), typeof(S), typeof(rnk)}(_df, S, _logc0, rnk, singular) + Wishart{typeof(_df),typeof(S),typeof(rnk)}(_df, S, _logc0, rnk, singular) end function Wishart(df::Real, S::AbstractPDMat) @@ -75,15 +77,22 @@ show(io::IO, d::Wishart) = show_multline(io, d, [(:df, d.df), (:S, d.S)]) # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{Wishart{T}}, d::Wishart) where T<:Real +function convert(::Type{Wishart{T}}, d::Wishart) where {T<:Real} P = convert(AbstractArray{T}, d.S) - Wishart{T, typeof(P), typeof(d.rank)}(T(d.df), P, T(d.logc0), d.rank, d.singular) + Wishart{T,typeof(P),typeof(d.rank)}(T(d.df), P, T(d.logc0), d.rank, d.singular) end Base.convert(::Type{Wishart{T}}, d::Wishart{T}) where {T<:Real} = d -function convert(::Type{Wishart{T}}, df, S::AbstractPDMat, logc0, rnk, singular) where T<:Real +function convert( + ::Type{Wishart{T}}, + df, + S::AbstractPDMat, + logc0, + rnk, + singular, +) where {T<:Real} P = convert(AbstractArray{T}, S) - Wishart{T, typeof(P), typeof(rnk)}(T(df), P, T(logc0), rnk, singular) + Wishart{T,typeof(P),typeof(rnk)}(T(df), P, T(logc0), rnk, singular) end # ----------------------------------------------------------------------------- @@ -119,7 +128,7 @@ function meanlogdet(d::Wishart) p = size(d, 1) v = logdet_S + p * oftype(logdet_S, logtwo) df = oftype(logdet_S, d.df) - for i in 0:(p - 1) + for i = 0:(p-1) v += digamma((df - i) / 2) end return d.singular ? 
oftype(v, -Inf) : v @@ -140,7 +149,7 @@ end function var(d::Wishart, i::Integer, j::Integer) S = d.S - return d.df * (S[i, i] * S[j, j] + S[i, j] ^ 2) + return d.df * (S[i, i] * S[j, j] + S[i, j]^2) end # ----------------------------------------------------------------------------- @@ -165,16 +174,22 @@ function logkernel(d::Wishart, X::AbstractMatrix) end # Singular Wishart pdf: Theorem 6 in Uhlig (1994 AoS) -function singular_wishart_logc0(p::Integer, df::T, S::AbstractPDMat{T}, rnk::Integer) where {T<:Real} +function singular_wishart_logc0( + p::Integer, + df::T, + S::AbstractPDMat{T}, + rnk::Integer, +) where {T<:Real} logdet_S = logdet(S) h_df = oftype(logdet_S, df) / 2 - return -h_df * (logdet_S + p * oftype(logdet_S, logtwo)) - logmvgamma(rnk, h_df) + ((rnk * (rnk - p)) // 2) * oftype(logdet_S, logπ) + return -h_df * (logdet_S + p * oftype(logdet_S, logtwo)) - logmvgamma(rnk, h_df) + + ((rnk * (rnk - p)) // 2) * oftype(logdet_S, logπ) end function singular_wishart_logkernel(d::Wishart, X::AbstractMatrix) p = size(d, 1) r = rank(d) - L = eigvals(Hermitian(X), (p - r + 1):p) + L = eigvals(Hermitian(X), (p-r+1):p) return ((d.df - (p + 1)) * sum(log, L) - tr(d.S \ X)) / 2 end @@ -198,7 +213,7 @@ function _rand!(rng::AbstractRNG, d::Wishart, A::AbstractMatrix) axes2 = axes(A, 2) r = rank(d) randn!(rng, view(A, :, axes2[1:r])) - fill!(view(A, :, axes2[(r + 1):end]), zero(eltype(A))) + fill!(view(A, :, axes2[(r+1):end]), zero(eltype(A))) else _wishart_genA!(rng, A, d.df) end diff --git a/src/matrixvariates.jl b/src/matrixvariates.jl index 2e3ed72d5c..e17d96fab0 100644 --- a/src/matrixvariates.jl +++ b/src/matrixvariates.jl @@ -44,7 +44,7 @@ mean(d::MatrixDistribution) Compute the matrix of element-wise variances for distribution `d`. """ -var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i in 1:n, j in 1:p]) +var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i = 1:n, j = 1:p]) """ cov(d::MatrixDistribution) @@ -73,7 +73,7 @@ Compute the 4-dimensional array whose `(i, j, k, l)` element is `cov(X[i,j], X[k """ function cov(d::MatrixDistribution, ::Val{false}) n, p = size(d) - [cov(d, i, j, k, l) for i in 1:n, j in 1:p, k in 1:n, l in 1:p] + [cov(d, i, j, k, l) for i = 1:n, j = 1:p, k = 1:n, l = 1:p] end # pdf & logpdf @@ -83,11 +83,19 @@ _logpdf(d::MatrixDistribution, X::AbstractMatrix{<:Real}) = logkernel(d, X) + d. # for testing is_univariate(d::MatrixDistribution) = size(d) == (1, 1) -check_univariate(d::MatrixDistribution) = is_univariate(d) || throw(ArgumentError("not 1 x 1")) +check_univariate(d::MatrixDistribution) = + is_univariate(d) || throw(ArgumentError("not 1 x 1")) ##### Specific distributions ##### -for fname in ["wishart.jl", "inversewishart.jl", "matrixnormal.jl", - "matrixtdist.jl", "matrixbeta.jl", "matrixfdist.jl", "lkj.jl"] +for fname in [ + "wishart.jl", + "inversewishart.jl", + "matrixnormal.jl", + "matrixtdist.jl", + "matrixbeta.jl", + "matrixfdist.jl", + "lkj.jl", +] include(joinpath("matrix", fname)) end diff --git a/src/mixtures/mixturemodel.jl b/src/mixtures/mixturemodel.jl index 7f36642900..fb610782fb 100644 --- a/src/mixtures/mixturemodel.jl +++ b/src/mixtures/mixturemodel.jl @@ -11,7 +11,8 @@ All subtypes of `AbstractMixtureModel` should implement the following methods: - `probs(d)`: return a vector of prior probabilities over components. 
""" -abstract type AbstractMixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution} <: Distribution{VF, VS} end +abstract type AbstractMixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution} <: + Distribution{VF,VS} end """ MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Real} @@ -21,7 +22,8 @@ A mixture of distributions, parametrized on: * `C` distribution family of the mixture * `CT` the type for probabilities of the prior """ -struct MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Categorical} <: AbstractMixtureModel{VF,VS,C} +struct MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Categorical} <: + AbstractMixtureModel{VF,VS,C} components::Vector{C} prior::CT @@ -32,9 +34,12 @@ struct MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Categor end end -const UnivariateMixture{S<:ValueSupport, C<:Distribution} = AbstractMixtureModel{Univariate,S,C} -const MultivariateMixture{S<:ValueSupport, C<:Distribution} = AbstractMixtureModel{Multivariate,S,C} -const MatrixvariateMixture{S<:ValueSupport,C<:Distribution} = AbstractMixtureModel{Matrixvariate,S,C} +const UnivariateMixture{S<:ValueSupport,C<:Distribution} = + AbstractMixtureModel{Univariate,S,C} +const MultivariateMixture{S<:ValueSupport,C<:Distribution} = + AbstractMixtureModel{Multivariate,S,C} +const MatrixvariateMixture{S<:ValueSupport,C<:Distribution} = + AbstractMixtureModel{Matrixvariate,S,C} # Interface @@ -50,7 +55,7 @@ component_type(d::AbstractMixtureModel{VF,VS,C}) where {VF,VS,C} = C Get a list of components of the mixture model `d`. """ -components(d::AbstractMixtureModel) = [component(d, k) for k in 1:ncomponents(d)] +components(d::AbstractMixtureModel) = [component(d, k) for k = 1:ncomponents(d)] """ probs(d::AbstractMixtureModel) @@ -126,24 +131,31 @@ Construct a mixture model with component type ``C``, a vector of parameters for the components given by ``params``, and a prior probability vector. If no `prior` is provided then all components will have the same prior probabilities. """ -function MixtureModel(::Type{C}, params::AbstractArray) where C<:Distribution +function MixtureModel(::Type{C}, params::AbstractArray) where {C<:Distribution} components = C[_construct_component(C, a) for a in params] MixtureModel(components) end -function MixtureModel(components::Vector{C}, prior::Categorical) where C<:Distribution +function MixtureModel(components::Vector{C}, prior::Categorical) where {C<:Distribution} VF = variate_form(C) VS = value_support(C) MixtureModel{VF,VS,C}(components, prior) end -MixtureModel(components::Vector{C}, p::VT) where {C<:Distribution,VT<:AbstractVector{<:Real}} = +MixtureModel( + components::Vector{C}, + p::VT, +) where {C<:Distribution,VT<:AbstractVector{<:Real}} = MixtureModel(components, Categorical(p)) _construct_component(::Type{C}, arg) where {C<:Distribution} = C(arg) _construct_component(::Type{C}, args::Tuple) where {C<:Distribution} = C(args...) 
-function MixtureModel(::Type{C}, params::AbstractArray, p::Vector{T}) where {C<:Distribution,T<:Real} +function MixtureModel( + ::Type{C}, + params::AbstractArray, + p::Vector{T}, +) where {C<:Distribution,T<:Real} components = C[_construct_component(C, a) for a in params] MixtureModel(components, p) end @@ -230,7 +242,7 @@ function cov(d::MultivariateMixture) p = probs(d) m = zeros(length(d)) md = zeros(length(d)) - V = zeros(length(d),length(d)) + V = zeros(length(d), length(d)) for i = 1:K pi = p[i] @@ -311,7 +323,9 @@ end function _mixlogpdf1(d::AbstractMixtureModel, x) p = probs(d) - lp = logsumexp(log(pi) + logpdf(component(d, i), x) for (i, pi) in enumerate(p) if !iszero(pi)) + lp = logsumexp( + log(pi) + logpdf(component(d, i), x) for (i, pi) in enumerate(p) if !iszero(pi) + ) return lp end @@ -364,14 +378,19 @@ end pdf(d::UnivariateMixture, x::Real) = _mixpdf1(d, x) logpdf(d::UnivariateMixture, x::Real) = _mixlogpdf1(d, x) -_pdf!(r::AbstractArray{<:Real}, d::UnivariateMixture{Discrete}, x::UnitRange) = _mixpdf!(r, d, x) -_pdf!(r::AbstractArray{<:Real}, d::UnivariateMixture, x::AbstractArray{<:Real}) = _mixpdf!(r, d, x) -_logpdf!(r::AbstractArray{<:Real}, d::UnivariateMixture, x::AbstractArray{<:Real}) = _mixlogpdf!(r, d, x) +_pdf!(r::AbstractArray{<:Real}, d::UnivariateMixture{Discrete}, x::UnitRange) = + _mixpdf!(r, d, x) +_pdf!(r::AbstractArray{<:Real}, d::UnivariateMixture, x::AbstractArray{<:Real}) = + _mixpdf!(r, d, x) +_logpdf!(r::AbstractArray{<:Real}, d::UnivariateMixture, x::AbstractArray{<:Real}) = + _mixlogpdf!(r, d, x) _pdf(d::MultivariateMixture, x::AbstractVector{<:Real}) = _mixpdf1(d, x) _logpdf(d::MultivariateMixture, x::AbstractVector{<:Real}) = _mixlogpdf1(d, x) -_pdf!(r::AbstractArray{<:Real}, d::MultivariateMixture, x::AbstractMatrix{<:Real}) = _mixpdf!(r, d, x) -_logpdf!(r::AbstractArray{<:Real}, d::MultivariateMixture, x::AbstractMatrix{<:Real}) = _mixlogpdf!(r, d, x) +_pdf!(r::AbstractArray{<:Real}, d::MultivariateMixture, x::AbstractMatrix{<:Real}) = + _mixpdf!(r, d, x) +_logpdf!(r::AbstractArray{<:Real}, d::MultivariateMixture, x::AbstractMatrix{<:Real}) = + _mixlogpdf!(r, d, x) ## component-wise pdf and logpdf @@ -400,9 +419,9 @@ function _cwise_pdf!(r::AbstractMatrix, d::AbstractMixtureModel, X) size(r) == (n, K) || error("The size of r is incorrect.") for i = 1:K if d isa UnivariateMixture - view(r,:,i) .= Base.Fix1(pdf, component(d, i)).(X) + view(r, :, i) .= Base.Fix1(pdf, component(d, i)).(X) else - pdf!(view(r,:,i),component(d, i), X) + pdf!(view(r, :, i), component(d, i), X) end end r @@ -414,38 +433,54 @@ function _cwise_logpdf!(r::AbstractMatrix, d::AbstractMixtureModel, X) size(r) == (n, K) || error("The size of r is incorrect.") for i = 1:K if d isa UnivariateMixture - view(r,:,i) .= Base.Fix1(logpdf, component(d, i)).(X) + view(r, :, i) .= Base.Fix1(logpdf, component(d, i)).(X) else - logpdf!(view(r,:,i), component(d, i), X) + logpdf!(view(r, :, i), component(d, i), X) end end r end componentwise_pdf!(r::AbstractVector, d::UnivariateMixture, x::Real) = _cwise_pdf1!(r, d, x) -componentwise_pdf!(r::AbstractVector, d::MultivariateMixture, x::AbstractVector) = _cwise_pdf1!(r, d, x) -componentwise_pdf!(r::AbstractMatrix, d::UnivariateMixture, x::AbstractVector) = _cwise_pdf!(r, d, x) -componentwise_pdf!(r::AbstractMatrix, d::MultivariateMixture, x::AbstractMatrix) = _cwise_pdf!(r, d, x) - -componentwise_logpdf!(r::AbstractVector, d::UnivariateMixture, x::Real) = _cwise_logpdf1!(r, d, x) -componentwise_logpdf!(r::AbstractVector, 
d::MultivariateMixture, x::AbstractVector) = _cwise_logpdf1!(r, d, x) -componentwise_logpdf!(r::AbstractMatrix, d::UnivariateMixture, x::AbstractVector) = _cwise_logpdf!(r, d, x) -componentwise_logpdf!(r::AbstractMatrix, d::MultivariateMixture, x::AbstractMatrix) = _cwise_logpdf!(r, d, x) - -componentwise_pdf(d::UnivariateMixture, x::Real) = componentwise_pdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) -componentwise_pdf(d::UnivariateMixture, x::AbstractVector) = componentwise_pdf!(Matrix{eltype(x)}(undef, length(x), ncomponents(d)), d, x) -componentwise_pdf(d::MultivariateMixture, x::AbstractVector) = componentwise_pdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) -componentwise_pdf(d::MultivariateMixture, x::AbstractMatrix) = componentwise_pdf!(Matrix{eltype(x)}(undef, size(x,2), ncomponents(d)), d, x) - -componentwise_logpdf(d::UnivariateMixture, x::Real) = componentwise_logpdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) -componentwise_logpdf(d::UnivariateMixture, x::AbstractVector) = componentwise_logpdf!(Matrix{eltype(x)}(undef, length(x), ncomponents(d)), d, x) -componentwise_logpdf(d::MultivariateMixture, x::AbstractVector) = componentwise_logpdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) -componentwise_logpdf(d::MultivariateMixture, x::AbstractMatrix) = componentwise_logpdf!(Matrix{eltype(x)}(undef, size(x,2), ncomponents(d)), d, x) +componentwise_pdf!(r::AbstractVector, d::MultivariateMixture, x::AbstractVector) = + _cwise_pdf1!(r, d, x) +componentwise_pdf!(r::AbstractMatrix, d::UnivariateMixture, x::AbstractVector) = + _cwise_pdf!(r, d, x) +componentwise_pdf!(r::AbstractMatrix, d::MultivariateMixture, x::AbstractMatrix) = + _cwise_pdf!(r, d, x) + +componentwise_logpdf!(r::AbstractVector, d::UnivariateMixture, x::Real) = + _cwise_logpdf1!(r, d, x) +componentwise_logpdf!(r::AbstractVector, d::MultivariateMixture, x::AbstractVector) = + _cwise_logpdf1!(r, d, x) +componentwise_logpdf!(r::AbstractMatrix, d::UnivariateMixture, x::AbstractVector) = + _cwise_logpdf!(r, d, x) +componentwise_logpdf!(r::AbstractMatrix, d::MultivariateMixture, x::AbstractMatrix) = + _cwise_logpdf!(r, d, x) + +componentwise_pdf(d::UnivariateMixture, x::Real) = + componentwise_pdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) +componentwise_pdf(d::UnivariateMixture, x::AbstractVector) = + componentwise_pdf!(Matrix{eltype(x)}(undef, length(x), ncomponents(d)), d, x) +componentwise_pdf(d::MultivariateMixture, x::AbstractVector) = + componentwise_pdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) +componentwise_pdf(d::MultivariateMixture, x::AbstractMatrix) = + componentwise_pdf!(Matrix{eltype(x)}(undef, size(x, 2), ncomponents(d)), d, x) + +componentwise_logpdf(d::UnivariateMixture, x::Real) = + componentwise_logpdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) +componentwise_logpdf(d::UnivariateMixture, x::AbstractVector) = + componentwise_logpdf!(Matrix{eltype(x)}(undef, length(x), ncomponents(d)), d, x) +componentwise_logpdf(d::MultivariateMixture, x::AbstractVector) = + componentwise_logpdf!(Vector{eltype(x)}(undef, ncomponents(d)), d, x) +componentwise_logpdf(d::MultivariateMixture, x::AbstractMatrix) = + componentwise_logpdf!(Matrix{eltype(x)}(undef, size(x, 2), ncomponents(d)), d, x) function quantile(d::UnivariateMixture{Continuous}, p::Real) ps = probs(d) - min_q, max_q = extrema(quantile(component(d, i), p) for (i, pi) in enumerate(ps) if pi > 0) + min_q, max_q = + extrema(quantile(component(d, i), p) for (i, pi) in enumerate(ps) if pi > 0) quantile_bisect(d, p, min_q, max_q) 
end @@ -454,7 +489,7 @@ end function median(d::UnivariateMixture{Continuous}) ps = probs(d) min_q, max_q = extrema(median(component(d, i)) for (i, pi) in enumerate(ps) if pi > 0) - quantile_bisect(d, 1//2, min_q, max_q) + quantile_bisect(d, 1 // 2, min_q, max_q) end ## Sampling diff --git a/src/mixtures/unigmm.jl b/src/mixtures/unigmm.jl index f1d808ec18..355f6f9cae 100644 --- a/src/mixtures/unigmm.jl +++ b/src/mixtures/unigmm.jl @@ -1,16 +1,25 @@ # Univariate Gaussian Mixture Models -struct UnivariateGMM{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real},C<:Categorical} <: UnivariateMixture{Continuous,Normal} +struct UnivariateGMM{ + VT1<:AbstractVector{<:Real}, + VT2<:AbstractVector{<:Real}, + C<:Categorical, +} <: UnivariateMixture{Continuous,Normal} K::Int means::VT1 stds::VT2 prior::C - function UnivariateGMM(ms::VT1, ss::VT2, pri::C) where {VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real},C<:Categorical} + function UnivariateGMM( + ms::VT1, + ss::VT2, + pri::C, + ) where {VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real},C<:Categorical} K = length(ms) length(ss) == K || throw(DimensionMismatch()) - ncategories(pri) == K || - error("The number of categories in pri should be equal to the number of components.") + ncategories(pri) == K || error( + "The number of categories in pri should be equal to the number of components.", + ) new{VT1,VT2,C}(K, ms, ss, pri) end end @@ -32,7 +41,8 @@ rand(rng::AbstractRNG, d::UnivariateGMM) = params(d::UnivariateGMM) = (d.means, d.stds, d.prior) -struct UnivariateGMMSampler{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real}} <: Sampleable{Univariate,Continuous} +struct UnivariateGMMSampler{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real}} <: + Sampleable{Univariate,Continuous} means::VT1 stds::VT2 psampler::AliasTable diff --git a/src/multivariate/dirichlet.jl b/src/multivariate/dirichlet.jl index b24980ec98..7bb749fd1f 100644 --- a/src/multivariate/dirichlet.jl +++ b/src/multivariate/dirichlet.jl @@ -20,12 +20,13 @@ Dirichlet(alpha) # Dirichlet distribution with parameter vector alpha Dirichlet(k, a) # Dirichlet distribution with parameter a * ones(k) ``` """ -struct Dirichlet{T<:Real,Ts<:AbstractVector{T},S<:Real} <: ContinuousMultivariateDistribution +struct Dirichlet{T<:Real,Ts<:AbstractVector{T},S<:Real} <: + ContinuousMultivariateDistribution alpha::Ts alpha0::T lmnB::S - function Dirichlet{T}(alpha::AbstractVector{T}; check_args::Bool=true) where T + function Dirichlet{T}(alpha::AbstractVector{T}; check_args::Bool = true) where {T} @check_args( Dirichlet, (alpha, all(x -> x > zero(x), alpha), "alpha must be a positive vector."), @@ -36,12 +37,12 @@ struct Dirichlet{T<:Real,Ts<:AbstractVector{T},S<:Real} <: ContinuousMultivariat end end -function Dirichlet(alpha::AbstractVector{T}; check_args::Bool=true) where {T<:Real} - Dirichlet{T}(alpha; check_args=check_args) +function Dirichlet(alpha::AbstractVector{T}; check_args::Bool = true) where {T<:Real} + Dirichlet{T}(alpha; check_args = check_args) end -function Dirichlet(d::Integer, alpha::Real; check_args::Bool=true) +function Dirichlet(d::Integer, alpha::Real; check_args::Bool = true) @check_args Dirichlet (d, d > zero(d)) (alpha, alpha > zero(alpha)) - return Dirichlet{typeof(alpha)}(Fill(alpha, d); check_args=false) + return Dirichlet{typeof(alpha)}(Fill(alpha, d); check_args = false) end struct DirichletCanon{T<:Real,Ts<:AbstractVector{T}} @@ -61,8 +62,7 @@ convert(::Type{Dirichlet{T}}, d::Dirichlet{<:Real}) where {T<:Real} = Dirichlet(convert(AbstractVector{T}, 
d.alpha)) convert(::Type{Dirichlet{T}}, cf::DirichletCanon{T}) where {T<:Real} = Dirichlet(cf.alpha) -convert(::Type{Dirichlet{T}}, alpha::AbstractVector{T}) where {T<:Real} = - Dirichlet(alpha) +convert(::Type{Dirichlet{T}}, alpha::AbstractVector{T}) where {T<:Real} = Dirichlet(alpha) convert(::Type{Dirichlet{T}}, d::Dirichlet{T}) where {T<:Real} = d Base.show(io::IO, d::Dirichlet) = show(io, d, (:alpha,)) @@ -94,12 +94,12 @@ function cov(d::Dirichlet) for j = 1:k αj = α[j] αjc = αj * c - for i in 1:(j-1) - @inbounds C[i,j] = C[j,i] + for i = 1:(j-1) + @inbounds C[i, j] = C[j, i] end - @inbounds C[j,j] = (α0 - αj) * αjc - for i in (j+1):k - @inbounds C[i,j] = - α[i] * αjc + @inbounds C[j, j] = (α0 - αj) * αjc + for i = (j+1):k + @inbounds C[i, j] = -α[i] * αjc end end @@ -154,18 +154,22 @@ end # sampling -function _rand!(rng::AbstractRNG, - d::Union{Dirichlet,DirichletCanon}, - x::AbstractVector{<:Real}) +function _rand!( + rng::AbstractRNG, + d::Union{Dirichlet,DirichletCanon}, + x::AbstractVector{<:Real}, +) for (i, αi) in zip(eachindex(x), d.alpha) @inbounds x[i] = rand(rng, Gamma(αi)) end lmul!(inv(sum(x)), x) # this returns x end -function _rand!(rng::AbstractRNG, - d::Dirichlet{T,<:FillArrays.AbstractFill{T}}, - x::AbstractVector{<:Real}) where {T<:Real} +function _rand!( + rng::AbstractRNG, + d::Dirichlet{T,<:FillArrays.AbstractFill{T}}, + x::AbstractVector{<:Real}, +) where {T<:Real} rand!(rng, Gamma(FillArrays.getindex_value(d.alpha)), x) lmul!(inv(sum(x)), x) # this returns x end @@ -193,28 +197,31 @@ function suffstats(::Type{<:Dirichlet}, P::AbstractMatrix{Float64}) slogp = zeros(K) for i = 1:n for k = 1:K - @inbounds slogp[k] += log(P[k,i]) + @inbounds slogp[k] += log(P[k, i]) end end DirichletStats(slogp, n) end -function suffstats(::Type{<:Dirichlet}, P::AbstractMatrix{Float64}, - w::AbstractArray{Float64}) +function suffstats( + ::Type{<:Dirichlet}, + P::AbstractMatrix{Float64}, + w::AbstractArray{Float64}, +) K = size(P, 1) n = size(P, 2) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) end - tw = 0. + tw = 0.0 slogp = zeros(K) for i = 1:n @inbounds wi = w[i] tw += wi for k = 1:K - @inbounds slogp[k] += log(P[k,i]) * wi + @inbounds slogp[k] += log(P[k, i]) * wi end end DirichletStats(slogp, tw) @@ -227,7 +234,7 @@ end function _dirichlet_mle_init2(μ::Vector{Float64}, γ::Vector{Float64}) K = length(μ) - α0 = 0. 
+    α0 = 0.0
     for k = 1:K
         @inbounds μk = μ[k]
         @inbounds γk = γ[k]
@@ -243,8 +250,8 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64})
     K = size(P, 1)
     n = size(P, 2)
 
-    μ = vec(sum(P, dims=2)) # E[p]
-    γ = vec(sum(abs2, P, dims=2)) # E[p^2]
+    μ = vec(sum(P, dims = 2)) # E[p]
+    γ = vec(sum(abs2, P, dims = 2)) # E[p^2]
 
     c = 1.0 / n
     μ .*= c
@@ -261,10 +268,10 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64}, w::AbstractArray{Float64
     γ = zeros(K)  # E[p^2]
     tw = 0.0
 
-    for i in 1:n
+    for i = 1:n
         @inbounds wi = w[i]
         tw += wi
-        for k in 1:K
+        for k = 1:K
             pk = P[k, i]
             @inbounds μ[k] += pk * wi
             @inbounds γ[k] += pk * pk * wi
@@ -280,8 +287,8 @@ end
 
 ## Newton-Raphson algorithm
 
-function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64};
-    maxiter::Int=25, tol::Float64=1.0e-12, debug::Bool=false)
+function fit_dirichlet!(
+    elogp::Vector{Float64},
+    α::Vector{Float64};
+    maxiter::Int = 25,
+    tol::Float64 = 1.0e-12,
+    debug::Bool = false,
+)
     # This function directly overrides α
 
     K = length(elogp)
@@ -305,14 +317,14 @@ function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64};
         digam_α0 = digamma(α0)
         iz = 1.0 / trigamma(α0)
-        gnorm = 0.
-        b = 0.
-        iqs = 0.
+        gnorm = 0.0
+        b = 0.0
+        iqs = 0.0
 
         for k = 1:K
             @inbounds ak = α[k]
             @inbounds g[k] = gk = digam_α0 - digamma(ak) + elogp[k]
-            @inbounds iq[k] = - 1.0 / trigamma(ak)
+            @inbounds iq[k] = -1.0 / trigamma(ak)
 
             @inbounds b += gk * iq[k]
             @inbounds iqs += iq[k]
@@ -337,8 +349,13 @@ function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64};
         if debug
             prev_objv = objv
             objv = dot(α .- 1.0, elogp) + loggamma(α0) - sum(loggamma, α)
-            @printf("Iter %4d: objv = %.4e ch = %.3e gnorm = %.3e\n",
-                t, objv, objv - prev_objv, gnorm)
+            @printf(
+                "Iter %4d: objv = %.4e ch = %.3e gnorm = %.3e\n",
+                t,
+                objv,
+                objv - prev_objv,
+                gnorm
+            )
         end
 
         # determine convergence
@@ -354,24 +371,34 @@ function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64};
 end
 
-function fit_mle(::Type{T}, P::AbstractMatrix{Float64};
-    init::Vector{Float64}=Float64[], maxiter::Int=25, tol::Float64=1.0e-12,
-    debug::Bool=false) where {T<:Dirichlet}
+function fit_mle(
+    ::Type{T},
+    P::AbstractMatrix{Float64};
+    init::Vector{Float64} = Float64[],
+    maxiter::Int = 25,
+    tol::Float64 = 1.0e-12,
+    debug::Bool = false,
+) where {T<:Dirichlet}
     α = isempty(init) ? dirichlet_mle_init(P) : init
     elogp = mean_logp(suffstats(T, P))
-    fit_dirichlet!(elogp, α; maxiter=maxiter, tol=tol, debug=debug)
+    fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug)
 end
 
-function fit_mle(::Type{<:Dirichlet}, P::AbstractMatrix{Float64},
-    w::AbstractArray{Float64};
-    init::Vector{Float64}=Float64[], maxiter::Int=25, tol::Float64=1.0e-12,
-    debug::Bool=false)
+function fit_mle(
+    ::Type{<:Dirichlet},
+    P::AbstractMatrix{Float64},
+    w::AbstractArray{Float64};
+    init::Vector{Float64} = Float64[],
+    maxiter::Int = 25,
+    tol::Float64 = 1.0e-12,
+    debug::Bool = false,
+)
     n = size(P, 2)
     length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions."))
     α = isempty(init) ? dirichlet_mle_init(P, w) : init
     elogp = mean_logp(suffstats(Dirichlet, P, w))
-    fit_dirichlet!(elogp, α; maxiter=maxiter, tol=tol, debug=debug)
+    fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug)
 end
diff --git a/src/multivariate/dirichletmultinomial.jl b/src/multivariate/dirichletmultinomial.jl
index eb15990cb2..73c1c08370 100644
--- a/src/multivariate/dirichletmultinomial.jl
+++ b/src/multivariate/dirichletmultinomial.jl
@@ -30,23 +30,25 @@ DirichletMultinomial(n, k) # Dirichlet-multinomial distribution with n trials an
 vector of length k of ones.
 ```
 """
-struct DirichletMultinomial{T <: Real} <: DiscreteMultivariateDistribution
+struct DirichletMultinomial{T<:Real} <: DiscreteMultivariateDistribution
     n::Int
     α::Vector{T}
     α0::T
 
-    function DirichletMultinomial{T}(n::Integer, α::Vector{T}) where T
+    function DirichletMultinomial{T}(n::Integer, α::Vector{T}) where {T}
         α0 = sum(abs, α)
         sum(α) == α0 || throw(ArgumentError("alpha must be a positive vector."))
         n > 0 || throw(ArgumentError("n must be a positive integer."))
         new{T}(Int(n), α, α0)
     end
 end
-DirichletMultinomial(n::Integer, α::Vector{T}) where {T <: Real} = DirichletMultinomial{T}(n, α)
-DirichletMultinomial(n::Integer, α::Vector{T}) where {T <: Integer} = DirichletMultinomial(n, float(α))
+DirichletMultinomial(n::Integer, α::Vector{T}) where {T<:Real} =
+    DirichletMultinomial{T}(n, α)
+DirichletMultinomial(n::Integer, α::Vector{T}) where {T<:Integer} =
+    DirichletMultinomial(n, float(α))
 DirichletMultinomial(n::Integer, k::Integer) = DirichletMultinomial(n, ones(k))
 
-Base.show(io::IO, d::DirichletMultinomial) = show(io, d, (:n, :α,))
+Base.show(io::IO, d::DirichletMultinomial) = show(io, d, (:n, :α))
 
 
 # Parameters
@@ -58,7 +60,7 @@ params(d::DirichletMultinomial) = (d.n, d.α)
 # Statistics
 
 mean(d::DirichletMultinomial) = d.α .* (d.n / d.α0)
 
-function var(d::DirichletMultinomial{T}) where T <: Real
+function var(d::DirichletMultinomial{T}) where {T<:Real}
     v = fill(d.n * (d.n + d.α0) / (1 + d.α0), length(d))
     p = d.α / d.α0
     for i in eachindex(v)
@@ -78,7 +80,7 @@ end
 
 # Evaluation
 
-function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where T<:Real
+function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where {T<:Real}
     k = length(d)
     length(x) == k || return false
     for xi in x
@@ -86,7 +88,7 @@ function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where T<:Real
     end
     return sum(x) == ntrials(d)
 end
-function _logpdf(d::DirichletMultinomial{S}, x::AbstractVector{T}) where {T<:Real, S<:Real}
+function _logpdf(d::DirichletMultinomial{S}, x::AbstractVector{T}) where {T<:Real,S<:Real}
     c = loggamma(S(d.n + 1)) + loggamma(d.α0) - loggamma(d.n + d.α0)
     for j in eachindex(x)
         @inbounds xj, αj = x[j], d.α[j]
@@ -108,39 +110,47 @@ struct DirichletMultinomialStats <: SufficientStats
     tw::Float64
     DirichletMultinomialStats(n::Int, s::Matrix{Float64}, tw::Real) = new(n, s, Float64(tw))
 end
-function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}) where T<:Real
-    ns = sum(x, dims=1) # get ntrials for each observation
+function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}) where {T<:Real}
+    ns = sum(x, dims = 1) # get ntrials for each observation
     n = ns[1] # use ntrials from first ob., then check all equal
     all(ns .== n) || error("Each sample in X should sum to the same value.")
     d, m = size(x)
     s = zeros(d, n)
-    @inbounds for k in 1:n, i in 1:m, j in 1:d
+    @inbounds for k = 1:n, i = 1:m, j = 1:d
        if x[j, i] >= k
            s[j, k] += 1.0
        end
    end
    DirichletMultinomialStats(n, s, m)
end
-function
suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}, w::Array{Float64}) where T<:Real +function suffstats( + ::Type{<:DirichletMultinomial}, + x::Matrix{T}, + w::Array{Float64}, +) where {T<:Real} length(w) == size(x, 2) || throw(DimensionMismatch("Inconsistent argument dimensions.")) - ns = sum(x, dims=1) + ns = sum(x, dims = 1) n = ns[1] all(ns .== n) || error("Each sample in X should sum to the same value.") d, m = size(x) s = zeros(d, n) - @inbounds for k in 1:n, i in 1:m, j in 1:d + @inbounds for k = 1:n, i = 1:m, j = 1:d if x[j, i] >= k s[j, k] += w[i] end end DirichletMultinomialStats(n, s, sum(w)) end -function fit_mle(::Type{<:DirichletMultinomial}, ss::DirichletMultinomialStats; - tol::Float64 = 1e-8, maxiter::Int = 1000) +function fit_mle( + ::Type{<:DirichletMultinomial}, + ss::DirichletMultinomialStats; + tol::Float64 = 1e-8, + maxiter::Int = 1000, +) k = size(ss.s, 2) α = ones(size(ss.s, 1)) - rng = 0.0:(k - 1) - @inbounds for iter in 1:maxiter + rng = 0.0:(k-1) + @inbounds for iter = 1:maxiter α_old = copy(α) αsum = sum(α) denom = ss.tw * sum(inv, αsum .+ rng) diff --git a/src/multivariate/jointorderstatistics.jl b/src/multivariate/jointorderstatistics.jl index 1fbed0d1b6..25780e589e 100644 --- a/src/multivariate/jointorderstatistics.jl +++ b/src/multivariate/jointorderstatistics.jl @@ -35,7 +35,8 @@ JointOrderStatistics(Cauchy(), 10, (1, 10)) # joint distribution of only the ex ``` """ struct JointOrderStatistics{ - D<:ContinuousUnivariateDistribution,R<:Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}} + D<:ContinuousUnivariateDistribution, + R<:Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}}, } <: ContinuousMultivariateDistribution dist::D n::Int @@ -43,8 +44,8 @@ struct JointOrderStatistics{ function JointOrderStatistics( dist::ContinuousUnivariateDistribution, n::Int, - ranks::Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}}=Base.OneTo(n); - check_args::Bool=true, + ranks::Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}} = Base.OneTo(n); + check_args::Bool = true, ) @check_args( JointOrderStatistics, @@ -64,7 +65,10 @@ _islesseq(x, y) = isless(x, y) || isequal(x, y) function _are_ranks_valid(ranks, n) # this is equivalent to but faster than # issorted(ranks) && allunique(ranks) - !isempty(ranks) && first(ranks) ≥ 1 && last(ranks) ≤ n && issorted(ranks; lt=_islesseq) + !isempty(ranks) && + first(ranks) ≥ 1 && + last(ranks) ≤ n && + issorted(ranks; lt = _islesseq) end function _are_ranks_valid(ranks::AbstractRange, n) !isempty(ranks) && first(ranks) ≥ 1 && last(ranks) ≤ n && step(ranks) > 0 diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl index c4db44b85b..47c4a505e9 100644 --- a/src/multivariate/multinomial.jl +++ b/src/multivariate/multinomial.jl @@ -19,13 +19,17 @@ Multinomial(n, k) # Multinomial distribution for n trials with equal probabili # over 1:k ``` """ -struct Multinomial{T<:Real, TV<:AbstractVector{T}} <: DiscreteMultivariateDistribution +struct Multinomial{T<:Real,TV<:AbstractVector{T}} <: DiscreteMultivariateDistribution n::Int p::TV - Multinomial{T, TV}(n::Int, p::TV) where {T <: Real, TV <: AbstractVector{T}} = new{T, TV}(n, p) + Multinomial{T,TV}(n::Int, p::TV) where {T<:Real,TV<:AbstractVector{T}} = new{T,TV}(n, p) end -function Multinomial(n::Integer, p::AbstractVector{T}; check_args::Bool=true) where {T<:Real} +function Multinomial( + n::Integer, + p::AbstractVector{T}; + check_args::Bool = true, +) where {T<:Real} @check_args( Multinomial, (n, n >= 0), @@ -34,9 +38,9 @@ function Multinomial(n::Integer, 
p::AbstractVector{T}; check_args::Bool=true) wh
     return Multinomial{T,typeof(p)}(n, p)
 end
 
-function Multinomial(n::Integer, k::Integer; check_args::Bool=true)
+function Multinomial(n::Integer, k::Integer; check_args::Bool = true)
     @check_args Multinomial (n, n >= 0) (k, k >= 1)
-    return Multinomial{Float64, Vector{Float64}}(round(Int, n), fill(1.0 / k, k))
+    return Multinomial{Float64,Vector{Float64}}(round(Int, n), fill(1.0 / k, k))
 end
 
 # Parameters
 
@@ -50,18 +54,27 @@ params(d::Multinomial) = (d.n, d.p)
 @inline partype(d::Multinomial{T}) where {T<:Real} = T
 
 ### Conversions
-convert(::Type{Multinomial{T, TV}}, d::Multinomial) where {T<:Real, TV<:AbstractVector{T}} = Multinomial(d.n, TV(d.p))
-convert(::Type{Multinomial{T, TV}}, d::Multinomial{T, TV}) where {T<:Real, TV<:AbstractVector{T}} = d
-convert(::Type{Multinomial{T, TV}}, n, p::AbstractVector) where {T<:Real, TV<:AbstractVector} = Multinomial(n, TV(p))
+convert(::Type{Multinomial{T,TV}}, d::Multinomial) where {T<:Real,TV<:AbstractVector{T}} =
+    Multinomial(d.n, TV(d.p))
+convert(
+    ::Type{Multinomial{T,TV}},
+    d::Multinomial{T,TV},
+) where {T<:Real,TV<:AbstractVector{T}} = d
+convert(
+    ::Type{Multinomial{T,TV}},
+    n,
+    p::AbstractVector,
+) where {T<:Real,TV<:AbstractVector} = Multinomial(n, TV(p))
 convert(::Type{Multinomial{T}}, d::Multinomial) where {T<:Real} = Multinomial(d.n, T.(d.p))
 convert(::Type{Multinomial{T}}, d::Multinomial{T}) where {T<:Real} = d
-convert(::Type{Multinomial{T}}, n, p::AbstractVector) where {T<:Real} = Multinomial(n, T.(p))
+convert(::Type{Multinomial{T}}, n, p::AbstractVector) where {T<:Real} =
+    Multinomial(n, T.(p))
 
 # Statistics
 
 mean(d::Multinomial) = d.n .* d.p
 
-function var(d::Multinomial{T}) where T<:Real
+function var(d::Multinomial{T}) where {T<:Real}
     p = probs(d)
     k = length(p)
     n = ntrials(d)
@@ -74,7 +87,7 @@ function var(d::Multinomial{T}) where T<:Real
     v
 end
 
-function cov(d::Multinomial{T}) where T<:Real
+function cov(d::Multinomial{T}) where {T<:Real}
     p = probs(d)
     k = length(p)
     n = ntrials(d)
@@ -83,35 +96,35 @@ function cov(d::Multinomial{T}) where T<:Real
     for j = 1:k
         pj = p[j]
         for i = 1:j-1
-            @inbounds C[i,j] = - n * p[i] * pj
+            @inbounds C[i, j] = -n * p[i] * pj
         end
-        @inbounds C[j,j] = n * pj * (1-pj)
+        @inbounds C[j, j] = n * pj * (1 - pj)
     end
 
     for j = 1:k-1
         for i = j+1:k
-            @inbounds C[i,j] = C[j,i]
+            @inbounds C[i, j] = C[j, i]
         end
     end
     C
 end
 
-function mgf(d::Multinomial{T}, t::AbstractVector) where T<:Real
+function mgf(d::Multinomial{T}, t::AbstractVector) where {T<:Real}
     p = probs(d)
     n = ntrials(d)
     s = zero(T)
-    for i in 1:length(p)
+    for i = 1:length(p)
         s += p[i] * exp(t[i])
     end
     return s^n
 end
 
-function cf(d::Multinomial{T}, t::AbstractVector) where T<:Real
+function cf(d::Multinomial{T}, t::AbstractVector) where {T<:Real}
     p = probs(d)
     n = ntrials(d)
     s = zero(Complex{T})
-    for i in 1:length(p)
+    for i = 1:length(p)
         s += p[i] * exp(im * t[i])
     end
     return s^n
@@ -119,11 +132,11 @@ end
 
 function entropy(d::Multinomial)
     n, p = params(d)
-    s = -loggamma(n+1) + n*entropy(p)
+    s = -loggamma(n + 1) + n * entropy(p)
     for pr in p
         b = Binomial(n, pr)
-        for x in 0:n
-            s += pdf(b, x) * loggamma(x+1)
+        for x = 0:n
+            s += pdf(b, x) * loggamma(x + 1)
         end
     end
     return s
@@ -132,7 +145,7 @@ end
 
 # Evaluation
 
-function insupport(d::Multinomial, x::AbstractVector{T}) where T<:Real
+function insupport(d::Multinomial, x::AbstractVector{T}) where {T<:Real}
     k = length(d)
     length(x) == k || return false
     s = 0.0
@@ -146,12 +159,12 @@ function insupport(d::Multinomial, x::AbstractVector{T}) where T<:Real
     return s ==
ntrials(d) # integer computation would not yield truncation errors end -function _logpdf(d::Multinomial, x::AbstractVector{T}) where T<:Real +function _logpdf(d::Multinomial, x::AbstractVector{T}) where {T<:Real} p = probs(d) n = ntrials(d) S = eltype(p) R = promote_type(T, S) - insupport(d,x) || return -R(Inf) + insupport(d, x) || return -R(Inf) s = R(loggamma(n + 1)) for i = 1:length(p) @inbounds xi = x[i] @@ -181,15 +194,15 @@ struct MultinomialStats <: SufficientStats MultinomialStats(n::Int, scnts::Vector{Float64}, tw::Real) = new(n, scnts, Float64(tw)) end -function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where T<:Real +function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where {T<:Real} K = size(x, 1) n::T = zero(T) scnts = zeros(K) - for j = 1:size(x,2) + for j = 1:size(x, 2) nj = zero(T) for i = 1:K - @inbounds xi = x[i,j] + @inbounds xi = x[i, j] @inbounds scnts[i] += xi nj += xi end @@ -200,23 +213,23 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where T<:Real error("Each sample in X should sum to the same value.") end end - MultinomialStats(n, scnts, size(x,2)) + MultinomialStats(n, scnts, size(x, 2)) end -function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where T<:Real +function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where {T<:Real} length(w) == size(x, 2) || throw(DimensionMismatch("Inconsistent argument dimensions.")) K = size(x, 1) n::T = zero(T) scnts = zeros(K) - tw = 0. + tw = 0.0 - for j = 1:size(x,2) + for j = 1:size(x, 2) nj = zero(T) @inbounds wj = w[j] tw += wj for i = 1:K - @inbounds xi = x[i,j] + @inbounds xi = x[i, j] @inbounds scnts[i] += xi * wj nj += xi end @@ -230,7 +243,8 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where MultinomialStats(n, scnts, tw) end -fit_mle(::Type{<:Multinomial}, ss::MultinomialStats) = Multinomial(ss.n, ss.scnts * inv(ss.tw * ss.n)) +fit_mle(::Type{<:Multinomial}, ss::MultinomialStats) = + Multinomial(ss.n, ss.scnts * inv(ss.tw * ss.n)) function fit_mle(::Type{<:Multinomial}, x::Matrix{<:Real}) ss = suffstats(Multinomial, x) diff --git a/src/multivariate/mvlogitnormal.jl b/src/multivariate/mvlogitnormal.jl index 0d60ddf654..f13bf35fca 100644 --- a/src/multivariate/mvlogitnormal.jl +++ b/src/multivariate/mvlogitnormal.jl @@ -29,10 +29,10 @@ end MvLogitNormal(d::AbstractMvNormal) = MvLogitNormal{typeof(d)}(d) MvLogitNormal(args...) 
= MvLogitNormal(MvNormal(args...)) -function Base.show(io::IO, d::MvLogitNormal; indent::String=" ") +function Base.show(io::IO, d::MvLogitNormal; indent::String = " ") print(io, distrname(d)) println(io, "(") - normstr = strip(sprint(show, d.normal; context=IOContext(io))) + normstr = strip(sprint(show, d.normal; context = IOContext(io))) normstr = replace(normstr, "\n" => "\n$indent") print(io, indent) println(io, normstr) @@ -121,8 +121,8 @@ function _softmax1!(x::AbstractMatrix, y::AbstractMatrix) return x end -_drop1(x::AbstractVector) = @views x[firstindex(x, 1):(end - 1)] -_drop1(x::AbstractMatrix) = @views x[firstindex(x, 1):(end - 1), :] +_drop1(x::AbstractVector) = @views x[firstindex(x, 1):(end-1)] +_drop1(x::AbstractMatrix) = @views x[firstindex(x, 1):(end-1), :] _last1(x::AbstractVector) = x[end] _last1(x::AbstractMatrix) = @views x[end, :] diff --git a/src/multivariate/mvlognormal.jl b/src/multivariate/mvlognormal.jl index 1eecd38c2f..198a759456 100644 --- a/src/multivariate/mvlognormal.jl +++ b/src/multivariate/mvlognormal.jl @@ -25,49 +25,81 @@ abstract type AbstractMvLogNormal <: ContinuousMultivariateDistribution end -function insupport(::Type{D},x::AbstractVector{T}) where {T<:Real,D<:AbstractMvLogNormal} - for i=1:length(x) - @inbounds 0.0 zero(df) || error("df must be positive") - new{T,Cov,Mean}(df, dim, μ, Σ) + function GenericMvTDist{T,Cov,Mean}( + df::T, + dim::Int, + μ::Mean, + Σ::AbstractPDMat{T}, + ) where {T,Cov,Mean} + df > zero(df) || error("df must be positive") + new{T,Cov,Mean}(df, dim, μ, Σ) end end -function GenericMvTDist(df::T, μ::Mean, Σ::Cov) where {Cov<:AbstractPDMat, Mean<:AbstractVector, T<:Real} +function GenericMvTDist( + df::T, + μ::Mean, + Σ::Cov, +) where {Cov<:AbstractPDMat,Mean<:AbstractVector,T<:Real} d = length(μ) - size(Σ, 1) == d || throw(DimensionMismatch("The dimensions of μ and Σ are inconsistent.")) + size(Σ, 1) == d || + throw(DimensionMismatch("The dimensions of μ and Σ are inconsistent.")) R = Base.promote_eltype(T, μ, Σ) S = convert(AbstractArray{R}, Σ) m = convert(AbstractArray{R}, μ) - GenericMvTDist{R, typeof(S), typeof(m)}(R(df), d, m, S) + GenericMvTDist{R,typeof(S),typeof(m)}(R(df), d, m, S) end function GenericMvTDist(df::Real, Σ::AbstractPDMat) @@ -31,27 +41,33 @@ function GenericMvTDist(df::Real, Σ::AbstractPDMat) end GenericMvTDist{T,Cov,Mean}(df, μ, Σ) where {T,Cov,Mean} = - GenericMvTDist(convert(T,df), convert(Mean, μ), convert(Cov, Σ)) + GenericMvTDist(convert(T, df), convert(Mean, μ), convert(Cov, Σ)) ### Conversion -function convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist) where T<:Real +function convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist) where {T<:Real} S = convert(AbstractArray{T}, d.Σ) m = convert(AbstractArray{T}, d.μ) - GenericMvTDist{T, typeof(S), typeof(m)}(T(d.df), d.dim, m, S) + GenericMvTDist{T,typeof(S),typeof(m)}(T(d.df), d.dim, m, S) end Base.convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist{T}) where {T<:Real} = d -function convert(::Type{GenericMvTDist{T}}, df, dim, μ::AbstractVector, Σ::AbstractPDMat) where T<:Real +function convert( + ::Type{GenericMvTDist{T}}, + df, + dim, + μ::AbstractVector, + Σ::AbstractPDMat, +) where {T<:Real} S = convert(AbstractArray{T}, Σ) m = convert(AbstractArray{T}, μ) - GenericMvTDist{T, typeof(S), typeof(m)}(T(df), dim, m, S) + GenericMvTDist{T,typeof(S),typeof(m)}(T(df), dim, m, S) end ## Construction of multivariate normal with specific covariance type -const IsoTDist = GenericMvTDist{Float64, ScalMat{Float64}, Vector{Float64}} -const DiagTDist 
= GenericMvTDist{Float64, PDiagMat{Float64,Vector{Float64}}, Vector{Float64}} -const MvTDist = GenericMvTDist{Float64, PDMat{Float64,Matrix{Float64}}, Vector{Float64}} +const IsoTDist = GenericMvTDist{Float64,ScalMat{Float64},Vector{Float64}} +const DiagTDist = GenericMvTDist{Float64,PDiagMat{Float64,Vector{Float64}},Vector{Float64}} +const MvTDist = GenericMvTDist{Float64,PDMat{Float64,Matrix{Float64}},Vector{Float64}} MvTDist(df::Real, μ::Vector{<:Real}, C::PDMat) = GenericMvTDist(df, μ, C) MvTDist(df::Real, C::PDMat) = GenericMvTDist(df, C) @@ -60,11 +76,13 @@ MvTDist(df::Real, Σ::Matrix{<:Real}) = GenericMvTDist(df, PDMat(Σ)) DiagTDist(df::Real, μ::Vector{<:Real}, C::PDiagMat) = GenericMvTDist(df, μ, C) DiagTDist(df::Real, C::PDiagMat) = GenericMvTDist(df, C) -DiagTDist(df::Real, μ::Vector{<:Real}, σ::Vector{<:Real}) = GenericMvTDist(df, μ, PDiagMat(abs2.(σ))) +DiagTDist(df::Real, μ::Vector{<:Real}, σ::Vector{<:Real}) = + GenericMvTDist(df, μ, PDiagMat(abs2.(σ))) IsoTDist(df::Real, μ::Vector{<:Real}, C::ScalMat) = GenericMvTDist(df, μ, C) IsoTDist(df::Real, C::ScalMat) = GenericMvTDist(df, C) -IsoTDist(df::Real, μ::Vector{<:Real}, σ::Real) = GenericMvTDist(df, μ, ScalMat(length(μ), abs2(σ))) +IsoTDist(df::Real, μ::Vector{<:Real}, σ::Real) = + GenericMvTDist(df, μ, ScalMat(length(μ), abs2(σ))) IsoTDist(df::Real, d::Int, σ::Real) = GenericMvTDist(df, ScalMat(d, abs2(σ))) ## convenient function to construct distributions of proper type based on arguments @@ -88,16 +106,19 @@ mvtdist(df::Real, Σ::Matrix{<:Real}) = MvTDist(df, Σ) length(d::GenericMvTDist) = d.dim -mean(d::GenericMvTDist) = d.df>1 ? d.μ : NaN +mean(d::GenericMvTDist) = d.df > 1 ? d.μ : NaN mode(d::GenericMvTDist) = d.μ modes(d::GenericMvTDist) = [mode(d)] -var(d::GenericMvTDist) = d.df>2 ? (d.df/(d.df-2))*diag(d.Σ) : Float64[NaN for i = 1:d.dim] +var(d::GenericMvTDist) = + d.df > 2 ? (d.df / (d.df - 2)) * diag(d.Σ) : Float64[NaN for i = 1:d.dim] scale(d::GenericMvTDist) = Matrix(d.Σ) -cov(d::GenericMvTDist) = d.df>2 ? (d.df/(d.df-2))*Matrix(d.Σ) : NaN*ones(d.dim, d.dim) +cov(d::GenericMvTDist) = + d.df > 2 ? (d.df / (d.df - 2)) * Matrix(d.Σ) : NaN * ones(d.dim, d.dim) invscale(d::GenericMvTDist) = Matrix(inv(d.Σ)) -invcov(d::GenericMvTDist) = d.df>2 ? ((d.df-2)/d.df)*Matrix(inv(d.Σ)) : NaN*ones(d.dim, d.dim) -logdet_cov(d::GenericMvTDist) = d.df>2 ? logdet((d.df/(d.df-2))*d.Σ) : NaN +invcov(d::GenericMvTDist) = + d.df > 2 ? ((d.df - 2) / d.df) * Matrix(inv(d.Σ)) : NaN * ones(d.dim, d.dim) +logdet_cov(d::GenericMvTDist) = d.df > 2 ? logdet((d.df / (d.df - 2)) * d.Σ) : NaN params(d::GenericMvTDist) = (d.df, d.μ, d.Σ) @inline partype(d::GenericMvTDist{T}) where {T} = T @@ -105,9 +126,10 @@ Base.eltype(::Type{<:GenericMvTDist{T}}) where {T} = T # For entropy calculations see "Multivariate t Distributions and their Applications", S. Kotz & S. 
Nadarajah function entropy(d::GenericMvTDist) - hdf, hdim = 0.5*d.df, 0.5*d.dim - shdfhdim = hdf+hdim - 0.5*logdet(d.Σ) + hdim*log(d.df*pi) + logbeta(hdim, hdf) - loggamma(hdim) + shdfhdim*(digamma(shdfhdim) - digamma(hdf)) + hdf, hdim = 0.5 * d.df, 0.5 * d.dim + shdfhdim = hdf + hdim + 0.5 * logdet(d.Σ) + hdim * log(d.df * pi) + logbeta(hdim, hdf) - loggamma(hdim) + + shdfhdim * (digamma(shdfhdim) - digamma(hdf)) end # evaluation (for GenericMvTDist) @@ -121,7 +143,8 @@ function sqmahal!(r::AbstractArray, d::GenericMvTDist, x::AbstractMatrix{<:Real} invquad!(r, d.Σ, x .- d.μ) end -sqmahal(d::AbstractMvTDist, x::AbstractMatrix{T}) where {T<:Real} = sqmahal!(Vector{T}(undef, size(x, 2)), d, x) +sqmahal(d::AbstractMvTDist, x::AbstractMatrix{T}) where {T<:Real} = + sqmahal!(Vector{T}(undef, size(x, 2)), d, x) function mvtdist_consts(d::AbstractMvTDist) @@ -130,11 +153,13 @@ function mvtdist_consts(d::AbstractMvTDist) hdf = H * d.df hdim = H * d.dim shdfhdim = hdf + hdim - v = loggamma(shdfhdim) - loggamma(hdf) - hdim*log(d.df) - hdim*logpi - H*logdet(d.Σ) + v = + loggamma(shdfhdim) - loggamma(hdf) - hdim * log(d.df) - hdim * logpi - + H * logdet(d.Σ) return (shdfhdim, v) end -function _logpdf(d::AbstractMvTDist, x::AbstractVector{T}) where T<:Real +function _logpdf(d::AbstractMvTDist, x::AbstractVector{T}) where {T<:Real} shdfhdim, v = mvtdist_consts(d) v - shdfhdim * log1p(sqmahal(d, x) / d.df) end @@ -150,7 +175,7 @@ end function gradlogpdf(d::GenericMvTDist, x::AbstractVector{<:Real}) z = x - d.μ - prz = invscale(d)*z + prz = invscale(d) * z -((d.df + d.dim) / (d.df + dot(z, prz))) * prz end @@ -163,8 +188,8 @@ function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractVector{<:Real}) x end -function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{T}) where T<:Real - cols = size(x,2) +function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{T}) where {T<:Real} + cols = size(x, 2) chisqd = Chisq{partype(d)}(d.df) y = Matrix{T}(undef, 1, cols) unwhiten!(d.Σ, randn!(rng, x)) diff --git a/src/multivariate/product.jl b/src/multivariate/product.jl index ada1c4e5f4..8730428486 100644 --- a/src/multivariate/product.jl +++ b/src/multivariate/product.jl @@ -15,28 +15,30 @@ Product(Uniform.(rand(10), 1)) # A 10-dimensional Product from 10 independent `U `Product` is deprecated and will be removed in the next breaking release. Use [`product_distribution`](@ref) instead. 
""" -struct Product{ - S<:ValueSupport, - T<:UnivariateDistribution{S}, - V<:AbstractVector{T}, -} <: MultivariateDistribution{S} +struct Product{S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} <: + MultivariateDistribution{S} v::V - function Product{S,T,V}(v::V) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} + function Product{S,T,V}( + v::V, + ) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} return new{S,T,V}(v) end end -function Product(v::V) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} +function Product( + v::V, +) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} Base.depwarn( "`Product(v)` is deprecated, please use `product_distribution(v)`", :Product, ) - return Product{S, T, V}(v) + return Product{S,T,V}(v) end length(d::Product) = length(d.v) -function Base.eltype(::Type{<:Product{S,T}}) where {S<:ValueSupport, - T<:UnivariateDistribution{S}} +function Base.eltype( + ::Type{<:Product{S,T}}, +) where {S<:ValueSupport,T<:UnivariateDistribution{S}} return eltype(T) end @@ -62,6 +64,8 @@ maximum(d::Product) = map(maximum, d.v) # will be removed when `Product` is removed # it will return a `ProductDistribution` then which is already the default for # higher-dimensional arrays and distributions -function product_distribution(dists::V) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} +function product_distribution( + dists::V, +) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} return Product{S,T,V}(dists) end diff --git a/src/multivariate/vonmisesfisher.jl b/src/multivariate/vonmisesfisher.jl index e4fe981fe6..da3768261d 100644 --- a/src/multivariate/vonmisesfisher.jl +++ b/src/multivariate/vonmisesfisher.jl @@ -17,7 +17,7 @@ struct VonMisesFisher{T<:Real} <: ContinuousMultivariateDistribution κ::T logCκ::T - function VonMisesFisher{T}(μ::Vector{T}, κ::T; checknorm::Bool=true) where T + function VonMisesFisher{T}(μ::Vector{T}, κ::T; checknorm::Bool = true) where {T} if checknorm isunitvec(μ) || error("μ must be a unit vector") end @@ -42,9 +42,11 @@ end show(io::IO, d::VonMisesFisher) = show(io, d, (:μ, :κ)) ### Conversions -convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher) where {T<:Real} = VonMisesFisher{T}(convert(Vector{T}, d.μ), T(d.κ); checknorm=false) +convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher) where {T<:Real} = + VonMisesFisher{T}(convert(Vector{T}, d.μ), T(d.κ); checknorm = false) Base.convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher{T}) where {T<:Real} = d -convert(::Type{VonMisesFisher{T}}, μ::Vector, κ, logCκ) where {T<:Real} = VonMisesFisher{T}(convert(Vector{T}, μ), T(κ)) +convert(::Type{VonMisesFisher{T}}, μ::Vector, κ, logCκ) where {T<:Real} = + VonMisesFisher{T}(convert(Vector{T}, μ), T(κ)) @@ -63,30 +65,29 @@ params(d::VonMisesFisher) = (d.μ, d.κ) function _vmflck(p, κ) T = typeof(κ) - hp = T(p/2) + hp = T(p / 2) q = hp - 1 q * log(κ) - hp * log2π - log(besselix(q, κ)) - κ end _vmflck3(κ) = log(κ) - log2π - κ - log1mexp(-2κ) vmflck(p, κ) = (p == 3 ? 
_vmflck3(κ) : _vmflck(p, κ))
 
-_logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} = d.logCκ + d.κ * dot(d.μ, x)
+_logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} =
+    d.logCκ + d.κ * dot(d.μ, x)
 
 
 ### Sampling
 
 sampler(d::VonMisesFisher) = VonMisesFisherSampler(d.μ, d.κ)
 
-_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector) =
-    _rand!(rng, sampler(d), x)
-_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractMatrix) =
-    _rand!(rng, sampler(d), x)
+_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector) = _rand!(rng, sampler(d), x)
+_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractMatrix) = _rand!(rng, sampler(d), x)
 
 
 ### Estimation
 
 function fit_mle(::Type{<:VonMisesFisher}, X::Matrix{Float64})
-    r = vec(sum(X, dims=2))
+    r = vec(sum(X, dims = 2))
     n = size(X, 2)
     r_nrm = norm(r)
     μ = rmul!(r, 1.0 / r_nrm)
@@ -95,7 +96,8 @@ function fit_mle(::Type{<:VonMisesFisher}, X::Matrix{Float64})
     VonMisesFisher(μ, κ)
 end
 
-fit_mle(::Type{<:VonMisesFisher}, X::Matrix{T}) where {T<:Real} = fit_mle(VonMisesFisher, Float64(X))
+fit_mle(::Type{<:VonMisesFisher}, X::Matrix{T}) where {T<:Real} =
+    fit_mle(VonMisesFisher, Float64.(X))
 
 function _vmf_estkappa(p::Int, ρ::Float64)
     # Using the fixed-point iteration algorithm in the following paper:
diff --git a/src/multivariates.jl b/src/multivariates.jl
index 4d98ebe110..9f95429a03 100644
--- a/src/multivariates.jl
+++ b/src/multivariates.jl
@@ -31,20 +31,23 @@ rand(rng::AbstractRNG, s::Sampleable{Multivariate,Continuous}, n::Int) =
 If ``x`` is a vector, it returns whether ``x`` is within the support of ``d``.
 If ``x`` is a matrix, it returns whether every column in ``x`` is within the support of ``d``.
 """
-insupport{D<:MultivariateDistribution}(d::Union{D, Type{D}}, x::AbstractArray)
+insupport{D<:MultivariateDistribution}(d::Union{D,Type{D}}, x::AbstractArray)
 
-function insupport!(r::AbstractArray, d::Union{D,Type{D}}, X::AbstractMatrix) where D<:MultivariateDistribution
+function insupport!(
+    r::AbstractArray,
+    d::Union{D,Type{D}},
+    X::AbstractMatrix,
+) where {D<:MultivariateDistribution}
     n = length(r)
-    size(X) == (length(d),n) ||
-        throw(DimensionMismatch("Inconsistent array dimensions."))
-    for i in 1:n
+    size(X) == (length(d), n) || throw(DimensionMismatch("Inconsistent array dimensions."))
+    for i = 1:n
         @inbounds r[i] = insupport(d, view(X, :, i))
     end
     return r
 end
 
 insupport(d::Union{D,Type{D}}, X::AbstractMatrix) where {D<:MultivariateDistribution} =
-    insupport!(BitArray(undef, size(X,2)), d, X)
+    insupport!(BitArray(undef, size(X, 2)), d, X)
 
 ## statistics
 
@@ -116,16 +119,18 @@ end
 ##### Specific distributions
 #####
 
-for fname in ["dirichlet.jl",
-    "multinomial.jl",
-    "dirichletmultinomial.jl",
-    "jointorderstatistics.jl",
-    "mvnormal.jl",
-    "mvnormalcanon.jl",
-    "mvlogitnormal.jl",
-    "mvlognormal.jl",
-    "mvtdist.jl",
-    "product.jl", # deprecated
-    "vonmisesfisher.jl"]
+for fname in [
+    "dirichlet.jl",
+    "multinomial.jl",
+    "dirichletmultinomial.jl",
+    "jointorderstatistics.jl",
+    "mvnormal.jl",
+    "mvnormalcanon.jl",
+    "mvlogitnormal.jl",
+    "mvlognormal.jl",
+    "mvtdist.jl",
+    "product.jl", # deprecated
+    "vonmisesfisher.jl",
+]
     include(joinpath("multivariate", fname))
 end
diff --git a/src/namedtuple/productnamedtuple.jl b/src/namedtuple/productnamedtuple.jl
index 95f2fa1d4d..fbcbda714b 100644
--- a/src/namedtuple/productnamedtuple.jl
+++ b/src/namedtuple/productnamedtuple.jl
@@ -43,7 +43,7 @@ struct ProductNamedTupleDistribution{Tnames,Tdists,S<:ValueSupport,eltypes} <:
     dists::NamedTuple{Tnames,Tdists}
 end
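For orientation before the constructor hunks that follow: `ProductNamedTupleDistribution` models independent components addressed by name, and is normally built via the `product_distribution` method reformatted later in this file's diff. A minimal usage sketch, assuming only the constructor and `rand`/`logpdf` interface shown in this file (the component distributions and parameter values are illustrative, not from the patch):

```julia
using Distributions

# independent components bundled in a NamedTuple
d = product_distribution((x = Normal(0.0, 1.0), p = Beta(2.0, 3.0)))

nt = rand(d)   # draws a NamedTuple, e.g. (x = ..., p = ...)
logpdf(d, nt)  # sum of the componentwise log-densities
```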
function ProductNamedTupleDistribution( - dists::NamedTuple{K,V} + dists::NamedTuple{K,V}, ) where {K,V<:Tuple{Distribution,Vararg{Distribution}}} vs = _product_valuesupport(values(dists)) eltypes = _product_namedtuple_eltype(values(dists)) @@ -81,7 +81,7 @@ The function falls back to constructing a [`ProductNamedTupleDistribution`](@ref distribution but specialized methods can be defined. """ function product_distribution( - dists::NamedTuple{<:Any,<:Tuple{Distribution,Vararg{Distribution}}} + dists::NamedTuple{<:Any,<:Tuple{Distribution,Vararg{Distribution}}}, ) return ProductNamedTupleDistribution(dists) end @@ -142,7 +142,8 @@ std(d::ProductNamedTupleDistribution) = map(std, d.dists) entropy(d::ProductNamedTupleDistribution) = sum(entropy, values(d.dists)) function kldivergence( - d1::ProductNamedTupleDistribution{K}, d2::ProductNamedTupleDistribution + d1::ProductNamedTupleDistribution{K}, + d2::ProductNamedTupleDistribution, ) where {K} _named_fields_match(d1.dists, d2.dists) || throw( ArgumentError( @@ -164,7 +165,9 @@ function Base.rand(rng::AbstractRNG, d::ProductNamedTupleDistribution{K}) where return NamedTuple{K}(map(Base.Fix1(rand, rng), d.dists)) end function Base.rand( - rng::AbstractRNG, d::ProductNamedTupleDistribution{K}, dims::Dims + rng::AbstractRNG, + d::ProductNamedTupleDistribution{K}, + dims::Dims, ) where {K} return convert(AbstractArray{<:NamedTuple{K}}, _rand(rng, sampler(d), dims)) end diff --git a/src/pdfnorm.jl b/src/pdfnorm.jl index 1bfba6ee4f..5174990584 100644 --- a/src/pdfnorm.jl +++ b/src/pdfnorm.jl @@ -15,7 +15,7 @@ pdfsquaredL2norm(d::Bernoulli) = @evalpoly d.p 1 -2 2 function pdfsquaredL2norm(d::Beta) α, β = params(d) - z = beta(2 * α - 1, 2 * β - 1) / beta(α, β) ^ 2 + z = beta(2 * α - 1, 2 * β - 1) / beta(α, β)^2 # L2 norm of the pdf converges only for α > 0.5 and β > 0.5 return α > 0.5 && β > 0.5 ? z : oftype(z, Inf) end @@ -26,14 +26,14 @@ pdfsquaredL2norm(d::Cauchy) = inv2π / d.σ function pdfsquaredL2norm(d::Chi) ν = d.ν - z = (2 ^ (1 - ν) * gamma((2 * ν - 1) / 2)) / gamma(ν / 2) ^ 2 + z = (2^(1 - ν) * gamma((2 * ν - 1) / 2)) / gamma(ν / 2)^2 # L2 norm of the pdf converges only for ν > 0.5 return d.ν > 0.5 ? z : oftype(z, Inf) end function pdfsquaredL2norm(d::Chisq) ν = d.ν - z = gamma(d.ν - 1) / (gamma(d.ν / 2) ^ 2 * 2 ^ d.ν) + z = gamma(d.ν - 1) / (gamma(d.ν / 2)^2 * 2^d.ν) # L2 norm of the pdf converges only for ν > 1 return ν > 1 ? z : oftype(z, Inf) end @@ -44,12 +44,12 @@ pdfsquaredL2norm(d::Exponential) = 1 / (2 * d.θ) function pdfsquaredL2norm(d::Gamma) α, θ = params(d) - z = (2^(1 - 2 * α) * gamma(2 * α - 1)) / (gamma(α) ^ 2 * θ) + z = (2^(1 - 2 * α) * gamma(2 * α - 1)) / (gamma(α)^2 * θ) # L2 norm of the pdf converges only for α > 0.5 return α > 0.5 ? 
z : oftype(z, Inf) end -pdfsquaredL2norm(d::Geometric) = d.p ^ 2 / (2 * d.p - d.p ^ 2) +pdfsquaredL2norm(d::Geometric) = d.p^2 / (2 * d.p - d.p^2) pdfsquaredL2norm(d::Logistic) = 1 / (6 * d.θ) diff --git a/src/product.jl b/src/product.jl index 7129a097be..57730a8d57 100644 --- a/src/product.jl +++ b/src/product.jl @@ -13,7 +13,11 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar function ProductDistribution{N,M,D}(dists::D) where {N,M,D} if isempty(dists) - throw(ArgumentError("a product distribution must consist of at least one distribution")) + throw( + ArgumentError( + "a product distribution must consist of at least one distribution", + ), + ) end return new{N,M,D,_product_valuesupport(dists),_product_eltype(dists)}( dists, @@ -22,11 +26,18 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar end end -function ProductDistribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}) where {M,N} +function ProductDistribution( + dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}, +) where {M,N} return ProductDistribution{M + N,M,typeof(dists)}(dists) end -function ProductDistribution(dists::Tuple{Distribution{<:ArrayLikeVariate{M}},Vararg{Distribution{<:ArrayLikeVariate{M}}}}) where {M} +function ProductDistribution( + dists::Tuple{ + Distribution{<:ArrayLikeVariate{M}}, + Vararg{Distribution{<:ArrayLikeVariate{M}}}, + }, +) where {M} return ProductDistribution{M + 1,M,typeof(dists)}(dists) end @@ -50,15 +61,24 @@ function __product_promote_type(f::F, ::Type{T}) where {F,T} ) end -function _product_size(dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}) where {M,N} +function _product_size( + dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}, +) where {M,N} size_d = size(first(dists)) - all(size(d) == size_d for d in dists) || error("all distributions must be of the same size") + all(size(d) == size_d for d in dists) || + error("all distributions must be of the same size") size_dists = size(dists) return ntuple(i -> i <= M ? size_d[i] : size_dists[i-M], Val(M + N)) end -function _product_size(dists::Tuple{Distribution{<:ArrayLikeVariate{M}},Vararg{Distribution{<:ArrayLikeVariate{M}}, N}}) where {M,N} +function _product_size( + dists::Tuple{ + Distribution{<:ArrayLikeVariate{M}}, + Vararg{Distribution{<:ArrayLikeVariate{M}},N}, + }, +) where {M,N} size_d = size(first(dists)) - all(size(d) == size_d for d in dists) || error("all distributions must be of the same size") + all(size(d) == size_d for d in dists) || + error("all distributions must be of the same size") return ntuple(i -> i <= M ? 
size_d[i] : N + 1, Val(M + 1)) end @@ -67,10 +87,13 @@ const VectorOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{ const MatrixOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{2,0,D,S,T} const ArrayOfUnivariateDistribution{N,D,S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} -const FillArrayOfUnivariateDistribution{N,D<:Fill{<:Any,N},S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} +const FillArrayOfUnivariateDistribution{N,D<:Fill{<:Any,N},S<:ValueSupport,T} = + ProductDistribution{N,0,D,S,T} ## General definitions -function Base.eltype(::Type{<:ProductDistribution{<:Any,<:Any,<:Any,<:ValueSupport,T}}) where {T} +function Base.eltype( + ::Type{<:ProductDistribution{<:Any,<:Any,<:Any,<:ValueSupport,T}}, +) where {T} return T end @@ -88,7 +111,10 @@ std(d::VectorOfUnivariateDistribution{<:Tuple}) = collect(map(std, d.dists)) var(d::ArrayOfUnivariateDistribution) = map(var, d.dists) var(d::VectorOfUnivariateDistribution{<:Tuple}) = collect(map(var, d.dists)) -function insupport(d::ArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}) where {N} +function insupport( + d::ArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real,N}, +) where {N} size(d) == size(x) && all(insupport(vi, xi) for (vi, xi) in zip(d.dists, x)) end @@ -147,13 +173,15 @@ end # more efficient implementation of `_logpdf` for `Fill` array of univariate distributions # we have to fix a method ambiguity function _logpdf( - d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N} + d::FillArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real,N}, ) where {N} return __logpdf(d, x) end _logpdf(d::FillArrayOfUnivariateDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) function __logpdf( - d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N} + d::FillArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real,N}, ) where {N} return @inbounds loglikelihood(first(d.dists), x) end @@ -174,15 +202,11 @@ end # we have to fix some method ambiguities _logpdf(d::ProductDistribution{N}, x::AbstractArray{<:Real,N}) where {N} = __logpdf(d, x) _logpdf(d::ProductDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) -function __logpdf( - d::ProductDistribution{N,M}, - x::AbstractArray{<:Real,N}, -) where {N,M} +function __logpdf(d::ProductDistribution{N,M}, x::AbstractArray{<:Real,N}) where {N,M} # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020) # to compute `sum(logpdf.(d.dists, eachvariate))` - @inbounds broadcasted = Broadcast.broadcasted( - logpdf, d.dists, eachvariate(x, ArrayLikeVariate{M}), - ) + @inbounds broadcasted = + Broadcast.broadcasted(logpdf, d.dists, eachvariate(x, ArrayLikeVariate{M})) return sum(Broadcast.instantiate(broadcasted)) end @@ -198,16 +222,10 @@ end # more efficient implementation of `_logpdf` for `Fill` arrays of distributions # we have to fix a method ambiguity -function _logpdf( - d::ProductDistribution{N,M,<:Fill}, - x::AbstractArray{<:Real,N}, -) where {N,M} +function _logpdf(d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}) where {N,M} return __logpdf(d, x) end -function _logpdf( - d::ProductDistribution{2,M,<:Fill}, - x::AbstractMatrix{<:Real}, -) where {M} +function _logpdf(d::ProductDistribution{2,M,<:Fill}, x::AbstractMatrix{<:Real}) where {M} return __logpdf(d, x) end function __logpdf( @@ -231,7 +249,8 @@ function product_distribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVar end function product_distribution( - 
dist::Distribution{ArrayLikeVariate{N}}, dists::Distribution{ArrayLikeVariate{N}}..., + dist::Distribution{ArrayLikeVariate{N}}, + dists::Distribution{ArrayLikeVariate{N}}..., ) where {N} return ProductDistribution((dist, dists...)) end diff --git a/src/qq.jl b/src/qq.jl index 6431ed2bf9..d9744178c6 100644 --- a/src/qq.jl +++ b/src/qq.jl @@ -1,11 +1,11 @@ -struct QQPair{U<:AbstractVector, V<:AbstractVector} +struct QQPair{U<:AbstractVector,V<:AbstractVector} qx::U qy::V end function qqbuild(x::AbstractVector, y::AbstractVector) n = min(length(x), length(y)) - grid = [0.0:(1 / (n - 1)):1.0;] + grid = [0.0:(1/(n-1)):1.0;] qx = quantile(x, grid) qy = quantile(y, grid) return QQPair(qx, qy) @@ -27,7 +27,7 @@ https://ch.mathworks.com/help/stats/probplot.html """ function ppoints(n::Int) m = 2 * n - return (1:2:(m - 1)) ./ m + return (1:2:(m-1)) ./ m end diff --git a/src/quantilealgs.jl b/src/quantilealgs.jl index 8aa9f1b89a..39f5c6f350 100644 --- a/src/quantilealgs.jl +++ b/src/quantilealgs.jl @@ -1,6 +1,11 @@ # Various algorithms for computing quantile -function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx::T) where {T<:Real} +function quantile_bisect( + d::ContinuousUnivariateDistribution, + p::Real, + lx::T, + rx::T, +) where {T<:Real} rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]")) # In some special cases, e.g. #1501, rx == lx` @@ -18,10 +23,13 @@ function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx # find quantile using bisect algorithm cl = cdf(d, lx) cr = cdf(d, rx) - cl <= p <= cr || - throw(ArgumentError("[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`")) + cl <= p <= cr || throw( + ArgumentError( + "[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`", + ), + ) while rx - lx > tol - m = (lx + rx)/2 + m = (lx + rx) / 2 c = cdf(d, m) if p > c cl = c @@ -31,7 +39,7 @@ function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx rx = m end end - return (lx + rx)/2 + return (lx + rx) / 2 end function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::Real, rx::Real) @@ -47,12 +55,17 @@ quantile_bisect(d::ContinuousUnivariateDistribution, p::Real) = # Distribution, with Application to the Inverse Gaussian Distribution # http://www.statsci.org/smyth/pubs/qinvgaussPreprint.pdf -function quantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real=mode(d), tol::Real=1e-12) +function quantile_newton( + d::ContinuousUnivariateDistribution, + p::Real, + xs::Real = mode(d), + tol::Real = 1e-12, +) x = xs + (p - cdf(d, xs)) / pdf(d, xs) T = typeof(x) if 0 < p < 1 x0 = T(xs) - while abs(x-x0) > max(abs(x),abs(x0)) * tol + while abs(x - x0) > max(abs(x), abs(x0)) * tol x0 = x x = x0 + (p - cdf(d, x0)) / pdf(d, x0) end @@ -66,14 +79,19 @@ function quantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real= end end -function cquantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real=mode(d), tol::Real=1e-12) - x = xs + (ccdf(d, xs)-p) / pdf(d, xs) +function cquantile_newton( + d::ContinuousUnivariateDistribution, + p::Real, + xs::Real = mode(d), + tol::Real = 1e-12, +) + x = xs + (ccdf(d, xs) - p) / pdf(d, xs) T = typeof(x) if 0 < p < 1 x0 = T(xs) - while abs(x-x0) > max(abs(x),abs(x0)) * tol + while abs(x - x0) > max(abs(x), abs(x0)) * tol x0 = x - x = x0 + (ccdf(d, x0)-p) / pdf(d, x0) + x = x0 + (ccdf(d, x0) - p) / pdf(d, x0) end return x elseif p == 1 @@ -85,21 +103,26 @@ function 
cquantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real end end -function invlogcdf_newton(d::ContinuousUnivariateDistribution, lp::Real, xs::Real=mode(d), tol::Real=1e-12) - T = typeof(lp - logpdf(d,xs)) +function invlogcdf_newton( + d::ContinuousUnivariateDistribution, + lp::Real, + xs::Real = mode(d), + tol::Real = 1e-12, +) + T = typeof(lp - logpdf(d, xs)) if -Inf < lp < 0 x0 = T(xs) - if lp < logcdf(d,x0) - x = x0 - exp(lp - logpdf(d,x0) + logexpm1(max(logcdf(d,x0)-lp,0))) - while abs(x-x0) >= max(abs(x),abs(x0)) * tol + if lp < logcdf(d, x0) + x = x0 - exp(lp - logpdf(d, x0) + logexpm1(max(logcdf(d, x0) - lp, 0))) + while abs(x - x0) >= max(abs(x), abs(x0)) * tol x0 = x - x = x0 - exp(lp - logpdf(d,x0) + logexpm1(max(logcdf(d,x0)-lp,0))) + x = x0 - exp(lp - logpdf(d, x0) + logexpm1(max(logcdf(d, x0) - lp, 0))) end else - x = x0 + exp(lp - logpdf(d,x0) + log1mexp(min(logcdf(d,x0)-lp,0))) - while abs(x-x0) >= max(abs(x),abs(x0))*tol + x = x0 + exp(lp - logpdf(d, x0) + log1mexp(min(logcdf(d, x0) - lp, 0))) + while abs(x - x0) >= max(abs(x), abs(x0)) * tol x0 = x - x = x0 + exp(lp - logpdf(d,x0) + log1mexp(min(logcdf(d,x0)-lp,0))) + x = x0 + exp(lp - logpdf(d, x0) + log1mexp(min(logcdf(d, x0) - lp, 0))) end end return x @@ -112,21 +135,26 @@ function invlogcdf_newton(d::ContinuousUnivariateDistribution, lp::Real, xs::Rea end end -function invlogccdf_newton(d::ContinuousUnivariateDistribution, lp::Real, xs::Real=mode(d), tol::Real=1e-12) - T = typeof(lp - logpdf(d,xs)) +function invlogccdf_newton( + d::ContinuousUnivariateDistribution, + lp::Real, + xs::Real = mode(d), + tol::Real = 1e-12, +) + T = typeof(lp - logpdf(d, xs)) if -Inf < lp < 0 x0 = T(xs) - if lp < logccdf(d,x0) - x = x0 + exp(lp - logpdf(d,x0) + logexpm1(max(logccdf(d,x0)-lp,0))) - while abs(x-x0) >= max(abs(x),abs(x0)) * tol + if lp < logccdf(d, x0) + x = x0 + exp(lp - logpdf(d, x0) + logexpm1(max(logccdf(d, x0) - lp, 0))) + while abs(x - x0) >= max(abs(x), abs(x0)) * tol x0 = x - x = x0 + exp(lp - logpdf(d,x0) + logexpm1(max(logccdf(d,x0)-lp,0))) + x = x0 + exp(lp - logpdf(d, x0) + logexpm1(max(logccdf(d, x0) - lp, 0))) end else - x = x0 - exp(lp - logpdf(d,x0) + log1mexp(min(logccdf(d,x0)-lp,0))) - while abs(x-x0) >= max(abs(x),abs(x0)) * tol + x = x0 - exp(lp - logpdf(d, x0) + log1mexp(min(logccdf(d, x0) - lp, 0))) + while abs(x - x0) >= max(abs(x), abs(x0)) * tol x0 = x - x = x0 - exp(lp - logpdf(d,x0) + log1mexp(min(logccdf(d,x0)-lp,0))) + x = x0 - exp(lp - logpdf(d, x0) + log1mexp(min(logccdf(d, x0) - lp, 0))) end end return x @@ -143,9 +171,9 @@ end # is computed using the newton method macro quantile_newton(D) esc(quote - quantile(d::$D, p::Real) = quantile_newton(d,p) - cquantile(d::$D, p::Real) = cquantile_newton(d,p) - invlogcdf(d::$D, lp::Real) = invlogcdf_newton(d,lp) - invlogccdf(d::$D, lp::Real) = invlogccdf_newton(d,lp) + quantile(d::$D, p::Real) = quantile_newton(d, p) + cquantile(d::$D, p::Real) = cquantile_newton(d, p) + invlogcdf(d::$D, lp::Real) = invlogcdf_newton(d, lp) + invlogccdf(d::$D, lp::Real) = invlogccdf_newton(d, lp) end) end diff --git a/src/reshaped.jl b/src/reshaped.jl index 23f35fb46a..c1f492ce63 100644 --- a/src/reshaped.jl +++ b/src/reshaped.jl @@ -8,19 +8,26 @@ It is recommended to not use `reshape` instead of the constructor of `ReshapedDi directly since `reshape` can return more optimized distributions for specific types of `d` and number of dimensions `N`. 
""" -struct ReshapedDistribution{N,S<:ValueSupport,D<:Distribution{<:ArrayLikeVariate,S}} <: Distribution{ArrayLikeVariate{N},S} +struct ReshapedDistribution{N,S<:ValueSupport,D<:Distribution{<:ArrayLikeVariate,S}} <: + Distribution{ArrayLikeVariate{N},S} dist::D dims::Dims{N} - function ReshapedDistribution(dist::Distribution{<:ArrayLikeVariate,S}, dims::Dims{N}) where {N,S<:ValueSupport} + function ReshapedDistribution( + dist::Distribution{<:ArrayLikeVariate,S}, + dims::Dims{N}, + ) where {N,S<:ValueSupport} _reshape_check_dims(dist, dims) return new{N,S,typeof(dist)}(dist, dims) end end function _reshape_check_dims(dist::Distribution{<:ArrayLikeVariate}, dims::Dims) - (all(d > 0 for d in dims) && length(dist) == prod(dims)) || - throw(ArgumentError("dimensions $(dims) do not match size of source distribution $(size(dist))")) + (all(d > 0 for d in dims) && length(dist) == prod(dims)) || throw( + ArgumentError( + "dimensions $(dims) do not match size of source distribution $(size(dist))", + ), + ) end Base.size(d::ReshapedDistribution) = d.dims @@ -62,8 +69,7 @@ end x::AbstractArray{<:Real,N}, ) where {N} @boundscheck begin - size(x) == size(d) || - throw(DimensionMismatch("inconsistent array dimensions")) + size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end dist = d.dist return @inbounds loglikelihood(dist, reshape(x, size(dist))) @@ -73,10 +79,11 @@ end x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin - M > N || - throw(DimensionMismatch( - "number of dimensions of `x` ($M) must be greater than number of dimensions of `d` ($N)" - )) + M > N || throw( + DimensionMismatch( + "number of dimensions of `x` ($M) must be greater than number of dimensions of `d` ($N)", + ), + ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -89,7 +96,7 @@ end function _rand!( rng::AbstractRNG, d::ReshapedDistribution{N}, - x::AbstractArray{<:Real,N} + x::AbstractArray{<:Real,N}, ) where {N} dist = d.dist @inbounds rand!(rng, dist, reshape(x, size(dist))) diff --git a/src/samplers.jl b/src/samplers.jl index fa667fff2b..21c954a6f7 100644 --- a/src/samplers.jl +++ b/src/samplers.jl @@ -14,19 +14,20 @@ Samples from the sampler and returns the result. 
""" rand(::AbstractRNG, ::Sampleable) -for fname in ["aliastable.jl", - "binomial.jl", - "poissonbinomial.jl", - "poisson.jl", - "exponential.jl", - "gamma.jl", - "multinomial.jl", - "vonmises.jl", - "vonmisesfisher.jl", - "discretenonparametric.jl", - "categorical.jl", - "productnamedtuple.jl", - ] +for fname in [ + "aliastable.jl", + "binomial.jl", + "poissonbinomial.jl", + "poisson.jl", + "exponential.jl", + "gamma.jl", + "multinomial.jl", + "vonmises.jl", + "vonmisesfisher.jl", + "discretenonparametric.jl", + "categorical.jl", + "productnamedtuple.jl", +] include(joinpath("samplers", fname)) end diff --git a/src/samplers/aliastable.jl b/src/samplers/aliastable.jl index 1f633cf716..495d83f957 100644 --- a/src/samplers/aliastable.jl +++ b/src/samplers/aliastable.jl @@ -1,5 +1,5 @@ struct AliasTable <: Sampleable{Univariate,Discrete} - at::AliasTables.AliasTable{UInt64, Int} + at::AliasTables.AliasTable{UInt64,Int} AliasTable(probs::AbstractVector{<:Real}) = new(AliasTables.AliasTable(probs)) end ncategories(s::AliasTable) = length(s.at) diff --git a/src/samplers/binomial.jl b/src/samplers/binomial.jl index 340a07424c..34b5167567 100644 --- a/src/samplers/binomial.jl +++ b/src/samplers/binomial.jl @@ -1,7 +1,7 @@ # compute probability vector of a Binomial distribution function binompvec(n::Int, p::Float64) - pv = Vector{Float64}(undef, n+1) + pv = Vector{Float64}(undef, n + 1) if p == 0.0 fill!(pv, 0.0) pv[1] = 1.0 @@ -11,7 +11,7 @@ function binompvec(n::Int, p::Float64) else q = 1.0 - p a = p / q - @inbounds pv[1] = pk = q ^ n + @inbounds pv[1] = pk = q^n for k = 1:n @inbounds pv[k+1] = (pk *= ((n - k + 1) / k) * a) end @@ -36,10 +36,10 @@ BinomialGeomSampler() = BinomialGeomSampler(false, 0, 0.0) function BinomialGeomSampler(n::Int, prob::Float64) if prob <= 0.5 comp = false - scale = -1.0/log1p(-prob) + scale = -1.0 / log1p(-prob) else comp = true - scale = prob < 1.0 ? -1.0/log(prob) : Inf + scale = prob < 1.0 ? -1.0 / log(prob) : Inf end BinomialGeomSampler(comp, n, scale) end @@ -54,7 +54,7 @@ function rand(rng::AbstractRNG, s::BinomialGeomSampler) if v > n # in case when v is very large or infinity break end - y += ceil(Int,v) + y += ceil(Int, v) if y > n break end @@ -93,9 +93,25 @@ struct BinomialTPESampler <: Sampleable{Univariate,Discrete} λR::Float64 end -BinomialTPESampler() = - BinomialTPESampler(false, 0, 0., 0., 0., 0., 0, - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.) 
+BinomialTPESampler() = BinomialTPESampler( + false, + 0, + 0.0, + 0.0, + 0.0, + 0.0, + 0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, +) function BinomialTPESampler(n::Int, prob::Float64) if prob <= 0.5 @@ -108,79 +124,78 @@ function BinomialTPESampler(n::Int, prob::Float64) q = prob end - nrq = n*r*q - fM = (n+1)*r # + nrq = n * r * q + fM = (n + 1) * r # M = floor(fM) Mi = round(Integer, M) - p1 = floor(2.195*sqrt(nrq)-4.6*q) + 0.5 - xM = M+0.5 - xL = xM-p1 - xR = xM+p1 - c = 0.134 + 20.5/(15.3+M) - a = (fM-xL)/(fM-xL*r) # - λL = a*(1.0 + 0.5*a) - a = (xR-fM)/(xR*q) # - λR = a*(1.0 + 0.5*a) - p2 = p1*(1.0 + 2.0*c) - p3 = p2 + c/λL - p4 = p3 + c/λR + p1 = floor(2.195 * sqrt(nrq) - 4.6 * q) + 0.5 + xM = M + 0.5 + xL = xM - p1 + xR = xM + p1 + c = 0.134 + 20.5 / (15.3 + M) + a = (fM - xL) / (fM - xL * r) # + λL = a * (1.0 + 0.5 * a) + a = (xR - fM) / (xR * q) # + λR = a * (1.0 + 0.5 * a) + p2 = p1 * (1.0 + 2.0 * c) + p3 = p2 + c / λL + p4 = p3 + c / λR - BinomialTPESampler(comp,n,r,q,nrq,M,Mi,p1,p2,p3,p4, - xM,xL,xR,c,λL,λR) + BinomialTPESampler(comp, n, r, q, nrq, M, Mi, p1, p2, p3, p4, xM, xL, xR, c, λL, λR) end function rand(rng::AbstractRNG, s::BinomialTPESampler) y = 0 while true # Step 1 - u = s.p4*rand(rng) + u = s.p4 * rand(rng) v = rand(rng) if u <= s.p1 - y = floor(Int,s.xM-s.p1*v+u) + y = floor(Int, s.xM - s.p1 * v + u) # Goto 6 break elseif u <= s.p2 # Step 2 - x = s.xL + (u-s.p1)/s.c - v = v*s.c+1.0-abs(s.M-x+0.5)/s.p1 + x = s.xL + (u - s.p1) / s.c + v = v * s.c + 1.0 - abs(s.M - x + 0.5) / s.p1 if v > 1 # Goto 1 continue end - y = floor(Int,x) + y = floor(Int, x) # Goto 5 elseif u <= s.p3 # Step 3 - y = floor(Int,s.xL + log(v)/s.λL) + y = floor(Int, s.xL + log(v) / s.λL) if y < 0 # Goto 1 continue end - v *= (u-s.p2)*s.λL + v *= (u - s.p2) * s.λL # Goto 5 else # Step 4 - y = floor(Int,s.xR-log(v)/s.λR) + y = floor(Int, s.xR - log(v) / s.λR) if y > s.n # Goto 1 continue end - v *= (u-s.p3)*s.λR + v *= (u - s.p3) * s.λR # Goto 5 end # Step 5 # 5.0 - k = abs(y-s.Mi) - if (k <= 20) || (k >= 0.5*s.nrq-1) + k = abs(y - s.Mi) + if (k <= 20) || (k >= 0.5 * s.nrq - 1) # 5.1 - S = s.r/s.q - a = S*(s.n+1) + S = s.r / s.q + a = S * (s.n + 1) F = 1.0 if s.Mi < y for i = (s.Mi+1):y - F *= a/i-S + F *= a / i - S end elseif s.Mi > y for i = (y+1):s.Mi - F /= a/i-S + F /= a / i - S end end if v > F @@ -191,8 +206,8 @@ function rand(rng::AbstractRNG, s::BinomialTPESampler) break else # 5.2 - ρ = (k/s.nrq)*((k*(k/3.0+0.625)+1.0/6.0)/s.nrq+0.5) - t = -k^2/(2.0*s.nrq) + ρ = (k / s.nrq) * ((k * (k / 3.0 + 0.625) + 1.0 / 6.0) / s.nrq + 0.5) + t = -k^2 / (2.0 * s.nrq) A = log(v) if A < t - ρ # Goto 6 @@ -203,13 +218,20 @@ function rand(rng::AbstractRNG, s::BinomialTPESampler) end # 5.3 - x1 = Float64(y+1) - f1 = Float64(s.Mi+1) - z = Float64(s.n+1-s.Mi) - w = Float64(s.n-y+1) + x1 = Float64(y + 1) + f1 = Float64(s.Mi + 1) + z = Float64(s.n + 1 - s.Mi) + w = Float64(s.n - y + 1) - if A > (s.xM*log(f1/x1) + ((s.n-s.Mi)+0.5)*log(z/w) + (y-s.Mi)*log(w*s.r/(x1*s.q)) + - lstirling_asym(f1) + lstirling_asym(z) + lstirling_asym(x1) + lstirling_asym(w)) + if A > ( + s.xM * log(f1 / x1) + + ((s.n - s.Mi) + 0.5) * log(z / w) + + (y - s.Mi) * log(w * s.r / (x1 * s.q)) + + lstirling_asym(f1) + + lstirling_asym(z) + + lstirling_asym(x1) + + lstirling_asym(w) + ) # Goto 1 continue end diff --git a/src/samplers/categorical.jl b/src/samplers/categorical.jl index ae4f472d21..287d0c87c1 100644 --- a/src/samplers/categorical.jl +++ b/src/samplers/categorical.jl @@ -1,10 +1,10 @@ #### naive 
sampling -struct CategoricalDirectSampler{T<:Real,Ts<:AbstractVector{T}} <: Sampleable{Univariate,Discrete} +struct CategoricalDirectSampler{T<:Real,Ts<:AbstractVector{T}} <: + Sampleable{Univariate,Discrete} prob::Ts - function CategoricalDirectSampler{T,Ts}(p::Ts) where { - T<:Real,Ts<:AbstractVector{T}} + function CategoricalDirectSampler{T,Ts}(p::Ts) where {T<:Real,Ts<:AbstractVector{T}} isempty(p) && throw(ArgumentError("p is empty.")) new{T,Ts}(p) end @@ -22,7 +22,7 @@ function rand(rng::AbstractRNG, s::CategoricalDirectSampler) c = p[1] u = rand(rng) while c < u && i < n - c += p[i += 1] + c += p[i+=1] end return i end diff --git a/src/samplers/discretenonparametric.jl b/src/samplers/discretenonparametric.jl index ce964a3ef8..3bc02bd8b5 100644 --- a/src/samplers/discretenonparametric.jl +++ b/src/samplers/discretenonparametric.jl @@ -4,20 +4,24 @@ Data structure for efficiently sampling from an arbitrary probability mass function defined by support `xs` and probabilities `ps`. """ -struct DiscreteNonParametricSampler{T<:Real, S<:AbstractVector{T}, A<:AliasTable} <: Sampleable{Univariate,Discrete} +struct DiscreteNonParametricSampler{T<:Real,S<:AbstractVector{T},A<:AliasTable} <: + Sampleable{Univariate,Discrete} support::S aliastable::A - function DiscreteNonParametricSampler{T,S}(support::S, probs::AbstractVector{<:Real} + function DiscreteNonParametricSampler{T,S}( + support::S, + probs::AbstractVector{<:Real}, ) where {T<:Real,S<:AbstractVector{T}} aliastable = AliasTable(probs) new{T,S,typeof(aliastable)}(support, aliastable) end end -DiscreteNonParametricSampler(support::S, probs::AbstractVector{<:Real} - ) where {T<:Real,S<:AbstractVector{T}} = - DiscreteNonParametricSampler{T,S}(support, probs) +DiscreteNonParametricSampler( + support::S, + probs::AbstractVector{<:Real}, +) where {T<:Real,S<:AbstractVector{T}} = DiscreteNonParametricSampler{T,S}(support, probs) rand(rng::AbstractRNG, s::DiscreteNonParametricSampler) = (@inbounds v = s.support[rand(rng, s.aliastable)]; v) diff --git a/src/samplers/gamma.jl b/src/samplers/gamma.jl index 1cd62f22ba..6719eb51f4 100644 --- a/src/samplers/gamma.jl +++ b/src/samplers/gamma.jl @@ -24,54 +24,65 @@ function GammaGDSampler(g::Gamma{T}) where {T} # Step 1 s2 = a - 0.5 s = sqrt(s2) - i2s = 0.5/s + i2s = 0.5 / s d = 5.656854249492381 - 12.0s # 4*sqrt(2) - 12s # Step 4 - ia = 1.0/a - q0 = ia*@horner(ia, - 0.0416666664, - 0.0208333723, - 0.0079849875, - 0.0015746717, - -0.0003349403, - 0.0003340332, - 0.0006053049, - -0.0004701849, - 0.0001710320) + ia = 1.0 / a + q0 = + ia * @horner( + ia, + 0.0416666664, + 0.0208333723, + 0.0079849875, + 0.0015746717, + -0.0003349403, + 0.0003340332, + 0.0006053049, + -0.0004701849, + 0.0001710320 + ) if a <= 3.686 b = 0.463 + s + 0.178s2 σ = 1.235 - c = 0.195/s - 0.079 + 0.16s + c = 0.195 / s - 0.079 + 0.16s elseif a <= 13.022 b = 1.654 + 0.0076s2 - σ = 1.68/s + 0.275 - c = 0.062/s + 0.024 + σ = 1.68 / s + 0.275 + c = 0.062 / s + 0.024 else b = 1.77 σ = 0.75 - c = 0.1515/s + c = 0.1515 / s end GammaGDSampler(T(a), T(s2), T(s), T(i2s), T(d), T(q0), T(b), T(σ), T(c), scale(g)) end function calc_q(s::GammaGDSampler, t) - v = t*s.i2s + v = t * s.i2s if abs(v) > 0.25 - return s.q0 - s.s*t + 0.25*t*t + 2.0*s.s2*log1p(v) + return s.q0 - s.s * t + 0.25 * t * t + 2.0 * s.s2 * log1p(v) else - return s.q0 + 0.5*t*t*(v*@horner(v, - 0.333333333, - -0.249999949, - 0.199999867, - -0.1666774828, - 0.142873973, - -0.124385581, - 0.110368310, - -0.112750886, - 0.10408986)) + return s.q0 + + 0.5 * + t * + t * + ( + v * 
@horner( + v, + 0.333333333, + -0.249999949, + 0.199999867, + -0.1666774828, + 0.142873973, + -0.124385581, + 0.110368310, + -0.112750886, + 0.10408986 + ) + ) end end @@ -79,18 +90,18 @@ function rand(rng::AbstractRNG, s::GammaGDSampler) # Step 2 t = randn(rng) x = s.s + 0.5t - t >= 0.0 && return x*x*s.scale + t >= 0.0 && return x * x * s.scale # Step 3 u = rand(rng) - s.d*u <= t*t*t && return x*x*s.scale + s.d * u <= t * t * t && return x * x * s.scale # Step 5 if x > 0.0 # Step 6 q = calc_q(s, t) # Step 7 - log1p(-u) <= q && return x*x*s.scale + log1p(-u) <= q && return x * x * s.scale end # Step 8 @@ -101,7 +112,7 @@ function rand(rng::AbstractRNG, s::GammaGDSampler) while true e = randexp(rng) u = 2.0rand(rng) - 1.0 - t = s.b + e*s.σ*sign(u) + t = s.b + e * s.σ * sign(u) # Step 9 t ≥ -0.718_744_837_717_19 && break end @@ -110,12 +121,12 @@ function rand(rng::AbstractRNG, s::GammaGDSampler) q = calc_q(s, t) # Step 11 - (q > 0.0) && (s.c*abs(u) ≤ expm1(q)*exp(e-0.5t*t)) && break + (q > 0.0) && (s.c * abs(u) ≤ expm1(q) * exp(e - 0.5t * t)) && break end # Step 12 - x = s.s+0.5t - return x*x*s.scale + x = s.s + 0.5t + return x * x * s.scale end # "Computer methods for sampling from gamma, beta, poisson and bionomial distributions" @@ -134,23 +145,23 @@ end function GammaGSSampler(d::Gamma) a = shape(d) ia = 1.0 / a - b = 1.0+0.36787944117144233 * a + b = 1.0 + 0.36787944117144233 * a GammaGSSampler(a, ia, b, scale(d)) end function rand(rng::AbstractRNG, s::GammaGSSampler) while true # step 1 - p = s.b*rand(rng) + p = s.b * rand(rng) e = randexp(rng) if p <= 1.0 # step 2 - x = exp(log(p)*s.ia) - e < x || return s.scale*x + x = exp(log(p) * s.ia) + e < x || return s.scale * x else # step 3 - x = -log(s.ia*(s.b-p)) - e < log(x)*(1.0-s.a) || return s.scale*x + x = -log(s.ia * (s.b - p)) + e < log(x) * (1.0 - s.a) || return s.scale * x end end end @@ -172,14 +183,14 @@ end function GammaMTSampler(g::Gamma) # Setup (Step 1) - d = shape(g) - 1//3 + d = shape(g) - 1 // 3 c = inv(3 * sqrt(d)) # Pre-compute scaling factor κ = d * scale(g) # We also pre-compute the factor in the squeeze function - return GammaMTSampler(promote(d, c, κ, 331//10_000)...) + return GammaMTSampler(promote(d, c, κ, 331 // 10_000)...) 
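    # For reference (annotation, not produced by the formatter): this is the
    # Marsaglia-Tsang (2000) "squeeze" setup with d = shape - 1/3 and
    # c = 1/(3*sqrt(d)).  The sampler draws z ~ Normal(0, 1), forms
    # v = (1 + c*z)^3, and accepts v*κ (= d*v*scale) when the cheap squeeze
    # u < 1 - 0.0331*z^4 holds -- that is the 331//10_000 factor above -- or,
    # failing that, when log(u) < z^2/2 + d - d*v + d*log(v); otherwise it retries.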
end function rand(rng::AbstractRNG, s::GammaMTSampler{T}) where {T<:Real} @@ -211,7 +222,8 @@ end # Inverse Power sampler # uses the x*u^(1/a) trick from Marsaglia and Tsang (2000) for when shape < 1 -struct GammaIPSampler{S<:Sampleable{Univariate,Continuous},T<:Real} <: Sampleable{Univariate,Continuous} +struct GammaIPSampler{S<:Sampleable{Univariate,Continuous},T<:Real} <: + Sampleable{Univariate,Continuous} s::S #sampler for Gamma(1+shape,scale) nia::T #-1/scale end @@ -226,5 +238,5 @@ end function rand(rng::AbstractRNG, s::GammaIPSampler) x = rand(rng, s.s) e = randexp(rng, typeof(x)) - x*exp(s.nia*e) + x * exp(s.nia * e) end diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl index 7ce8b89730..2a2f4bde51 100644 --- a/src/samplers/multinomial.jl +++ b/src/samplers/multinomial.jl @@ -1,5 +1,9 @@ -function multinom_rand!(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real}, - x::AbstractVector{<:Real}) +function multinom_rand!( + rng::AbstractRNG, + n::Int, + p::AbstractVector{<:Real}, + x::AbstractVector{<:Real}, +) k = length(p) length(x) == k || throw(DimensionMismatch("Invalid argument dimension.")) @@ -31,7 +35,7 @@ function multinom_rand!(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real}, @inbounds x[k] = n else # n must have been zero z = zero(eltype(x)) - for j = i+1 : k + for j = i+1:k @inbounds x[j] = z end end @@ -49,8 +53,7 @@ function MultinomialSampler(n::Int, prob::Vector{<:Real}) return MultinomialSampler(n, prob, AliasTable(prob)) end -function _rand!(rng::AbstractRNG, s::MultinomialSampler, - x::AbstractVector{<:Real}) +function _rand!(rng::AbstractRNG, s::MultinomialSampler, x::AbstractVector{<:Real}) n = s.n k = length(s) if n^2 > k diff --git a/src/samplers/obsoleted.jl b/src/samplers/obsoleted.jl index 463e00736e..cfa28813a4 100644 --- a/src/samplers/obsoleted.jl +++ b/src/samplers/obsoleted.jl @@ -13,13 +13,13 @@ struct DiscreteDistributionTable <: Sampler{Univariate,Discrete} end # TODO: Test if bit operations can speed up Base64 mod's and fld's -function DiscreteDistributionTable(probs::Vector{T}) where T <: Real +function DiscreteDistributionTable(probs::Vector{T}) where {T<:Real} # Cache the cardinality of the outcome set n = length(probs) # Convert all Float64's into integers vals = Vector{Int64}(undef, n) - for i in 1:n + for i = 1:n vals[i] = round(Int, probs[i] * 64^9) end @@ -28,14 +28,14 @@ function DiscreteDistributionTable(probs::Vector{T}) where T <: Real bounds = zeros(Int64, 9) # Special case for deterministic distributions - for i in 1:n + for i = 1:n if vals[i] == 64^9 table[1] = Vector{Int64}(undef, 64) - for j in 1:64 + for j = 1:64 table[1][j] = i end bounds[1] = 64^9 - for j in 2:9 + for j = 2:9 table[j] = Vector{Int64}() bounds[j] = 64^9 end @@ -45,14 +45,14 @@ function DiscreteDistributionTable(probs::Vector{T}) where T <: Real # Fill tables multiplier = 1 - for index in 9:-1:1 + for index = 9:-1:1 counts = Vector{Int64}() - for i in 1:n + for i = 1:n digit = mod(vals[i], 64) # vals[i] = fld(vals[i], 64) vals[i] >>= 6 bounds[index] += digit - for itr in 1:digit + for itr = 1:digit push!(counts, i) end end @@ -70,7 +70,7 @@ end function rand(table::DiscreteDistributionTable) # 64^9 - 1 == 0x003fffffffffffff - i = rand(1:(64^9 - 1)) + i = rand(1:(64^9-1)) # if i == 64^9 # return table.table[9][rand(1:length(table.table[9]))] # end @@ -79,7 +79,7 @@ function rand(table::DiscreteDistributionTable) bound += 1 end if bound > 1 - index = fld(i - table.bounds[bound - 1] - 1, 64^(9 - bound)) + 1 + index = fld(i - 
table.bounds[bound-1] - 1, 64^(9 - bound)) + 1 else index = fld(i - 1, 64^(9 - bound)) + 1 end @@ -103,13 +103,14 @@ struct HuffmanBranch{T} <: HuffmanNode{T} right::HuffmanNode{T} weight::UInt64 end -HuffmanBranch(ha::HuffmanNode{T},hb::HuffmanNode{T}) where {T} = HuffmanBranch(ha, hb, ha.weight + hb.weight) +HuffmanBranch(ha::HuffmanNode{T}, hb::HuffmanNode{T}) where {T} = + HuffmanBranch(ha, hb, ha.weight + hb.weight) -Base.isless(ha::HuffmanNode{T}, hb::HuffmanNode{T}) where {T} = isless(ha.weight,hb.weight) -Base.show(io::IO, t::HuffmanNode{T}) where {T} = show(io,typeof(t)) +Base.isless(ha::HuffmanNode{T}, hb::HuffmanNode{T}) where {T} = isless(ha.weight, hb.weight) +Base.show(io::IO, t::HuffmanNode{T}) where {T} = show(io, typeof(t)) -function Base.getindex(h::HuffmanBranch{T},u::UInt64) where T - while isa(h,HuffmanBranch{T}) +function Base.getindex(h::HuffmanBranch{T}, u::UInt64) where {T} + while isa(h, HuffmanBranch{T}) if u < h.left.weight h = h.left else @@ -122,36 +123,40 @@ end # build the huffman tree # could be slightly more efficient using a Deque. -function huffman(values::AbstractVector{T},weights::AbstractVector{UInt64}) where T - leafs = [HuffmanLeaf{T}(values[i],weights[i]) for i = 1:length(weights)] - sort!(leafs; rev=true) +function huffman(values::AbstractVector{T}, weights::AbstractVector{UInt64}) where {T} + leafs = [HuffmanLeaf{T}(values[i], weights[i]) for i = 1:length(weights)] + sort!(leafs; rev = true) branches = Vector{HuffmanBranch{T}}() while !isempty(leafs) || length(branches) > 1 - left = isempty(branches) || (!isempty(leafs) && first(leafs) < first(branches)) ? pop!(leafs) : pop!(branches) - right = isempty(branches) || (!isempty(leafs) && first(leafs) < first(branches)) ? pop!(leafs) : pop!(branches) - pushfirst!(branches,HuffmanBranch(left,right)) + left = + isempty(branches) || (!isempty(leafs) && first(leafs) < first(branches)) ? + pop!(leafs) : pop!(branches) + right = + isempty(branches) || (!isempty(leafs) && first(leafs) < first(branches)) ? 
+ pop!(leafs) : pop!(branches) + pushfirst!(branches, HuffmanBranch(left, right)) end pop!(branches) end -function rand(h::HuffmanNode{T}) where T +function rand(h::HuffmanNode{T}) where {T} w = h.weight # generate uniform UInt64 objects on the range 0:(w-1) # unfortunately we can't use Range objects, as they don't have sufficient length u = rand(UInt64) - if (w & (w-1)) == 0 + if (w & (w - 1)) == 0 # power of 2 - u = u & (w-1) + u = u & (w - 1) else m = typemax(UInt64) - lim = m - (rem(m,w)+1) + lim = m - (rem(m, w) + 1) while u > lim u = rand(UInt64) end - u = rem(u,w) + u = rem(u, w) end h[u] end diff --git a/src/samplers/poisson.jl b/src/samplers/poisson.jl index 64898ec2fe..201c4b7ac3 100644 --- a/src/samplers/poisson.jl +++ b/src/samplers/poisson.jl @@ -1,7 +1,7 @@ function poissonpvec(μ::Float64, n::Int) # Poisson probabilities, from 0 to n - pv = Vector{Float64}(undef, n+1) + pv = Vector{Float64}(undef, n + 1) @inbounds pv[1] = p = exp(-μ) for i = 1:n @inbounds pv[i+1] = (p *= (μ / i)) @@ -102,7 +102,7 @@ function rand(rng::AbstractRNG, sampler::PoissonADSampler) c = 0.1069 / μ # Step H - if c*abs(U) <= py*exp(px + E) - fy*exp(fx + E) + if c * abs(U) <= py * exp(px + E) - fy * exp(fx + E) return K end end @@ -111,28 +111,28 @@ end # Procedure F function procf(μ, K::Int, s) # can be pre-computed, but does not seem to affect performance - ω = 0.3989422804014327/s - b1 = 0.041666666666666664/μ - b2 = 0.3*b1*b1 - c3 = 0.14285714285714285*b1*b2 + ω = 0.3989422804014327 / s + b1 = 0.041666666666666664 / μ + b2 = 0.3 * b1 * b1 + c3 = 0.14285714285714285 * b1 * b2 c2 = b2 - 15 * c3 c1 = b1 - 6 * b2 + 45 * c3 c0 = 1 - b1 + 3 * b2 - 15 * c3 if K < 10 px = -μ - py = μ^K/factorial(K) + py = μ^K / factorial(K) else - δ = 0.08333333333333333/K - δ -= 4.8*δ^3 - V = (μ-K)/K - px = K*log1pmx(V) - δ # avoids need for table - py = 0.3989422804014327/sqrt(K) + δ = 0.08333333333333333 / K + δ -= 4.8 * δ^3 + V = (μ - K) / K + px = K * log1pmx(V) - δ # avoids need for table + py = 0.3989422804014327 / sqrt(K) end - X = (K-μ+0.5)/s + X = (K - μ + 0.5) / s X2 = X^2 fx = -X2 / 2 # missing negation in pseudo-algorithm, but appears in fortran code. 
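    # For reference (annotation): step H above accepts K when
    # c*|U| <= py*exp(px + E) - fy*exp(fx + E), so procf only needs the pmf and
    # its normal approximation in factored form: py*exp(px) is the Poisson pmf
    # at K (exact via factorial for K < 10, Stirling-corrected otherwise), and
    # fy*exp(fx) is the matching discrete-normal density, with fy carrying a
    # small polynomial correction in X^2.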
- fy = ω*(((c3*X2+c2)*X2+c1)*X2+c0) - return px,py,fx,fy + fy = ω * (((c3 * X2 + c2) * X2 + c1) * X2 + c0) + return px, py, fx, fy end diff --git a/src/samplers/productnamedtuple.jl b/src/samplers/productnamedtuple.jl index f46c1c2aad..3d07fec0e1 100644 --- a/src/samplers/productnamedtuple.jl +++ b/src/samplers/productnamedtuple.jl @@ -14,7 +14,9 @@ function _rand(rng::AbstractRNG, spl::ProductNamedTupleSampler, dims::Dims) end function _rand!( - rng::AbstractRNG, spl::ProductNamedTupleSampler, xs::AbstractArray{<:NamedTuple{K}} + rng::AbstractRNG, + spl::ProductNamedTupleSampler, + xs::AbstractArray{<:NamedTuple{K}}, ) where {K} for i in eachindex(xs) xs[i] = NamedTuple{K}(rand(rng, spl)) diff --git a/src/samplers/vonmisesfisher.jl b/src/samplers/vonmisesfisher.jl index fd3eb2df08..dabad736f5 100644 --- a/src/samplers/vonmisesfisher.jl +++ b/src/samplers/vonmisesfisher.jl @@ -52,7 +52,7 @@ _vmf_bval(p::Int, κ::Real) = (p - 1) / (2.0κ + sqrt(4 * abs2(κ) + abs2(p - 1) function _vmf_genw3(rng::AbstractRNG, p, b, x0, c, κ) ξ = rand(rng) - w = 1.0 + (log(ξ + (1.0 - ξ)*exp(-2κ))/κ) + w = 1.0 + (log(ξ + (1.0 - ξ) * exp(-2κ)) / κ) return w::Float64 end @@ -91,10 +91,10 @@ function _vmf_householder_vec(μ::Vector{Float64}) p = length(μ) v = similar(μ) v[1] = μ[1] - 1.0 - s = sqrt(-2*v[1]) + s = sqrt(-2 * v[1]) v[1] /= s - @inbounds for i in 2:p + @inbounds for i = 2:p v[i] = μ[i] / s end diff --git a/src/show.jl b/src/show.jl index 31bc5cd92c..5db81a5fe1 100644 --- a/src/show.jl +++ b/src/show.jl @@ -67,7 +67,7 @@ function show_oneline(io::IO, d::Distribution, namevals) print(io, ')') end -function show_multline(io::IO, d::Distribution, namevals; newline=true) +function show_multline(io::IO, d::Distribution, namevals; newline = true) print(io, distrname(d)) println(io, "(") for (p, pv) in namevals diff --git a/src/statsapi.jl b/src/statsapi.jl index 18112b3dc8..cabf6928fe 100644 --- a/src/statsapi.jl +++ b/src/statsapi.jl @@ -4,19 +4,27 @@ function _check_tail(tail::Symbol) end end -function StatsAPI.pvalue(dist::DiscreteUnivariateDistribution, x::Number; tail::Symbol=:both) +function StatsAPI.pvalue( + dist::DiscreteUnivariateDistribution, + x::Number; + tail::Symbol = :both, +) _check_tail(tail) if tail === :both - p = 2 * min(ccdf(dist, x-1), cdf(dist, x)) + p = 2 * min(ccdf(dist, x - 1), cdf(dist, x)) min(p, oneunit(p)) # if P(X = x) > 0, then possibly p > 1 elseif tail === :left cdf(dist, x) else # tail === :right - ccdf(dist, x-1) + ccdf(dist, x - 1) end end -function StatsAPI.pvalue(dist::ContinuousUnivariateDistribution, x::Number; tail::Symbol=:both) +function StatsAPI.pvalue( + dist::ContinuousUnivariateDistribution, + x::Number; + tail::Symbol = :both, +) _check_tail(tail) if tail === :both p = 2 * min(cdf(dist, x), ccdf(dist, x)) diff --git a/src/test_utils.jl b/src/test_utils.jl index 6f74f80ec3..63a3a0b0b2 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -9,7 +9,7 @@ if isdefined(Base, :get_extension) && isdefined(Base.Experimental, :register_err # Better error message if users forget to load Test Base.Experimental.register_error_hint(MethodError) do io, exc, _, _ if exc.f === test_mvnormal && - (Base.get_extension(Distributions, :DistributionsTestExt) === nothing) + (Base.get_extension(Distributions, :DistributionsTestExt) === nothing) print(io, "\nDid you forget to load Test?") end end diff --git a/src/truncate.jl b/src/truncate.jl index 48d62b015b..8118cce148 100644 --- a/src/truncate.jl +++ b/src/truncate.jl @@ -35,8 +35,8 @@ function truncated(d::UnivariateDistribution, 
l::Real, u::Real) end function truncated( d::UnivariateDistribution; - lower::Union{Real,Nothing}=nothing, - upper::Union{Real,Nothing}=nothing, + lower::Union{Real,Nothing} = nothing, + upper::Union{Real,Nothing} = nothing, ) return truncated(d, lower, upper) end @@ -60,7 +60,7 @@ function truncated(d::UnivariateDistribution, l::Real, ::Nothing) Truncated(d, l, nothing, loglcdf, lcdf, ucdf, tp, logtp) end truncated(d::UnivariateDistribution, ::Nothing, ::Nothing) = d -function truncated(d::UnivariateDistribution, l::T, u::T) where {T <: Real} +function truncated(d::UnivariateDistribution, l::T, u::T) where {T<:Real} l <= u || error("the lower bound must be less or equal than the upper bound") # (log)lcdf = (log) P(X < l) where X ~ d @@ -86,7 +86,13 @@ Generic wrapper for a truncated distribution. The *truncated normal distribution* is a particularly important one in the family of truncated distributions. Unlike the general case, truncated normal distributions support `mean`, `mode`, `modes`, `var`, `std`, and `entropy`. """ -struct Truncated{D<:UnivariateDistribution, S<:ValueSupport, T<: Real, TL<:Union{T,Nothing}, TU<:Union{T,Nothing}} <: UnivariateDistribution{S} +struct Truncated{ + D<:UnivariateDistribution, + S<:ValueSupport, + T<:Real, + TL<:Union{T,Nothing}, + TU<:Union{T,Nothing}, +} <: UnivariateDistribution{S} untruncated::D # the original distribution (untruncated) lower::TL # lower bound upper::TU # upper bound @@ -97,17 +103,45 @@ struct Truncated{D<:UnivariateDistribution, S<:ValueSupport, T<: Real, TL<:Union tp::T # the probability of the truncated part, i.e. ucdf - lcdf logtp::T # log(tp), i.e. log(ucdf - lcdf) - function Truncated(d::UnivariateDistribution, l::TL, u::TU, loglcdf::T, lcdf::T, ucdf::T, tp::T, logtp::T) where {T <: Real, TL <: Union{T,Nothing}, TU <: Union{T,Nothing}} - new{typeof(d), value_support(typeof(d)), T, TL, TU}(d, l, u, loglcdf, lcdf, ucdf, tp, logtp) + function Truncated( + d::UnivariateDistribution, + l::TL, + u::TU, + loglcdf::T, + lcdf::T, + ucdf::T, + tp::T, + logtp::T, + ) where {T<:Real,TL<:Union{T,Nothing},TU<:Union{T,Nothing}} + new{typeof(d),value_support(typeof(d)),T,TL,TU}( + d, + l, + u, + loglcdf, + lcdf, + ucdf, + tp, + logtp, + ) end end -const LeftTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = Truncated{D,S,T,T,Nothing} -const RightTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = Truncated{D,S,T,Nothing,T} +const LeftTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = + Truncated{D,S,T,T,Nothing} +const RightTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = + Truncated{D,S,T,Nothing,T} ### Constructors of `Truncated` are deprecated - users should call `truncated` @deprecate Truncated(d::UnivariateDistribution, l::Real, u::Real) truncated(d, l, u) -@deprecate Truncated(d::UnivariateDistribution, l::T, u::T, lcdf::T, ucdf::T, tp::T, logtp::T) where {T <: Real} Truncated(d, l, u, log(lcdf), lcdf, ucdf, tp, logtp) +@deprecate Truncated( + d::UnivariateDistribution, + l::T, + u::T, + lcdf::T, + ucdf::T, + tp::T, + logtp::T, +) where {T<:Real} Truncated(d, l, u, log(lcdf), lcdf, ucdf, tp, logtp) function truncated(d::Truncated, l::T, u::T) where {T<:Real} return truncated( @@ -124,7 +158,8 @@ function truncated(d::Truncated, l::Real, ::Nothing) end params(d::Truncated) = tuple(params(d.untruncated)..., d.lower, d.upper) -partype(d::Truncated{<:UnivariateDistribution,<:ValueSupport,T}) where {T<:Real} = promote_type(partype(d.untruncated), T) 
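Taken together, the `truncated` methods above give one entry point where either bound may be omitted (or passed as `nothing`, meaning unbounded on that side); a short usage sketch (the distributions and bounds are illustrative):

using Distributions

d1 = truncated(Normal(0.0, 1.0), -1.0, 2.0)                  # positional bounds
d2 = truncated(Normal(0.0, 1.0); lower = -1.0, upper = 2.0)  # same distribution
d3 = truncated(Exponential(1.0); lower = 0.5)                # one-sided truncation
cdf(d1, 0.0) ≈ cdf(d2, 0.0)                                  # true
minimum(d3)                                                  # 0.5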
+partype(d::Truncated{<:UnivariateDistribution,<:ValueSupport,T}) where {T<:Real} = + promote_type(partype(d.untruncated), T) Base.eltype(::Type{<:Truncated{D}}) where {D<:UnivariateDistribution} = eltype(D) Base.eltype(d::Truncated) = eltype(d.untruncated) @@ -143,7 +178,10 @@ minimum(d::Truncated) = max(minimum(d.untruncated), d.lower) maximum(d::LeftTruncated) = maximum(d.untruncated) maximum(d::Truncated) = min(maximum(d.untruncated), d.upper) -function insupport(d::Truncated{<:UnivariateDistribution,<:Union{Discrete,Continuous}}, x::Real) +function insupport( + d::Truncated{<:UnivariateDistribution,<:Union{Discrete,Continuous}}, + x::Real, +) return _in_closed_interval(x, d.lower, d.upper) && insupport(d.untruncated, x) end @@ -239,8 +277,7 @@ function show(io::IO, d::Truncated) print(io, "Truncated(") d0 = d.untruncated uml, namevals = _use_multline_show(d0) - uml ? show_multline(io, d0, namevals) : - show_oneline(io, d0, namevals) + uml ? show_multline(io, d0, namevals) : show_oneline(io, d0, namevals) if d.lower === nothing print(io, "; upper=$(d.upper))") elseif d.upper === nothing diff --git a/src/truncated/discrete_uniform.jl b/src/truncated/discrete_uniform.jl index a22ac390d6..6c54e5bca5 100644 --- a/src/truncated/discrete_uniform.jl +++ b/src/truncated/discrete_uniform.jl @@ -2,7 +2,7 @@ ##### Shortcut for truncating discrete uniform distributions. ##### -function truncated(d::DiscreteUniform, l::T, u::T) where {T <: Real} +function truncated(d::DiscreteUniform, l::T, u::T) where {T<:Real} a = ceil(Int, max(l, d.a)) b = floor(Int, min(u, d.b)) return DiscreteUniform(a, b) diff --git a/src/truncated/exponential.jl b/src/truncated/exponential.jl index c04c90d870..2af92ab080 100644 --- a/src/truncated/exponential.jl +++ b/src/truncated/exponential.jl @@ -6,13 +6,13 @@ function mean(d::Truncated{<:Exponential,Continuous}) θ = d.untruncated.θ l, r = extrema(d) # l is always finite if isfinite(r) - if abs(l-r) ≤ √eps()*θ + if abs(l - r) ≤ √eps() * θ # linear is a good approximation, just take the midpoint middle(l, r) else Δ = r - l - R = -Δ/θ - θ + middle(l, r) + Δ/2*(exp(R)+1)/expm1(R) + R = -Δ / θ + θ + middle(l, r) + Δ / 2 * (exp(R) + 1) / expm1(R) end else θ + l diff --git a/src/truncated/loguniform.jl b/src/truncated/loguniform.jl index 0087d01dfb..6743e44fc3 100644 --- a/src/truncated/loguniform.jl +++ b/src/truncated/loguniform.jl @@ -1,3 +1,4 @@ -truncated(d::LogUniform, lo::T, hi::T) where {T<:Real} = LogUniform(max(d.a, lo), min(d.b, hi)) +truncated(d::LogUniform, lo::T, hi::T) where {T<:Real} = + LogUniform(max(d.a, lo), min(d.b, hi)) truncated(d::LogUniform, lo::Real, ::Nothing) = LogUniform(max(d.a, lo), d.b) truncated(d::LogUniform, ::Nothing, hi::Real) = LogUniform(d.a, min(d.b, hi)) diff --git a/src/truncated/normal.jl b/src/truncated/normal.jl index 6fb3342731..9dab23d902 100644 --- a/src/truncated/normal.jl +++ b/src/truncated/normal.jl @@ -115,7 +115,7 @@ function entropy(d::Truncated{<:Normal{<:Real},Continuous}) b = (upper - μ) / σ aφa = isinf(a) ? 0.0 : a * normpdf(a) bφb = isinf(b) ? 0.0 : b * normpdf(b) - 0.5 * (log2π + 1.) 
+ log(σ * z) + (aφa - bφb) / (2.0 * z) + 0.5 * (log2π + 1.0) + log(σ * z) + (aφa - bφb) / (2.0 * z) end @@ -154,8 +154,9 @@ function randnt(rng::AbstractRNG, lb::Float64, ub::Float64, tp::Float64) else span = ub - lb - if lb > 0 && span > 2.0 / (lb + sqrt(lb^2 + 4.0)) * exp((lb^2 - lb * sqrt(lb^2 + 4.0)) / 4.0) - a = (lb + sqrt(lb^2 + 4.0))/2.0 + if lb > 0 && + span > 2.0 / (lb + sqrt(lb^2 + 4.0)) * exp((lb^2 - lb * sqrt(lb^2 + 4.0)) / 4.0) + a = (lb + sqrt(lb^2 + 4.0)) / 2.0 while true r = rand(rng, Exponential(1.0 / a)) + lb u = rand(rng) @@ -163,7 +164,9 @@ function randnt(rng::AbstractRNG, lb::Float64, ub::Float64, tp::Float64) return r end end - elseif ub < 0 && ub - lb > 2.0 / (-ub + sqrt(ub^2 + 4.0)) * exp((ub^2 + ub * sqrt(ub^2 + 4.0)) / 4.0) + elseif ub < 0 && + ub - lb > + 2.0 / (-ub + sqrt(ub^2 + 4.0)) * exp((ub^2 + ub * sqrt(ub^2 + 4.0)) / 4.0) a = (-ub + sqrt(ub^2 + 4.0)) / 2.0 while true r = rand(rng, Exponential(1.0 / a)) - ub diff --git a/src/truncated/uniform.jl b/src/truncated/uniform.jl index bb56609928..cf67fc8e5a 100644 --- a/src/truncated/uniform.jl +++ b/src/truncated/uniform.jl @@ -2,6 +2,6 @@ ##### Shortcut for truncating uniform distributions. ##### -truncated(d::Uniform, l::T, u::T) where {T <: Real} = Uniform(max(l, d.a), min(u, d.b)) +truncated(d::Uniform, l::T, u::T) where {T<:Real} = Uniform(max(l, d.a), min(u, d.b)) truncated(d::Uniform, l::Real, ::Nothing) = Uniform(max(l, d.a), d.b) truncated(d::Uniform, ::Nothing, u::Real) = Uniform(d.a, min(u, d.b)) diff --git a/src/univariate/continuous/arcsine.jl b/src/univariate/continuous/arcsine.jl index b0a8d26625..8e802a7ae6 100644 --- a/src/univariate/continuous/arcsine.jl +++ b/src/univariate/continuous/arcsine.jl @@ -31,20 +31,22 @@ struct Arcsine{T<:Real} <: ContinuousUnivariateDistribution Arcsine{T}(a::T, b::T) where {T<:Real} = new{T}(a, b) end -function Arcsine(a::T, b::T; check_args::Bool=true) where {T <: Real} +function Arcsine(a::T, b::T; check_args::Bool = true) where {T<:Real} @check_args Arcsine a < b return Arcsine{T}(a, b) end -Arcsine(a::Real, b::Real; check_args::Bool=true) = Arcsine(promote(a, b)...; check_args=check_args) -Arcsine(a::Integer, b::Integer; check_args::Bool=true) = Arcsine(float(a), float(b); check_args=check_args) -Arcsine(b::Real; check_args::Bool=true) = Arcsine(zero(b), b; check_args=check_args) +Arcsine(a::Real, b::Real; check_args::Bool = true) = + Arcsine(promote(a, b)...; check_args = check_args) +Arcsine(a::Integer, b::Integer; check_args::Bool = true) = + Arcsine(float(a), float(b); check_args = check_args) +Arcsine(b::Real; check_args::Bool = true) = Arcsine(zero(b), b; check_args = check_args) Arcsine() = Arcsine{Float64}(0.0, 1.0) @distr_support Arcsine d.a d.b #### Conversions -function convert(::Type{Arcsine{T}}, a::Real, b::Real) where T<:Real +function convert(::Type{Arcsine{T}}, a::Real, b::Real) where {T<:Real} Arcsine(T(a), T(b)) end Base.convert(::Type{Arcsine{T}}, d::Arcsine) where {T<:Real} = Arcsine{T}(T(d.a), T(d.b)) @@ -67,7 +69,7 @@ modes(d::Arcsine) = [d.a, d.b] var(d::Arcsine) = abs2(d.b - d.a) / 8 skewness(d::Arcsine{T}) where {T} = zero(T) -kurtosis(d::Arcsine{T}) where {T} = -T(3/2) +kurtosis(d::Arcsine{T}) where {T} = -T(3 / 2) entropy(d::Arcsine) = -0.24156447527049044469 + log(scale(d)) @@ -78,17 +80,17 @@ function pdf(d::Arcsine, x::Real) insupport(d, x) ? one(d.a) / (π * sqrt((x - d.a) * (d.b - x))) : zero(d.a) end -function logpdf(d::Arcsine{T}, x::Real) where T<:Real - insupport(d, x) ? 
-(logπ + log((x - d.a) * (d.b - x))/2) : -T(Inf) +function logpdf(d::Arcsine{T}, x::Real) where {T<:Real} + insupport(d, x) ? -(logπ + log((x - d.a) * (d.b - x)) / 2) : -T(Inf) end -cdf(d::Arcsine{T}, x::Real) where {T<:Real} = x < d.a ? zero(T) : - x > d.b ? one(T) : - 0.636619772367581343 * asin(sqrt((x - d.a) / (d.b - d.a))) +cdf(d::Arcsine{T}, x::Real) where {T<:Real} = + x < d.a ? zero(T) : + x > d.b ? one(T) : 0.636619772367581343 * asin(sqrt((x - d.a) / (d.b - d.a))) quantile(d::Arcsine, p::Real) = location(d) + abs2(sin(halfπ * p)) * scale(d) -function gradlogpdf(d::Arcsine{T}, x::R) where {T, R <: Real} +function gradlogpdf(d::Arcsine{T}, x::R) where {T,R<:Real} TP = promote_type(T, R) (a, b) = extrema(d) # on the bounds, we consider the gradient limit inside the domain diff --git a/src/univariate/continuous/beta.jl b/src/univariate/continuous/beta.jl index fd2420d815..812f16e105 100644 --- a/src/univariate/continuous/beta.jl +++ b/src/univariate/continuous/beta.jl @@ -32,23 +32,25 @@ struct Beta{T<:Real} <: ContinuousUnivariateDistribution Beta{T}(α::T, β::T) where {T} = new{T}(α, β) end -function Beta(α::T, β::T; check_args::Bool=true) where {T<:Real} +function Beta(α::T, β::T; check_args::Bool = true) where {T<:Real} @check_args Beta (α, α > zero(α)) (β, β > zero(β)) return Beta{T}(α, β) end -Beta(α::Real, β::Real; check_args::Bool=true) = Beta(promote(α, β)...; check_args=check_args) -Beta(α::Integer, β::Integer; check_args::Bool=true) = Beta(float(α), float(β); check_args=check_args) -function Beta(α::Real; check_args::Bool=true) +Beta(α::Real, β::Real; check_args::Bool = true) = + Beta(promote(α, β)...; check_args = check_args) +Beta(α::Integer, β::Integer; check_args::Bool = true) = + Beta(float(α), float(β); check_args = check_args) +function Beta(α::Real; check_args::Bool = true) @check_args Beta (α, α > zero(α)) - Beta(α, α; check_args=false) + Beta(α, α; check_args = false) end Beta() = Beta{Float64}(1.0, 1.0) @distr_support Beta 0.0 1.0 #### Conversions -function convert(::Type{Beta{T}}, α::Real, β::Real) where T<:Real +function convert(::Type{Beta{T}}, α::Real, β::Real) where {T<:Real} Beta(T(α), T(β)) end Base.convert(::Type{Beta{T}}, d::Beta) where {T<:Real} = Beta{T}(T(d.α), T(d.β)) @@ -62,9 +64,10 @@ params(d::Beta) = (d.α, d.β) #### Statistics -mean(d::Beta) = ((α, β) = params(d); α / (α + β)) +mean(d::Beta) = ((α, β) = params(d); +α / (α + β)) -function mode(d::Beta; check_args::Bool=true) +function mode(d::Beta; check_args::Bool = true) α, β = params(d) @check_args( Beta, @@ -82,9 +85,11 @@ function var(d::Beta) return (α * β) / (abs2(s) * (s + 1)) end -meanlogx(d::Beta) = ((α, β) = params(d); digamma(α) - digamma(α + β)) +meanlogx(d::Beta) = ((α, β) = params(d); +digamma(α) - digamma(α + β)) -varlogx(d::Beta) = ((α, β) = params(d); trigamma(α) - trigamma(α + β)) +varlogx(d::Beta) = ((α, β) = params(d); +trigamma(α) - trigamma(α + β)) stdlogx(d::Beta) = sqrt(varlogx(d)) function skewness(d::Beta) @@ -107,30 +112,33 @@ end function entropy(d::Beta) α, β = params(d) s = α + β - logbeta(α, β) - (α - 1) * digamma(α) - (β - 1) * digamma(β) + - (s - 2) * digamma(s) + logbeta(α, β) - (α - 1) * digamma(α) - (β - 1) * digamma(β) + (s - 2) * digamma(s) end function kldivergence(p::Beta, q::Beta) αp, βp = params(p) αq, βq = params(q) - return logbeta(αq, βq) - logbeta(αp, βp) + (αp - αq) * digamma(αp) + - (βp - βq) * digamma(βp) + (αq - αp + βq - βp) * digamma(αp + βp) + return logbeta(αq, βq) - logbeta(αp, βp) + + (αp - αq) * digamma(αp) + + (βp - βq) * digamma(βp) + + 
(αq - αp + βq - βp) * digamma(αp + βp) end #### Evaluation @_delegate_statsfuns Beta beta α β -gradlogpdf(d::Beta{T}, x::Real) where {T<:Real} = - ((α, β) = params(d); 0 <= x <= 1 ? (α - 1) / x - (β - 1) / (1 - x) : zero(T)) +gradlogpdf(d::Beta{T}, x::Real) where {T<:Real} = ((α, β) = params(d); +0 <= x <= 1 ? (α - 1) / x - (β - 1) / (1 - x) : zero(T)) #### Sampling -struct BetaSampler{T<:Real, S1 <: Sampleable{Univariate,Continuous}, - S2 <: Sampleable{Univariate,Continuous}} <: - Sampleable{Univariate,Continuous} +struct BetaSampler{ + T<:Real, + S1<:Sampleable{Univariate,Continuous}, + S2<:Sampleable{Univariate,Continuous}, +} <: Sampleable{Univariate,Continuous} γ::Bool iα::T iβ::T @@ -138,15 +146,18 @@ struct BetaSampler{T<:Real, S1 <: Sampleable{Univariate,Continuous}, s2::S2 end -function sampler(d::Beta{T}) where T +function sampler(d::Beta{T}) where {T} (α, β) = params(d) if (α ≤ 1.0) && (β ≤ 1.0) - return BetaSampler(false, inv(α), inv(β), - sampler(Uniform()), sampler(Uniform())) + return BetaSampler(false, inv(α), inv(β), sampler(Uniform()), sampler(Uniform())) else - return BetaSampler(true, inv(α), inv(β), - sampler(Gamma(α, one(T))), - sampler(Gamma(β, one(T)))) + return BetaSampler( + true, + inv(α), + inv(β), + sampler(Gamma(α, one(T))), + sampler(Gamma(β, one(T))), + ) end end @@ -180,7 +191,7 @@ function rand(rng::AbstractRNG, s::BetaSampler) end end -function rand(rng::AbstractRNG, d::Beta{T}) where T +function rand(rng::AbstractRNG, d::Beta{T}) where {T} (α, β) = params(d) if (α ≤ 1.0) && (β ≤ 1.0) while true @@ -210,27 +221,35 @@ end -function fit_mle(::Type{<:Beta}, x::AbstractArray{T}; - maxiter::Int=1000, tol::Float64=1e-14) where T<:Real +function fit_mle( + ::Type{<:Beta}, + x::AbstractArray{T}; + maxiter::Int = 1000, + tol::Float64 = 1e-14, +) where {T<:Real} - α₀,β₀ = params(fit(Beta,x)) #initial guess of parameters + α₀, β₀ = params(fit(Beta, x)) #initial guess of parameters g₁ = mean(log.(x)) g₂ = mean(log.(one(T) .- x)) - θ= [α₀ ; β₀ ] + θ = [α₀; β₀] converged = false - t=0 + t = 0 while !converged && t < maxiter #newton method - t+=1 - temp1 = digamma(θ[1]+θ[2]) - temp2 = trigamma(θ[1]+θ[2]) - grad = [g₁+temp1-digamma(θ[1]) - temp1+g₂-digamma(θ[2])] - hess = [temp2-trigamma(θ[1]) temp2 - temp2 temp2-trigamma(θ[2])] - Δθ = hess\grad #newton step + t += 1 + temp1 = digamma(θ[1] + θ[2]) + temp2 = trigamma(θ[1] + θ[2]) + grad = [ + g₁ + temp1 - digamma(θ[1]) + temp1 + g₂ - digamma(θ[2]) + ] + hess = [ + temp2-trigamma(θ[1]) temp2 + temp2 temp2-trigamma(θ[2]) + ] + Δθ = hess \ grad #newton step θ .-= Δθ - converged = dot(Δθ,Δθ) < 2*tol #stopping criterion + converged = dot(Δθ, Δθ) < 2 * tol #stopping criterion end return Beta(θ[1], θ[2]) @@ -238,7 +257,7 @@ end -function fit(::Type{<:Beta}, x::AbstractArray{T}) where T<:Real +function fit(::Type{<:Beta}, x::AbstractArray{T}) where {T<:Real} x_bar = mean(x) v_bar = varm(x, x_bar) temp = ((x_bar * (one(T) - x_bar)) / v_bar) - one(T) diff --git a/src/univariate/continuous/betaprime.jl b/src/univariate/continuous/betaprime.jl index 1ca65b5e85..5b9596be9f 100644 --- a/src/univariate/continuous/betaprime.jl +++ b/src/univariate/continuous/betaprime.jl @@ -32,26 +32,29 @@ struct BetaPrime{T<:Real} <: ContinuousUnivariateDistribution BetaPrime{T}(α::T, β::T) where {T} = new{T}(α, β) end -function BetaPrime(α::T, β::T; check_args::Bool=true) where {T<:Real} +function BetaPrime(α::T, β::T; check_args::Bool = true) where {T<:Real} @check_args BetaPrime (α, α > zero(α)) (β, β > zero(β)) return BetaPrime{T}(α, β) end 
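`BetaPrime(α, β)` is the distribution of X/(1 - X) for X ~ Beta(α, β); the cdf/quantile methods further down delegate to `Beta` through exactly that change of variables (`zval`/`xval`). A quick Monte-Carlo check of the relationship (seed, sample size, and evaluation point are illustrative):

using Distributions, Random, Statistics

rng = MersenneTwister(1)
α, β = 2.0, 3.0
x = rand(rng, Beta(α, β), 100_000)
y = x ./ (1 .- x)                            # transform Beta draws to BetaPrime
mean(y .<= 1.5), cdf(BetaPrime(α, β), 1.5)   # both ≈ 0.82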
-BetaPrime(α::Real, β::Real; check_args::Bool=true) = BetaPrime(promote(α, β)...; check_args=check_args) -BetaPrime(α::Integer, β::Integer; check_args::Bool=true) = BetaPrime(float(α), float(β); check_args=check_args) -function BetaPrime(α::Real; check_args::Bool=true) +BetaPrime(α::Real, β::Real; check_args::Bool = true) = + BetaPrime(promote(α, β)...; check_args = check_args) +BetaPrime(α::Integer, β::Integer; check_args::Bool = true) = + BetaPrime(float(α), float(β); check_args = check_args) +function BetaPrime(α::Real; check_args::Bool = true) @check_args BetaPrime (α, α > zero(α)) - BetaPrime(α, α; check_args=false) + BetaPrime(α, α; check_args = false) end BetaPrime() = BetaPrime{Float64}(1.0, 1.0) @distr_support BetaPrime 0.0 Inf #### Conversions -function convert(::Type{BetaPrime{T}}, α::Real, β::Real) where T<:Real +function convert(::Type{BetaPrime{T}}, α::Real, β::Real) where {T<:Real} BetaPrime(T(α), T(β)) end -Base.convert(::Type{BetaPrime{T}}, d::BetaPrime) where {T<:Real} = BetaPrime{T}(T(d.α), T(d.β)) +Base.convert(::Type{BetaPrime{T}}, d::BetaPrime) where {T<:Real} = + BetaPrime{T}(T(d.α), T(d.β)) Base.convert(::Type{BetaPrime{T}}, d::BetaPrime{T}) where {T<:Real} = d #### Parameters @@ -61,20 +64,22 @@ params(d::BetaPrime) = (d.α, d.β) #### Statistics -function mean(d::BetaPrime{T}) where T<:Real - ((α, β) = params(d); β > 1 ? α / (β - 1) : T(NaN)) +function mean(d::BetaPrime{T}) where {T<:Real} + ((α, β) = params(d); + β > 1 ? α / (β - 1) : T(NaN)) end -function mode(d::BetaPrime{T}) where T<:Real - ((α, β) = params(d); α > 1 ? (α - 1) / (β + 1) : zero(T)) +function mode(d::BetaPrime{T}) where {T<:Real} + ((α, β) = params(d); + α > 1 ? (α - 1) / (β + 1) : zero(T)) end -function var(d::BetaPrime{T}) where T<:Real +function var(d::BetaPrime{T}) where {T<:Real} (α, β) = params(d) β > 2 ? 
α * (α + β - 1) / ((β - 2) * (β - 1)^2) : T(NaN) end -function skewness(d::BetaPrime{T}) where T<:Real +function skewness(d::BetaPrime{T}) where {T<:Real} (α, β) = params(d) if β > 3 s = α + β - 1 @@ -103,11 +108,11 @@ end xval(::BetaPrime, z::Real) = z / (1 - z) for f in (:cdf, :ccdf, :logcdf, :logccdf) - @eval $f(d::BetaPrime, x::Real) = $f(Beta(d.α, d.β; check_args=false), zval(d, x)) + @eval $f(d::BetaPrime, x::Real) = $f(Beta(d.α, d.β; check_args = false), zval(d, x)) end for f in (:quantile, :cquantile, :invlogcdf, :invlogccdf) - @eval $f(d::BetaPrime, p::Real) = xval(d, $f(Beta(d.α, d.β; check_args=false), p)) + @eval $f(d::BetaPrime, p::Real) = xval(d, $f(Beta(d.α, d.β; check_args = false), p)) end #### Sampling diff --git a/src/univariate/continuous/biweight.jl b/src/univariate/continuous/biweight.jl index 93c6785ef4..6df7b84fdd 100644 --- a/src/univariate/continuous/biweight.jl +++ b/src/univariate/continuous/biweight.jl @@ -4,17 +4,19 @@ struct Biweight{T<:Real} <: ContinuousUnivariateDistribution μ::T σ::T - Biweight{T}(µ::T, σ::T) where {T <: Real} = new{T}(µ, σ) + Biweight{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) end -function Biweight(μ::T, σ::T; check_args::Bool=true) where {T<:Real} +function Biweight(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Biweight (σ, σ > zero(σ)) return Biweight{T}(μ, σ) end -Biweight(μ::Real, σ::Real; check_args::Bool=true) = Biweight(promote(μ, σ)...; check_args=check_args) -Biweight(μ::Integer, σ::Integer; check_args::Bool=true) = Biweight(float(μ), float(σ); check_args=check_args) -Biweight(μ::Real=0.0) = Biweight(μ, one(μ); check_args=false) +Biweight(μ::Real, σ::Real; check_args::Bool = true) = + Biweight(promote(μ, σ)...; check_args = check_args) +Biweight(μ::Integer, σ::Integer; check_args::Bool = true) = + Biweight(float(μ), float(σ); check_args = check_args) +Biweight(μ::Real = 0.0) = Biweight(μ, one(μ); check_args = false) @distr_support Biweight d.μ - d.σ d.μ + d.σ @@ -29,41 +31,36 @@ mode(d::Biweight) = d.μ var(d::Biweight) = d.σ^2 / 7 skewness(d::Biweight{T}) where {T<:Real} = zero(T) -kurtosis(d::Biweight{T}) where {T<:Real} = T(1)/21 - 3 +kurtosis(d::Biweight{T}) where {T<:Real} = T(1) / 21 - 3 ## Functions -function pdf(d::Biweight{T}, x::Real) where T<:Real +function pdf(d::Biweight{T}, x::Real) where {T<:Real} u = abs(x - d.μ) / d.σ - u >= 1 ? zero(T) : (15//16) * (1 - u^2)^2 / d.σ + u >= 1 ? zero(T) : (15 // 16) * (1 - u^2)^2 / d.σ end logpdf(d::Biweight, x::Real) = log(pdf(d, x)) -function cdf(d::Biweight{T}, x::Real) where T<:Real +function cdf(d::Biweight{T}, x::Real) where {T<:Real} u = (x - d.μ) / d.σ - u ≤ -1 ? zero(T) : - u ≥ 1 ? one(T) : - (u + 1)^3/16 * @horner(u,8,-9,3) + u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) end -function ccdf(d::Biweight{T}, x::Real) where T<:Real +function ccdf(d::Biweight{T}, x::Real) where {T<:Real} u = (d.μ - x) / d.σ - u ≤ -1 ? zero(T) : - u ≥ 1 ? one(T) : - (u + 1)^3/16 * @horner(u, 8, -9, 3) + u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) end @quantile_newton Biweight -function mgf(d::Biweight{T}, t::Real) where T<:Real - a = d.σ*t +function mgf(d::Biweight{T}, t::Real) where {T<:Real} + a = d.σ * t a2 = a^2 - a == 0 ? one(T) : - 15exp(d.μ * t) * (-3cosh(a) + (a + 3/a) * sinh(a)) / (a2^2) + a == 0 ? 
one(T) : 15exp(d.μ * t) * (-3cosh(a) + (a + 3 / a) * sinh(a)) / (a2^2) end -function cf(d::Biweight{T}, t::Real) where T<:Real +function cf(d::Biweight{T}, t::Real) where {T<:Real} a = d.σ * t a2 = a^2 - a == 0 ? one(T)+zero(T)*im : - -15cis(d.μ * t) * (3cos(a) + (a - 3/a) * sin(a)) / (a2^2) + a == 0 ? one(T) + zero(T) * im : + -15cis(d.μ * t) * (3cos(a) + (a - 3 / a) * sin(a)) / (a2^2) end diff --git a/src/univariate/continuous/cauchy.jl b/src/univariate/continuous/cauchy.jl index 85f8e592ca..8995650154 100644 --- a/src/univariate/continuous/cauchy.jl +++ b/src/univariate/continuous/cauchy.jl @@ -28,19 +28,21 @@ struct Cauchy{T<:Real} <: ContinuousUnivariateDistribution Cauchy{T}(µ, σ) where {T} = new{T}(µ, σ) end -function Cauchy(μ::T, σ::T; check_args::Bool=true) where {T<:Real} +function Cauchy(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Cauchy (σ, σ > zero(σ)) return Cauchy{T}(μ, σ) end -Cauchy(μ::Real, σ::Real; check_args::Bool=true) = Cauchy(promote(μ, σ)...; check_args=check_args) -Cauchy(μ::Integer, σ::Integer; check_args::Bool=true) = Cauchy(float(μ), float(σ); check_args=check_args) -Cauchy(μ::Real=0.0) = Cauchy(μ, one(μ); check_args=false) +Cauchy(μ::Real, σ::Real; check_args::Bool = true) = + Cauchy(promote(μ, σ)...; check_args = check_args) +Cauchy(μ::Integer, σ::Integer; check_args::Bool = true) = + Cauchy(float(μ), float(σ); check_args = check_args) +Cauchy(μ::Real = 0.0) = Cauchy(μ, one(μ); check_args = false) @distr_support Cauchy -Inf Inf #### Conversions -function convert(::Type{Cauchy{T}}, μ::Real, σ::Real) where T<:Real +function convert(::Type{Cauchy{T}}, μ::Real, σ::Real) where {T<:Real} Cauchy(T(μ), T(σ)) end Base.convert(::Type{Cauchy{T}}, d::Cauchy) where {T<:Real} = Cauchy{T}(T(d.μ), T(d.σ)) @@ -74,26 +76,26 @@ zval(d::Cauchy, x::Real) = (x - d.μ) / d.σ xval(d::Cauchy, z::Real) = d.μ + z * d.σ pdf(d::Cauchy, x::Real) = 1 / (π * scale(d) * (1 + zval(d, x)^2)) -logpdf(d::Cauchy, x::Real) = - (log1psq(zval(d, x)) + logπ + log(d.σ)) +logpdf(d::Cauchy, x::Real) = -(log1psq(zval(d, x)) + logπ + log(d.σ)) function cdf(d::Cauchy, x::Real) μ, σ = params(d) - invπ * atan(x - μ, σ) + 1//2 + invπ * atan(x - μ, σ) + 1 // 2 end function ccdf(d::Cauchy, x::Real) μ, σ = params(d) - invπ * atan(μ - x, σ) + 1//2 + invπ * atan(μ - x, σ) + 1 // 2 end function quantile(d::Cauchy, p::Real) μ, σ = params(d) - μ + σ * tan(π * (p - 1//2)) + μ + σ * tan(π * (p - 1 // 2)) end function cquantile(d::Cauchy, p::Real) μ, σ = params(d) - μ + σ * tan(π * (1//2 - p)) + μ + σ * tan(π * (1 // 2 - p)) end mgf(d::Cauchy{T}, t::Real) where {T<:Real} = t == zero(t) ? one(T) : T(NaN) @@ -107,7 +109,7 @@ Base.:*(c::Real, d::Cauchy) = Cauchy(c * d.μ, abs(c) * d.σ) #### Fitting # Note: this is not a Maximum Likelihood estimator -function fit(::Type{<:Cauchy}, x::AbstractArray{T}) where T<:Real +function fit(::Type{<:Cauchy}, x::AbstractArray{T}) where {T<:Real} l, m, u = quantile(x, [0.25, 0.5, 0.75]) Cauchy(m, (u - l) / 2) end diff --git a/src/univariate/continuous/chernoff.jl b/src/univariate/continuous/chernoff.jl index 3d319db252..97ca4b139f 100644 --- a/src/univariate/continuous/chernoff.jl +++ b/src/univariate/continuous/chernoff.jl @@ -30,154 +30,164 @@ Journal of Computational and Graphical Statistics, 2001. 
cdf(Chernoff(),-x) # For tail probabilities, use this instead of 1-cdf(Chernoff(),x) ``` """ -struct Chernoff <: ContinuousUnivariateDistribution -end +struct Chernoff <: ContinuousUnivariateDistribution end module ChernoffComputations - import QuadGK.quadgk - # The following arrays of constants have been precomputed to speed up computation. - # The first two correspond to the arrays a and b in the Groeneboom and Wellner article. - # The array airyai_roots contains roots of the airyai functions (atilde in the paper). - # Finally airyai_prime contains contains airyaiprime evaluated at the corresponding atildes - const a=[ - 0.14583333333333334 - -0.0014105902777777765 - -2.045269786155239e-5 - 1.7621771897199738e-6 - -5.7227597511834636e-8 - 1.2738111577329235e-9 - -2.1640824986157608e-11 - 2.87925759635786e-13 - -2.904496582565578e-15 - 1.8213982724416428e-17 - 4.318809086319947e-20 - -3.459634830348895e-21 - 6.541295360751684e-23 - -8.92275524974091e-25 - 1.0102911018761945e-26 - -9.976759882474874e-29 - ] - const b=[ - 0.6666666666666666 - 0.021164021164021163 - -0.0007893341226674574 - 1.9520978781876193e-5 - -3.184888134149933e-7 - 2.4964953625552273e-9 - 3.6216439304006735e-11 - -1.874438299727797e-12 - 4.393984966701305e-14 - -7.57237085924526e-16 - 1.063018673013022e-17 - -1.26451831871265e-19 - 1.2954000902764143e-21 - -1.1437790369170514e-23 - 8.543413293195206e-26 - -5.05879944592705e-28 - ] - const airyai_roots=[ - -2.338107410459767 - -4.08794944413097 - -5.520559828095551 - -6.786708090071759 - -7.944133587120853 - -9.02265085334098 - -10.040174341558085 - -11.008524303733262 - -11.936015563236262 - -12.828776752865757 - -13.691489035210719 - -14.527829951775335 - -15.340755135977997 - -16.132685156945772 - -16.90563399742994 - -17.66130010569706 - ] - const airyai_prime=[ - 0.7012108227206906 - -0.8031113696548534 - 0.865204025894141 - -0.9108507370495821 - 0.9473357094415571 - -0.9779228085695176 - 1.0043701226603126 - -1.0277386888207862 - 1.0487206485881893 - -1.0677938591574279 - 1.0853028313507 - -1.1015045702774968 - 1.1165961779326556 - -1.1307323104931881 - 1.1440366732735523 - -1.156609849116565 - ] +import QuadGK.quadgk +# The following arrays of constants have been precomputed to speed up computation. +# The first two correspond to the arrays a and b in the Groeneboom and Wellner article. +# The array airyai_roots contains roots of the airyai functions (atilde in the paper). 
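# For reference (annotation): the density assembled below is
# f(x) = g(x) * g(-x) / 2, following Groeneboom & Wellner.  For y <= 1,
# p(y) is summed from the power series with coefficients a and b; for y > 1,
# and likewise for g(x) at x <= -1, the expansions over the tabulated Airy
# roots take over, which is why airyai_roots and the matching airyaiprime
# values are precomputed to 16 terms.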
+# Finally airyai_prime contains contains airyaiprime evaluated at the corresponding atildes +const a = [ + 0.14583333333333334 + -0.0014105902777777765 + -2.045269786155239e-5 + 1.7621771897199738e-6 + -5.7227597511834636e-8 + 1.2738111577329235e-9 + -2.1640824986157608e-11 + 2.87925759635786e-13 + -2.904496582565578e-15 + 1.8213982724416428e-17 + 4.318809086319947e-20 + -3.459634830348895e-21 + 6.541295360751684e-23 + -8.92275524974091e-25 + 1.0102911018761945e-26 + -9.976759882474874e-29 +] +const b = [ + 0.6666666666666666 + 0.021164021164021163 + -0.0007893341226674574 + 1.9520978781876193e-5 + -3.184888134149933e-7 + 2.4964953625552273e-9 + 3.6216439304006735e-11 + -1.874438299727797e-12 + 4.393984966701305e-14 + -7.57237085924526e-16 + 1.063018673013022e-17 + -1.26451831871265e-19 + 1.2954000902764143e-21 + -1.1437790369170514e-23 + 8.543413293195206e-26 + -5.05879944592705e-28 +] +const airyai_roots = [ + -2.338107410459767 + -4.08794944413097 + -5.520559828095551 + -6.786708090071759 + -7.944133587120853 + -9.02265085334098 + -10.040174341558085 + -11.008524303733262 + -11.936015563236262 + -12.828776752865757 + -13.691489035210719 + -14.527829951775335 + -15.340755135977997 + -16.132685156945772 + -16.90563399742994 + -17.66130010569706 +] +const airyai_prime = [ + 0.7012108227206906 + -0.8031113696548534 + 0.865204025894141 + -0.9108507370495821 + 0.9473357094415571 + -0.9779228085695176 + 1.0043701226603126 + -1.0277386888207862 + 1.0487206485881893 + -1.0677938591574279 + 1.0853028313507 + -1.1015045702774968 + 1.1165961779326556 + -1.1307323104931881 + 1.1440366732735523 + -1.156609849116565 +] - const cuberoottwo = cbrt(2.0) - const sqrthalfpi = sqrt(0.5*pi) - const sqrttwopi = sqrt(2.0*pi) +const cuberoottwo = cbrt(2.0) +const sqrthalfpi = sqrt(0.5 * pi) +const sqrttwopi = sqrt(2.0 * pi) - function p(y::Real) - if iszero(y) - return -sqrt(0.5*pi) - end +function p(y::Real) + if iszero(y) + return -sqrt(0.5 * pi) + end - (y > 0) || throw(DomainError(y, "argument must be positive")) + (y > 0) || throw(DomainError(y, "argument must be positive")) - cnsty = y^(-1.5) - if (y <= 1.0) - return sum([(b[k]*cnsty - a[k]*sqrthalfpi)*y^(3*k) for k=1:length(a)])-sqrthalfpi - else - return sum([exp(cuberoottwo*a*y) for a in airyai_roots]) * 2 * sqrttwopi * exp(-y*y*y/6) - cnsty - end + cnsty = y^(-1.5) + if (y <= 1.0) + return sum([(b[k] * cnsty - a[k] * sqrthalfpi) * y^(3 * k) for k = 1:length(a)]) - sqrthalfpi + else + return sum([exp(cuberoottwo * a * y) for a in airyai_roots]) * + 2 * + sqrttwopi * + exp(-y * y * y / 6) - cnsty end +end - function g(x::Real) - function g_one(y::Real) - return p(y) * exp(-0.5*y*(2*x+y)*(2*x+y)) - end - function g_two(y::Real) - z = 2*x+y*y - return (z*y*y + 0.5 * z*z) * exp(-0.5*y*y*z*z) - end - if (x <= -1.0) - return cuberoottwo*cuberoottwo * exp(2*x*x*x/3.0) * sum([exp(-cuberoottwo*airyai_roots[k]*x) / airyai_prime[k] for k=1:length(airyai_roots)]) - else - return 2*x - (quadgk(g_one, 0.0, Inf)[1] - 4*quadgk(g_two, 0.0, Inf)[1]) / sqrttwopi # should perhaps combine integrals - end +function g(x::Real) + function g_one(y::Real) + return p(y) * exp(-0.5 * y * (2 * x + y) * (2 * x + y)) + end + function g_two(y::Real) + z = 2 * x + y * y + return (z * y * y + 0.5 * z * z) * exp(-0.5 * y * y * z * z) end + if (x <= -1.0) + return cuberoottwo * + cuberoottwo * + exp(2 * x * x * x / 3.0) * + sum([ + exp(-cuberoottwo * airyai_roots[k] * x) / airyai_prime[k] for + k = 1:length(airyai_roots) + ]) + else + return 2 * x - + (quadgk(g_one, 0.0, Inf)[1] - 4 * 
quadgk(g_two, 0.0, Inf)[1]) / sqrttwopi # should perhaps combine integrals + end +end - _pdf(x::Real) = g(x)*g(-x)*0.5 - _cdf(x::Real) = (x < 0.0) ? _cdfbar(-x) : 0.5 + quadgk(_pdf,0.0,x)[1] - _cdfbar(x::Real) = (x < 0.0) ? _cdf(x) : quadgk(_pdf, x, Inf)[1] +_pdf(x::Real) = g(x) * g(-x) * 0.5 +_cdf(x::Real) = (x < 0.0) ? _cdfbar(-x) : 0.5 + quadgk(_pdf, 0.0, x)[1] +_cdfbar(x::Real) = (x < 0.0) ? _cdf(x) : quadgk(_pdf, x, Inf)[1] end pdf(d::Chernoff, x::Real) = ChernoffComputations._pdf(x) -logpdf(d::Chernoff, x::Real) = log(ChernoffComputations.g(x))+log(ChernoffComputations.g(-x))+log(0.5) +logpdf(d::Chernoff, x::Real) = + log(ChernoffComputations.g(x)) + log(ChernoffComputations.g(-x)) + log(0.5) cdf(d::Chernoff, x::Real) = ChernoffComputations._cdf(x) function quantile(d::Chernoff, tau::Real) # some commonly used quantiles were precomputed - precomputedquants=[ - 0.0 -Inf; - 0.01 -1.171534341573129; - 0.025 -0.9981810946684274; - 0.05 -0.8450811886357725; - 0.1 -0.6642351964332931; - 0.2 -0.43982766604886553; - 0.25 -0.353308035220429; - 0.3 -0.2751512847290148; - 0.4 -0.13319637678583637; - 0.5 0.0; - 0.6 0.13319637678583637; - 0.7 0.2751512847290147; - 0.75 0.353308035220429; - 0.8 0.4398276660488655; - 0.9 0.6642351964332931; - 0.95 0.8450811886357724; - 0.975 0.9981810946684272; - 0.99 1.17153434157313; + precomputedquants = [ + 0.0 -Inf + 0.01 -1.171534341573129 + 0.025 -0.9981810946684274 + 0.05 -0.8450811886357725 + 0.1 -0.6642351964332931 + 0.2 -0.43982766604886553 + 0.25 -0.353308035220429 + 0.3 -0.2751512847290148 + 0.4 -0.13319637678583637 + 0.5 0.0 + 0.6 0.13319637678583637 + 0.7 0.2751512847290147 + 0.75 0.353308035220429 + 0.8 0.4398276660488655 + 0.9 0.6642351964332931 + 0.95 0.8450811886357724 + 0.975 0.9981810946684272 + 0.99 1.17153434157313 1.0 Inf - ] + ] (0.0 <= tau && tau <= 1.0) || throw(DomainError(tau, "illegal value of tau")) present = searchsortedfirst(precomputedquants[:, 1], tau) if present <= size(precomputedquants, 1) @@ -190,13 +200,25 @@ function quantile(d::Chernoff, tau::Real) stdapprox = 0.52 dnorm = Normal(0.0, 1.0) if tau < 0.001 - return -newton(x -> tau - ChernoffComputations._cdfbar(x), ChernoffComputations._pdf, quantile(dnorm, 1.0 - tau)*stdapprox) + return -newton( + x -> tau - ChernoffComputations._cdfbar(x), + ChernoffComputations._pdf, + quantile(dnorm, 1.0 - tau) * stdapprox, + ) end if tau > 0.999 - return newton(x -> 1.0 - tau - ChernoffComputations._cdfbar(x), ChernoffComputations._pdf, quantile(dnorm, tau)*stdapprox) + return newton( + x -> 1.0 - tau - ChernoffComputations._cdfbar(x), + ChernoffComputations._pdf, + quantile(dnorm, tau) * stdapprox, + ) end - return newton(x -> ChernoffComputations._cdf(x) - tau, ChernoffComputations._pdf, quantile(dnorm, tau)*stdapprox) # should consider replacing x-> construct for speed + return newton( + x -> ChernoffComputations._cdf(x) - tau, + ChernoffComputations._pdf, + quantile(dnorm, tau) * stdapprox, + ) # should consider replacing x-> construct for speed end minimum(d::Chernoff) = -Inf @@ -250,8 +272,8 @@ function rand(rng::AbstractRNG, d::Chernoff) # Ziggurat random n 0.31692143952775814 0.2358977457249061 1.0218214689661219e-7 - ] - y=[ + ] + y = [ 0.02016386420423385 0.042162593823411566 0.06607475557706186 @@ -284,23 +306,23 @@ function rand(rng::AbstractRNG, d::Chernoff) # Ziggurat random n 1.2764995726449935 1.3789927085182485 1.516689116183566 - ] + ] n = length(x) i = rand(rng, 0:n-1) - r = (2.0*rand(rng)-1) * ((i>0) ? x[i] : A/y[1]) + r = (2.0 * rand(rng) - 1) * ((i > 0) ? 
x[i] : A / y[1]) rabs = abs(r) if rabs < x[i+1] return r end - s = (i>0) ? (y[i]+rand(rng)*(y[i+1]-y[i])) : rand(rng)*y[1] - if s < 2.0*ChernoffComputations._pdf(rabs) + s = (i > 0) ? (y[i] + rand(rng) * (y[i+1] - y[i])) : rand(rng) * y[1] + if s < 2.0 * ChernoffComputations._pdf(rabs) return r end if i > 0 return rand(rng, d) end - F0 = ChernoffComputations._cdf(A/y[1]) - tau = 2.0*rand(rng)-1 # ~ U(-1,1) + F0 = ChernoffComputations._cdf(A / y[1]) + tau = 2.0 * rand(rng) - 1 # ~ U(-1,1) tauabs = abs(tau) - return quantile(d, tauabs + (1-tauabs)*F0) * sign(tau) + return quantile(d, tauabs + (1 - tauabs) * F0) * sign(tau) end diff --git a/src/univariate/continuous/chi.jl b/src/univariate/continuous/chi.jl index 8fc7310038..4d6a8b82d4 100644 --- a/src/univariate/continuous/chi.jl +++ b/src/univariate/continuous/chi.jl @@ -26,12 +26,12 @@ struct Chi{T<:Real} <: ContinuousUnivariateDistribution Chi{T}(ν::T) where {T} = new{T}(ν) end -function Chi(ν::Real; check_args::Bool=true) +function Chi(ν::Real; check_args::Bool = true) @check_args Chi (ν, ν > zero(ν)) return Chi{typeof(ν)}(ν) end -Chi(ν::Integer; check_args::Bool=true) = Chi(float(ν); check_args=check_args) +Chi(ν::Integer; check_args::Bool = true) = Chi(float(ν); check_args = check_args) @distr_support Chi 0.0 Inf @@ -49,10 +49,12 @@ params(d::Chi) = (d.ν,) #### Statistics -mean(d::Chi) = (h = d.ν/2; sqrt2 * exp(loggamma(h + 1//2) - loggamma(h))) +mean(d::Chi) = (h = d.ν / 2; sqrt2 * exp(loggamma(h + 1 // 2) - loggamma(h))) var(d::Chi) = d.ν - mean(d)^2 -_chi_skewness(μ::Real, σ::Real) = (σ2 = σ^2; σ3 = σ2 * σ; (μ / σ3) * (1 - 2σ2)) +_chi_skewness(μ::Real, σ::Real) = (σ2 = σ^2; +σ3 = σ2 * σ; +(μ / σ3) * (1 - 2σ2)) function skewness(d::Chi) μ = mean(d) @@ -64,18 +66,15 @@ function kurtosis(d::Chi) μ = mean(d) σ = sqrt(d.ν - μ^2) γ = _chi_skewness(μ, σ) - (2/σ^2) * (1 - μ * σ * γ - σ^2) + (2 / σ^2) * (1 - μ * σ * γ - σ^2) end entropy(d::Chi{T}) where {T<:Real} = (ν = d.ν; - loggamma(ν/2) - T(logtwo)/2 - ((ν - 1)/2) * digamma(ν/2) + ν/2) +loggamma(ν / 2) - T(logtwo) / 2 - ((ν - 1) / 2) * digamma(ν / 2) + ν / 2) -function mode(d::Chi; check_args::Bool=true) +function mode(d::Chi; check_args::Bool = true) ν = d.ν - @check_args( - Chi, - (ν, ν >= 1, "Chi distribution has no mode when ν < 1"), - ) + @check_args(Chi, (ν, ν >= 1, "Chi distribution has no mode when ν < 1"),) sqrt(ν - 1) end @@ -99,20 +98,19 @@ end gradlogpdf(d::Chi{T}, x::Real) where {T<:Real} = x >= 0 ? 
(d.ν - 1) / x - x : zero(T) for f in (:cdf, :ccdf, :logcdf, :logccdf) - @eval $f(d::Chi, x::Real) = $f(Chisq(d.ν; check_args=false), max(x, 0)^2) + @eval $f(d::Chi, x::Real) = $f(Chisq(d.ν; check_args = false), max(x, 0)^2) end for f in (:quantile, :cquantile, :invlogcdf, :invlogccdf) - @eval $f(d::Chi, p::Real) = sqrt($f(Chisq(d.ν; check_args=false), p)) + @eval $f(d::Chi, p::Real) = sqrt($f(Chisq(d.ν; check_args = false), p)) end #### Sampling -rand(rng::AbstractRNG, d::Chi) = - (ν = d.ν; sqrt(rand(rng, Gamma(ν / 2.0, 2.0one(ν))))) +rand(rng::AbstractRNG, d::Chi) = (ν = d.ν; +sqrt(rand(rng, Gamma(ν / 2.0, 2.0one(ν))))) -struct ChiSampler{S <: Sampleable{Univariate,Continuous}} <: - Sampleable{Univariate,Continuous} +struct ChiSampler{S<:Sampleable{Univariate,Continuous}} <: Sampleable{Univariate,Continuous} s::S end diff --git a/src/univariate/continuous/chisq.jl b/src/univariate/continuous/chisq.jl index 8604742ce5..1e25cc1d70 100644 --- a/src/univariate/continuous/chisq.jl +++ b/src/univariate/continuous/chisq.jl @@ -25,12 +25,12 @@ struct Chisq{T<:Real} <: ContinuousUnivariateDistribution Chisq{T}(ν::T) where {T} = new{T}(ν) end -function Chisq(ν::Real; check_args::Bool=true) +function Chisq(ν::Real; check_args::Bool = true) @check_args Chisq (ν, ν > zero(ν)) return Chisq{typeof(ν)}(ν) end -Chisq(ν::Integer; check_args::Bool=true) = Chisq(float(ν); check_args=check_args) +Chisq(ν::Integer; check_args::Bool = true) = Chisq(float(ν); check_args = check_args) @distr_support Chisq 0.0 Inf @@ -57,16 +57,16 @@ kurtosis(d::Chisq) = 12 / d.ν mode(d::Chisq{T}) where {T<:Real} = d.ν > 2 ? d.ν - 2 : zero(T) -function median(d::Chisq; approx::Bool=false) +function median(d::Chisq; approx::Bool = false) if approx return d.ν * (1 - 2 / (9 * d.ν))^3 else - return quantile(d, 1//2) + return quantile(d, 1 // 2) end end function entropy(d::Chisq) - hν = d.ν/2 + hν = d.ν / 2 hν + logtwo + loggamma(hν) + (1 - hν) * digamma(hν) end @@ -82,16 +82,17 @@ end @_delegate_statsfuns Chisq chisq ν -mgf(d::Chisq, t::Real) = (1 - 2 * t)^(-d.ν/2) +mgf(d::Chisq, t::Real) = (1 - 2 * t)^(-d.ν / 2) function cgf(d::Chisq, t) ν = dof(d) - return -ν/2 * log1p(-2*t) + return -ν / 2 * log1p(-2 * t) end -cf(d::Chisq, t::Real) = (1 - 2 * im * t)^(-d.ν/2) +cf(d::Chisq, t::Real) = (1 - 2 * im * t)^(-d.ν / 2) -gradlogpdf(d::Chisq{T}, x::Real) where {T<:Real} = x > 0 ? (d.ν/2 - 1) / x - 1//2 : zero(T) +gradlogpdf(d::Chisq{T}, x::Real) where {T<:Real} = + x > 0 ? 
(d.ν / 2 - 1) / x - 1 // 2 : zero(T) #### Sampling diff --git a/src/univariate/continuous/cosine.jl b/src/univariate/continuous/cosine.jl index 4d11a9ddde..2e4f89d633 100644 --- a/src/univariate/continuous/cosine.jl +++ b/src/univariate/continuous/cosine.jl @@ -13,19 +13,21 @@ struct Cosine{T<:Real} <: ContinuousUnivariateDistribution Cosine{T}(μ::T, σ::T) where {T} = new{T}(µ, σ) end -function Cosine(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function Cosine(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Cosine (σ, σ > zero(σ)) return Cosine{T}(μ, σ) end -Cosine(μ::Real, σ::Real; check_args::Bool=true) = Cosine(promote(μ, σ)...; check_args=check_args) -Cosine(μ::Integer, σ::Integer; check_args::Bool=true) = Cosine(float(μ), float(σ); check_args=check_args) -Cosine(μ::Real=0.0) = Cosine(μ, one(µ); check_args=false) +Cosine(μ::Real, σ::Real; check_args::Bool = true) = + Cosine(promote(μ, σ)...; check_args = check_args) +Cosine(μ::Integer, σ::Integer; check_args::Bool = true) = + Cosine(float(μ), float(σ); check_args = check_args) +Cosine(μ::Real = 0.0) = Cosine(μ, one(µ); check_args = false) @distr_support Cosine d.μ - d.σ d.μ + d.σ #### Conversions -function convert(::Type{Cosine{T}}, μ::Real, σ::Real) where T<:Real +function convert(::Type{Cosine{T}}, μ::Real, σ::Real) where {T<:Real} Cosine(T(μ), T(σ)) end function Base.convert(::Type{Cosine{T}}, d::Cosine) where {T<:Real} @@ -50,16 +52,16 @@ median(d::Cosine) = d.μ mode(d::Cosine) = d.μ -var(d::Cosine{T}) where {T<:Real} = d.σ^2 * (1//3 - 2/T(π)^2) +var(d::Cosine{T}) where {T<:Real} = d.σ^2 * (1 // 3 - 2 / T(π)^2) skewness(d::Cosine{T}) where {T<:Real} = zero(T) -kurtosis(d::Cosine{T}) where {T<:Real} = 6*(90-T(π)^4) / (5*(T(π)^2-6)^2) +kurtosis(d::Cosine{T}) where {T<:Real} = 6 * (90 - T(π)^4) / (5 * (T(π)^2 - 6)^2) #### Evaluation -function pdf(d::Cosine{T}, x::Real) where T<:Real +function pdf(d::Cosine{T}, x::Real) where {T<:Real} if insupport(d, x) z = (x - d.μ) / d.σ return (1 + cospi(z)) / (2d.σ) @@ -70,7 +72,7 @@ end logpdf(d::Cosine, x::Real) = log(pdf(d, x)) -function cdf(d::Cosine{T}, x::Real) where T<:Real +function cdf(d::Cosine{T}, x::Real) where {T<:Real} if x < d.μ - d.σ return zero(T) end @@ -81,7 +83,7 @@ function cdf(d::Cosine{T}, x::Real) where T<:Real (1 + z + sinpi(z) * invπ) / 2 end -function ccdf(d::Cosine{T}, x::Real) where T<:Real +function ccdf(d::Cosine{T}, x::Real) where {T<:Real} if x < d.μ - d.σ return one(T) end diff --git a/src/univariate/continuous/epanechnikov.jl b/src/univariate/continuous/epanechnikov.jl index ea75c09ea5..54a546b224 100644 --- a/src/univariate/continuous/epanechnikov.jl +++ b/src/univariate/continuous/epanechnikov.jl @@ -7,20 +7,22 @@ struct Epanechnikov{T<:Real} <: ContinuousUnivariateDistribution Epanechnikov{T}(µ::T, σ::T) where {T} = new{T}(µ, σ) end -function Epanechnikov(μ::T, σ::T; check_args::Bool=true) where {T<:Real} +function Epanechnikov(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Epanechnikov (σ, σ > zero(σ)) return Epanechnikov{T}(μ, σ) end -Epanechnikov(μ::Real, σ::Real; check_args::Bool=true) = Epanechnikov(promote(μ, σ)...; check_args=check_args) -Epanechnikov(μ::Integer, σ::Integer; check_args::Bool=true) = Epanechnikov(float(μ), float(σ); check_args=check_args) -Epanechnikov(μ::Real=0.0) = Epanechnikov(μ, one(μ); check_args=false) +Epanechnikov(μ::Real, σ::Real; check_args::Bool = true) = + Epanechnikov(promote(μ, σ)...; check_args = check_args) +Epanechnikov(μ::Integer, σ::Integer; check_args::Bool = true) = + 
Epanechnikov(float(μ), float(σ); check_args = check_args) +Epanechnikov(μ::Real = 0.0) = Epanechnikov(μ, one(μ); check_args = false) @distr_support Epanechnikov d.μ - d.σ d.μ + d.σ #### Conversions -function convert(::Type{Epanechnikov{T}}, μ::Real, σ::Real) where T<:Real - Epanechnikov(T(μ), T(σ), check_args=false) +function convert(::Type{Epanechnikov{T}}, μ::Real, σ::Real) where {T<:Real} + Epanechnikov(T(μ), T(σ), check_args = false) end function Base.convert(::Type{Epanechnikov{T}}, d::Epanechnikov) where {T<:Real} Epanechnikov{T}(T(d.μ), T(d.σ)) @@ -41,39 +43,33 @@ mode(d::Epanechnikov) = d.μ var(d::Epanechnikov) = d.σ^2 / 5 skewness(d::Epanechnikov{T}) where {T<:Real} = zero(T) -kurtosis(d::Epanechnikov{T}) where {T<:Real} = -2.914285714285714*one(T) # 3/35-3 +kurtosis(d::Epanechnikov{T}) where {T<:Real} = -2.914285714285714 * one(T) # 3/35-3 ## Functions -function pdf(d::Epanechnikov{T}, x::Real) where T<:Real +function pdf(d::Epanechnikov{T}, x::Real) where {T<:Real} u = abs(x - d.μ) / d.σ u >= 1 ? zero(T) : 3 * (1 - u^2) / (4 * d.σ) end logpdf(d::Epanechnikov, x::Real) = log(pdf(d, x)) -function cdf(d::Epanechnikov{T}, x::Real) where T<:Real +function cdf(d::Epanechnikov{T}, x::Real) where {T<:Real} u = (x - d.μ) / d.σ - u <= -1 ? zero(T) : - u >= 1 ? one(T) : - 1//2 + u * (3//4 - u^2/4) + u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) end -function ccdf(d::Epanechnikov{T}, x::Real) where T<:Real +function ccdf(d::Epanechnikov{T}, x::Real) where {T<:Real} u = (d.μ - x) / d.σ - u <= -1 ? zero(T) : - u >= 1 ? one(T) : - 1//2 + u * (3//4 - u^2/4) + u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) end @quantile_newton Epanechnikov -function mgf(d::Epanechnikov{T}, t::Real) where T<:Real +function mgf(d::Epanechnikov{T}, t::Real) where {T<:Real} a = d.σ * t - a == 0 ? one(T) : - 3exp(d.μ * t) * (cosh(a) - sinh(a) / a) / a^2 + a == 0 ? one(T) : 3exp(d.μ * t) * (cosh(a) - sinh(a) / a) / a^2 end -function cf(d::Epanechnikov{T}, t::Real) where T<:Real +function cf(d::Epanechnikov{T}, t::Real) where {T<:Real} a = d.σ * t - a == 0 ? one(T)+zero(T)*im : - -3exp(im * d.μ * t) * (cos(a) - sin(a) / a) / a^2 + a == 0 ? 
one(T) + zero(T) * im : -3exp(im * d.μ * t) * (cos(a) - sin(a) / a) / a^2 end diff --git a/src/univariate/continuous/erlang.jl b/src/univariate/continuous/erlang.jl index cd984415e0..76cfaea443 100644 --- a/src/univariate/continuous/erlang.jl +++ b/src/univariate/continuous/erlang.jl @@ -20,27 +20,27 @@ struct Erlang{T<:Real} <: ContinuousUnivariateDistribution Erlang{T}(α::Int, θ::T) where {T} = new{T}(α, θ) end -function Erlang(α::Real, θ::Real; check_args::Bool=true) +function Erlang(α::Real, θ::Real; check_args::Bool = true) @check_args Erlang (α, isinteger(α)) (α, α >= zero(α)) return Erlang{typeof(θ)}(α, θ) end -function Erlang(α::Integer, θ::Real; check_args::Bool=true) +function Erlang(α::Integer, θ::Real; check_args::Bool = true) @check_args Erlang (α, α >= zero(α)) return Erlang{typeof(θ)}(α, θ) end -function Erlang(α::Integer, θ::Integer; check_args::Bool=true) - return Erlang(α, float(θ); check_args=check_args) +function Erlang(α::Integer, θ::Integer; check_args::Bool = true) + return Erlang(α, float(θ); check_args = check_args) end -Erlang(α::Integer=1) = Erlang(α, 1.0; check_args=false) +Erlang(α::Integer = 1) = Erlang(α, 1.0; check_args = false) @distr_support Erlang 0.0 Inf #### Conversions -function convert(::Type{Erlang{T}}, α::Integer, θ::S) where {T <: Real, S <: Real} - Erlang(α, T(θ), check_args=false) +function convert(::Type{Erlang{T}}, α::Integer, θ::S) where {T<:Real,S<:Real} + Erlang(α, T(θ), check_args = false) end function Base.convert(::Type{Erlang{T}}, d::Erlang) where {T<:Real} Erlang{T}(d.α, T(d.θ)) @@ -62,12 +62,9 @@ var(d::Erlang) = d.α * d.θ^2 skewness(d::Erlang) = 2 / sqrt(d.α) kurtosis(d::Erlang) = 6 / d.α -function mode(d::Erlang; check_args::Bool=true) +function mode(d::Erlang; check_args::Bool = true) α, θ = params(d) - @check_args( - Erlang, - (α, α >= 1, "Erlang has no mode when α < 1"), - ) + @check_args(Erlang, (α, α >= 1, "Erlang has no mode when α < 1"),) θ * (α - 1) end @@ -79,9 +76,9 @@ end mgf(d::Erlang, t::Real) = (1 - t * d.θ)^(-d.α) function cgf(d::Erlang, t) α, θ = params(d) - -α * log1p(-t*θ) + -α * log1p(-t * θ) end -cf(d::Erlang, t::Real) = (1 - im * t * d.θ)^(-d.α) +cf(d::Erlang, t::Real) = (1 - im * t * d.θ)^(-d.α) #### Evaluation & Sampling diff --git a/src/univariate/continuous/exponential.jl b/src/univariate/continuous/exponential.jl index 96737c94a2..d73f9644b6 100644 --- a/src/univariate/continuous/exponential.jl +++ b/src/univariate/continuous/exponential.jl @@ -26,18 +26,19 @@ struct Exponential{T<:Real} <: ContinuousUnivariateDistribution Exponential{T}(θ::T) where {T} = new{T}(θ) end -function Exponential(θ::Real; check_args::Bool=true) +function Exponential(θ::Real; check_args::Bool = true) @check_args Exponential (θ, θ > zero(θ)) return Exponential{typeof(θ)}(θ) end -Exponential(θ::Integer; check_args::Bool=true) = Exponential(float(θ); check_args=check_args) +Exponential(θ::Integer; check_args::Bool = true) = + Exponential(float(θ); check_args = check_args) Exponential() = Exponential{Float64}(1.0) @distr_support Exponential 0.0 Inf ### Conversions -convert(::Type{Exponential{T}}, θ::S) where {T <: Real, S <: Real} = Exponential(T(θ)) +convert(::Type{Exponential{T}}, θ::S) where {T<:Real,S<:Real} = Exponential(T(θ)) function Base.convert(::Type{Exponential{T}}, d::Exponential) where {T<:Real} return Exponential(T(d.θ)) end @@ -96,12 +97,12 @@ invlogccdf(d::Exponential, lp::Real) = -xval(d, lp) gradlogpdf(d::Exponential{T}, x::Real) where {T<:Real} = x > 0 ? 
-rate(d) : zero(T) -mgf(d::Exponential, t::Real) = 1/(1 - t * scale(d)) +mgf(d::Exponential, t::Real) = 1 / (1 - t * scale(d)) function cgf(d::Exponential, t) μ = mean(d) - return - log1p(- t * μ) + return -log1p(-t * μ) end -cf(d::Exponential, t::Real) = 1/(1 - t * im * scale(d)) +cf(d::Exponential, t::Real) = 1 / (1 - t * im * scale(d)) #### Sampling @@ -122,7 +123,12 @@ struct ExponentialStats <: SufficientStats ExponentialStats(sx::Real, sw::Real) = new(sx, sw) end -suffstats(::Type{<:Exponential}, x::AbstractArray{T}) where {T<:Real} = ExponentialStats(sum(x), length(x)) -suffstats(::Type{<:Exponential}, x::AbstractArray{T}, w::AbstractArray{Float64}) where {T<:Real} = ExponentialStats(dot(x, w), sum(w)) +suffstats(::Type{<:Exponential}, x::AbstractArray{T}) where {T<:Real} = + ExponentialStats(sum(x), length(x)) +suffstats( + ::Type{<:Exponential}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Real} = ExponentialStats(dot(x, w), sum(w)) fit_mle(::Type{<:Exponential}, ss::ExponentialStats) = Exponential(ss.sx / ss.sw) diff --git a/src/univariate/continuous/fdist.jl b/src/univariate/continuous/fdist.jl index e3f2ba104f..576259fb59 100644 --- a/src/univariate/continuous/fdist.jl +++ b/src/univariate/continuous/fdist.jl @@ -26,20 +26,23 @@ struct FDist{T<:Real} <: ContinuousUnivariateDistribution ν1::T ν2::T - function FDist{T}(ν1::T, ν2::T; check_args::Bool=true) where T + function FDist{T}(ν1::T, ν2::T; check_args::Bool = true) where {T} @check_args FDist (ν1, ν1 > zero(ν1)) (ν2, ν2 > zero(ν2)) new{T}(ν1, ν2) end end -FDist(ν1::T, ν2::T; check_args::Bool=true) where {T<:Real} = FDist{T}(ν1, ν2; check_args=check_args) -FDist(ν1::Integer, ν2::Integer; check_args::Bool=true) = FDist(float(ν1), float(ν2); check_args=check_args) -FDist(ν1::Real, ν2::Real; check_args::Bool=true) = FDist(promote(ν1, ν2)...; check_args=check_args) +FDist(ν1::T, ν2::T; check_args::Bool = true) where {T<:Real} = + FDist{T}(ν1, ν2; check_args = check_args) +FDist(ν1::Integer, ν2::Integer; check_args::Bool = true) = + FDist(float(ν1), float(ν2); check_args = check_args) +FDist(ν1::Real, ν2::Real; check_args::Bool = true) = + FDist(promote(ν1, ν2)...; check_args = check_args) @distr_support FDist 0.0 Inf #### Conversions -function convert(::Type{FDist{T}}, ν1::S, ν2::S) where {T <: Real, S <: Real} +function convert(::Type{FDist{T}}, ν1::S, ν2::S) where {T<:Real,S<:Real} FDist(T(ν1), T(ν2)) end Base.convert(::Type{FDist{T}}, d::FDist) where {T<:Real} = FDist{T}(T(d.ν1), T(d.ν2)) @@ -53,19 +56,20 @@ params(d::FDist) = (d.ν1, d.ν2) #### Statistics -mean(d::FDist{T}) where {T<:Real} = (ν2 = d.ν2; ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) +mean(d::FDist{T}) where {T<:Real} = (ν2 = d.ν2; +ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) -function mode(d::FDist{T}) where T<:Real +function mode(d::FDist{T}) where {T<:Real} (ν1, ν2) = params(d) - ν1 > 2 ? ((ν1 - 2)/ν1) * (ν2 / (ν2 + 2)) : zero(T) + ν1 > 2 ? ((ν1 - 2) / ν1) * (ν2 / (ν2 + 2)) : zero(T) end -function var(d::FDist{T}) where T<:Real +function var(d::FDist{T}) where {T<:Real} (ν1, ν2) = params(d) ν2 > 4 ? 
2ν2^2 * (ν1 + ν2 - 2) / (ν1 * (ν2 - 2)^2 * (ν2 - 4)) : T(NaN) end -function skewness(d::FDist{T}) where T<:Real +function skewness(d::FDist{T}) where {T<:Real} (ν1, ν2) = params(d) if ν2 > 6 return (2ν1 + ν2 - 2) * sqrt(8(ν2 - 4)) / ((ν2 - 6) * sqrt(ν1 * (ν1 + ν2 - 2))) @@ -74,7 +78,7 @@ function skewness(d::FDist{T}) where T<:Real end end -function kurtosis(d::FDist{T}) where T<:Real +function kurtosis(d::FDist{T}) where {T<:Real} (ν1, ν2) = params(d) if ν2 > 8 a = ν1 * (5ν2 - 22) * (ν1 + ν2 - 2) + (ν2 - 4) * (ν2 - 2)^2 @@ -87,18 +91,18 @@ end function entropy(d::FDist) (ν1, ν2) = params(d) - hν1 = ν1/2 - hν2 = ν2/2 - hs = (ν1 + ν2)/2 + hν1 = ν1 / 2 + hν2 = ν2 / 2 + hs = (ν1 + ν2) / 2 return log(ν2 / ν1) + loggamma(hν1) + loggamma(hν2) - loggamma(hs) + - (1 - hν1) * digamma(hν1) + (-1 - hν2) * digamma(hν2) + - hs * digamma(hs) + (1 - hν1) * digamma(hν1) + + (-1 - hν2) * digamma(hν2) + + hs * digamma(hs) end #### Evaluation & Sampling @_delegate_statsfuns FDist fdist ν1 ν2 -rand(rng::AbstractRNG, d::FDist) = - ((ν1, ν2) = params(d); - (ν2 * rand(rng, Chisq(ν1))) / (ν1 * rand(rng, Chisq(ν2)))) +rand(rng::AbstractRNG, d::FDist) = ((ν1, ν2) = params(d); +(ν2 * rand(rng, Chisq(ν1))) / (ν1 * rand(rng, Chisq(ν2)))) diff --git a/src/univariate/continuous/frechet.jl b/src/univariate/continuous/frechet.jl index 46b6d40f9a..0046d3afd6 100644 --- a/src/univariate/continuous/frechet.jl +++ b/src/univariate/continuous/frechet.jl @@ -29,19 +29,21 @@ struct Frechet{T<:Real} <: ContinuousUnivariateDistribution Frechet{T}(α::T, θ::T) where {T<:Real} = new{T}(α, θ) end -function Frechet(α::T, θ::T; check_args::Bool=true) where {T <: Real} +function Frechet(α::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Frechet (α, α > zero(α)) (θ, θ > zero(θ)) return Frechet{T}(α, θ) end -Frechet(α::Real, θ::Real; check_args::Bool=true) = Frechet(promote(α, θ)...; check_args=check_args) -Frechet(α::Integer, θ::Integer; check_args::Bool=true) = Frechet(float(α), float(θ); check_args=check_args) -Frechet(α::Real=1.0) = Frechet(α, one(α); check_args=false) +Frechet(α::Real, θ::Real; check_args::Bool = true) = + Frechet(promote(α, θ)...; check_args = check_args) +Frechet(α::Integer, θ::Integer; check_args::Bool = true) = + Frechet(float(α), float(θ); check_args = check_args) +Frechet(α::Real = 1.0) = Frechet(α, one(α); check_args = false) @distr_support Frechet 0.0 Inf #### Conversions -function convert(::Type{Frechet{T}}, α::S, θ::S) where {T <: Real, S <: Real} +function convert(::Type{Frechet{T}}, α::S, θ::S) where {T<:Real,S<:Real} Frechet(T(α), T(θ)) end Base.convert(::Type{Frechet{T}}, d::Frechet) where {T<:Real} = Frechet{T}(T(d.α), T(d.θ)) @@ -64,7 +66,8 @@ end median(d::Frechet) = d.θ * logtwo^(-1 / d.α) -mode(d::Frechet) = (iα = -1/d.α; d.θ * (1 - iα) ^ iα) +mode(d::Frechet) = (iα = -1 / d.α; +d.θ * (1 - iα)^iα) function var(d::Frechet{T}) where {T<:Real} if d.α > 2 @@ -75,7 +78,7 @@ function var(d::Frechet{T}) where {T<:Real} end end -function skewness(d::Frechet{T}) where T<:Real +function skewness(d::Frechet{T}) where {T<:Real} if d.α > 3 iα = 1 / d.α g1 = gamma(1 - iα) @@ -87,7 +90,7 @@ function skewness(d::Frechet{T}) where T<:Real end end -function kurtosis(d::Frechet{T}) where T<:Real +function kurtosis(d::Frechet{T}) where {T<:Real} if d.α > 3 iα = 1 / d.α g1 = gamma(1 - iα) @@ -107,7 +110,7 @@ end #### Evaluation -function logpdf(d::Frechet{T}, x::Real) where T<:Real +function logpdf(d::Frechet{T}, x::Real) where {T<:Real} (α, θ) = params(d) if x > 0 z = θ / x @@ -118,23 +121,23 @@ function 
logpdf(d::Frechet{T}, x::Real) where T<:Real end zval(d::Frechet, x::Real) = (d.θ / max(x, 0))^d.α -xval(d::Frechet, z::Real) = d.θ * z^(- 1 / d.α) +xval(d::Frechet, z::Real) = d.θ * z^(-1 / d.α) -cdf(d::Frechet, x::Real) = exp(- zval(d, x)) -ccdf(d::Frechet, x::Real) = -expm1(- zval(d, x)) -logcdf(d::Frechet, x::Real) = - zval(d, x) -logccdf(d::Frechet, x::Real) = log1mexp(- zval(d, x)) +cdf(d::Frechet, x::Real) = exp(-zval(d, x)) +ccdf(d::Frechet, x::Real) = -expm1(-zval(d, x)) +logcdf(d::Frechet, x::Real) = -zval(d, x) +logccdf(d::Frechet, x::Real) = log1mexp(-zval(d, x)) quantile(d::Frechet, p::Real) = xval(d, -log(p)) cquantile(d::Frechet, p::Real) = xval(d, -log1p(-p)) invlogcdf(d::Frechet, lp::Real) = xval(d, -lp) invlogccdf(d::Frechet, lp::Real) = xval(d, -log1mexp(lp)) -function gradlogpdf(d::Frechet{T}, x::Real) where T<:Real +function gradlogpdf(d::Frechet{T}, x::Real) where {T<:Real} (α, θ) = params(d) - insupport(Frechet, x) ? -(α + 1) / x + α * (θ^α) * x^(-α-1) : zero(T) + insupport(Frechet, x) ? -(α + 1) / x + α * (θ^α) * x^(-α - 1) : zero(T) end ## Sampling -rand(rng::AbstractRNG, d::Frechet) = d.θ * randexp(rng) ^ (-1 / d.α) +rand(rng::AbstractRNG, d::Frechet) = d.θ * randexp(rng)^(-1 / d.α) diff --git a/src/univariate/continuous/gamma.jl b/src/univariate/continuous/gamma.jl index 866255fb7d..52ae64de70 100644 --- a/src/univariate/continuous/gamma.jl +++ b/src/univariate/continuous/gamma.jl @@ -30,20 +30,22 @@ struct Gamma{T<:Real} <: ContinuousUnivariateDistribution Gamma{T}(α, θ) where {T} = new{T}(α, θ) end -function Gamma(α::T, θ::T; check_args::Bool=true) where {T <: Real} +function Gamma(α::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Gamma (α, α > zero(α)) (θ, θ > zero(θ)) return Gamma{T}(α, θ) end -Gamma(α::Real, θ::Real; check_args::Bool=true) = Gamma(promote(α, θ)...; check_args=check_args) -Gamma(α::Integer, θ::Integer; check_args::Bool=true) = Gamma(float(α), float(θ); check_args=check_args) -Gamma(α::Real; check_args::Bool=true) = Gamma(α, one(α); check_args=check_args) +Gamma(α::Real, θ::Real; check_args::Bool = true) = + Gamma(promote(α, θ)...; check_args = check_args) +Gamma(α::Integer, θ::Integer; check_args::Bool = true) = + Gamma(float(α), float(θ); check_args = check_args) +Gamma(α::Real; check_args::Bool = true) = Gamma(α, one(α); check_args = check_args) Gamma() = Gamma{Float64}(1.0, 1.0) @distr_support Gamma 0.0 Inf #### Conversions -convert(::Type{Gamma{T}}, α::S, θ::S) where {T <: Real, S <: Real} = Gamma(T(α), T(θ)) +convert(::Type{Gamma{T}}, α::S, θ::S) where {T<:Real,S<:Real} = Gamma(T(α), T(θ)) Base.convert(::Type{Gamma{T}}, d::Gamma) where {T<:Real} = Gamma{T}(T(d.α), T(d.θ)) Base.convert(::Type{Gamma{T}}, d::Gamma{T}) where {T<:Real} = d @@ -89,8 +91,8 @@ function kldivergence(p::Gamma, q::Gamma) αp, θp = params(p) αq, θq = params(q) θp_over_θq = θp / θq - return (αp - αq) * digamma(αp) - loggamma(αp) + loggamma(αq) - - αq * log(θp_over_θq) + αp * (θp_over_θq - 1) + return (αp - αq) * digamma(αp) - loggamma(αp) + loggamma(αq) - αq * log(θp_over_θq) + + αp * (θp_over_θq - 1) end #### Evaluation & Sampling @@ -105,8 +107,7 @@ function rand(rng::AbstractRNG, d::Gamma) # TODO: shape(d) = 0.5 : use scaled chisq return rand(rng, GammaIPSampler(d)) elseif shape(d) == 1.0 - θ = - return rand(rng, Exponential{partype(d)}(scale(d))) + θ = return rand(rng, Exponential{partype(d)}(scale(d))) else return rand(rng, GammaMTSampler(d)) end @@ -133,17 +134,21 @@ struct GammaStats <: SufficientStats GammaStats(sx::Real, slogx::Real, tw::Real) 
= new(sx, slogx, tw) end -function suffstats(::Type{<:Gamma}, x::AbstractArray{T}) where T<:Real +function suffstats(::Type{<:Gamma}, x::AbstractArray{T}) where {T<:Real} sx = zero(T) slogx = zero(T) - for xi = x + for xi in x sx += xi slogx += log(xi) end GammaStats(sx, slogx, length(x)) end -function suffstats(::Type{<:Gamma}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Real +function suffstats( + ::Type{<:Gamma}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Real} n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) @@ -168,14 +173,19 @@ function gamma_mle_update(logmx::Float64, mlogx::Float64, a::Float64) 1 / z end -function fit_mle(::Type{<:Gamma}, ss::GammaStats; - alpha0::Float64=NaN, maxiter::Int=1000, tol::Float64=1e-16) +function fit_mle( + ::Type{<:Gamma}, + ss::GammaStats; + alpha0::Float64 = NaN, + maxiter::Int = 1000, + tol::Float64 = 1e-16, +) mx = ss.sx / ss.tw logmx = log(mx) mlogx = ss.slogx / ss.tw - a::Float64 = isnan(alpha0) ? (logmx - mlogx)/2 : alpha0 + a::Float64 = isnan(alpha0) ? (logmx - mlogx) / 2 : alpha0 converged = false t = 0 diff --git a/src/univariate/continuous/generalizedextremevalue.jl b/src/univariate/continuous/generalizedextremevalue.jl index f8d0541b33..65ed9438ec 100644 --- a/src/univariate/continuous/generalizedextremevalue.jl +++ b/src/univariate/continuous/generalizedextremevalue.jl @@ -39,31 +39,42 @@ struct GeneralizedExtremeValue{T<:Real} <: ContinuousUnivariateDistribution σ::T ξ::T - function GeneralizedExtremeValue{T}(μ::T, σ::T, ξ::T) where T + function GeneralizedExtremeValue{T}(μ::T, σ::T, ξ::T) where {T} σ > zero(σ) || error("Scale must be positive") new{T}(μ, σ, ξ) end end -GeneralizedExtremeValue(μ::T, σ::T, ξ::T) where {T<:Real} = GeneralizedExtremeValue{T}(μ, σ, ξ) -GeneralizedExtremeValue(μ::Real, σ::Real, ξ::Real) = GeneralizedExtremeValue(promote(μ, σ, ξ)...) +GeneralizedExtremeValue(μ::T, σ::T, ξ::T) where {T<:Real} = + GeneralizedExtremeValue{T}(μ, σ, ξ) +GeneralizedExtremeValue(μ::Real, σ::Real, ξ::Real) = + GeneralizedExtremeValue(promote(μ, σ, ξ)...) function GeneralizedExtremeValue(μ::Integer, σ::Integer, ξ::Integer) return GeneralizedExtremeValue(float(μ), float(σ), float(ξ)) end #### Conversions -function convert(::Type{GeneralizedExtremeValue{T}}, μ::Real, σ::Real, ξ::Real) where T<:Real +function convert( + ::Type{GeneralizedExtremeValue{T}}, + μ::Real, + σ::Real, + ξ::Real, +) where {T<:Real} GeneralizedExtremeValue(T(μ), T(σ), T(ξ)) end -function Base.convert(::Type{GeneralizedExtremeValue{T}}, d::GeneralizedExtremeValue) where {T<:Real} +function Base.convert( + ::Type{GeneralizedExtremeValue{T}}, + d::GeneralizedExtremeValue, +) where {T<:Real} GeneralizedExtremeValue{T}(T(d.μ), T(d.σ), T(d.ξ)) end -Base.convert(::Type{GeneralizedExtremeValue{T}}, d::GeneralizedExtremeValue{T}) where {T<:Real} = d +Base.convert( + ::Type{GeneralizedExtremeValue{T}}, + d::GeneralizedExtremeValue{T}, +) where {T<:Real} = d -minimum(d::GeneralizedExtremeValue{T}) where {T<:Real} = - d.ξ > 0 ? d.μ - d.σ / d.ξ : -T(Inf) -maximum(d::GeneralizedExtremeValue{T}) where {T<:Real} = - d.ξ < 0 ? d.μ - d.σ / d.ξ : T(Inf) +minimum(d::GeneralizedExtremeValue{T}) where {T<:Real} = d.ξ > 0 ? d.μ - d.σ / d.ξ : -T(Inf) +maximum(d::GeneralizedExtremeValue{T}) where {T<:Real} = d.ξ < 0 ? 
d.μ - d.σ / d.ξ : T(Inf) #### Parameters @@ -86,11 +97,11 @@ function median(d::GeneralizedExtremeValue) if abs(ξ) < eps() # ξ == 0 return μ - σ * log(log(2)) else - return μ + σ * (log(2) ^ (- ξ) - 1) / ξ + return μ + σ * (log(2)^(-ξ) - 1) / ξ end end -function mean(d::GeneralizedExtremeValue{T}) where T<:Real +function mean(d::GeneralizedExtremeValue{T}) where {T<:Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 @@ -108,42 +119,42 @@ function mode(d::GeneralizedExtremeValue) if abs(ξ) < eps(one(ξ)) # ξ == 0 return μ else - return μ + σ * ((1 + ξ) ^ (-ξ) - 1) / ξ + return μ + σ * ((1 + ξ)^(-ξ) - 1) / ξ end end -function var(d::GeneralizedExtremeValue{T}) where T<:Real +function var(d::GeneralizedExtremeValue{T}) where {T<:Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 - return σ ^2 * π^2 / 6 - elseif ξ < 1/2 - return σ^2 * (g(d, 2) - g(d, 1) ^ 2) / ξ^2 + return σ^2 * π^2 / 6 + elseif ξ < 1 / 2 + return σ^2 * (g(d, 2) - g(d, 1)^2) / ξ^2 else return T(Inf) end end -function skewness(d::GeneralizedExtremeValue{T}) where T<:Real +function skewness(d::GeneralizedExtremeValue{T}) where {T<:Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 - return 12sqrt(6) * zeta(3) / pi ^ 3 * one(T) - elseif ξ < 1/3 + return 12sqrt(6) * zeta(3) / pi^3 * one(T) + elseif ξ < 1 / 3 g1 = g(d, 1) g2 = g(d, 2) g3 = g(d, 3) - return sign(ξ) * (g3 - 3g1 * g2 + 2g1^3) / (g2 - g1^2) ^ (3/2) + return sign(ξ) * (g3 - 3g1 * g2 + 2g1^3) / (g2 - g1^2)^(3 / 2) else return T(Inf) end end -function kurtosis(d::GeneralizedExtremeValue{T}) where T<:Real +function kurtosis(d::GeneralizedExtremeValue{T}) where {T<:Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 - return T(12)/5 + return T(12) / 5 elseif ξ < 1 / 4 g1 = g(d, 1) g2 = g(d, 2) @@ -178,9 +189,9 @@ insupport(d::GeneralizedExtremeValue, x::Real) = minimum(d) <= x <= maximum(d) #### Evaluation -function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where T<:Real - if x == -Inf || x == Inf || ! insupport(d, x) - return -T(Inf) +function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where {T<:Real} + if x == -Inf || x == Inf || !insupport(d, x) + return -T(Inf) else (μ, σ, ξ) = params(d) @@ -192,15 +203,15 @@ function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where T<:Real if z * ξ == -1 # Otherwise, would compute zero to the power something. return -T(Inf) else - t = (1 + z * ξ) ^ (-1/ξ) - return - log(σ) + (ξ + 1) * log(t) - t + t = (1 + z * ξ)^(-1 / ξ) + return -log(σ) + (ξ + 1) * log(t) - t end end end end -function pdf(d::GeneralizedExtremeValue{T}, x::Real) where T<:Real - if x == -Inf || x == Inf || ! insupport(d, x) +function pdf(d::GeneralizedExtremeValue{T}, x::Real) where {T<:Real} + if x == -Inf || x == Inf || !insupport(d, x) return zero(T) else (μ, σ, ξ) = params(d) @@ -213,8 +224,8 @@ function pdf(d::GeneralizedExtremeValue{T}, x::Real) where T<:Real if z * ξ == -1 # In this case: zero to the power something. 
return zero(T) else - t = (1 + z*ξ)^(- 1 / ξ) - return (t^(ξ + 1) * exp(- t)) / σ + t = (1 + z * ξ)^(-1 / ξ) + return (t^(ξ + 1) * exp(-t)) / σ end end end @@ -224,16 +235,16 @@ function logcdf(d::GeneralizedExtremeValue, x::Real) μ, σ, ξ = params(d) z = (x - μ) / σ return if abs(ξ) < eps(one(ξ)) # ξ == 0 - -exp(- z) + -exp(-z) else # y(x) = y(bound) = 0 if x is not in the support with lower/upper bound y = max(1 + z * ξ, 0) - - y^(-1/ξ) + -y^(-1 / ξ) end end cdf(d::GeneralizedExtremeValue, x::Real) = exp(logcdf(d, x)) -ccdf(d::GeneralizedExtremeValue, x::Real) = - expm1(logcdf(d, x)) +ccdf(d::GeneralizedExtremeValue, x::Real) = -expm1(logcdf(d, x)) logccdf(d::GeneralizedExtremeValue, x::Real) = log1mexp(logcdf(d, x)) @@ -245,9 +256,9 @@ function rand(rng::AbstractRNG, d::GeneralizedExtremeValue) u = 1 - rand(rng) if abs(ξ) < eps(one(ξ)) # ξ == 0 - rd = - log(- log(u)) + rd = -log(-log(u)) else - rd = expm1(- ξ * log(- log(u))) / ξ + rd = expm1(-ξ * log(-log(u))) / ξ end return μ + σ * rd diff --git a/src/univariate/continuous/generalizedpareto.jl b/src/univariate/continuous/generalizedpareto.jl index 3a5f35ec83..099a085c42 100644 --- a/src/univariate/continuous/generalizedpareto.jl +++ b/src/univariate/continuous/generalizedpareto.jl @@ -38,24 +38,24 @@ struct GeneralizedPareto{T<:Real} <: ContinuousUnivariateDistribution GeneralizedPareto{T}(μ::T, σ::T, ξ::T) where {T} = new{T}(μ, σ, ξ) end -function GeneralizedPareto(μ::T, σ::T, ξ::T; check_args::Bool=true) where {T <: Real} +function GeneralizedPareto(μ::T, σ::T, ξ::T; check_args::Bool = true) where {T<:Real} @check_args GeneralizedPareto (σ, σ > zero(σ)) return GeneralizedPareto{T}(μ, σ, ξ) end -function GeneralizedPareto(μ::Real, σ::Real, ξ::Real; check_args::Bool=true) - return GeneralizedPareto(promote(μ, σ, ξ)...; check_args=check_args) +function GeneralizedPareto(μ::Real, σ::Real, ξ::Real; check_args::Bool = true) + return GeneralizedPareto(promote(μ, σ, ξ)...; check_args = check_args) end -function GeneralizedPareto(μ::Integer, σ::Integer, ξ::Integer; check_args::Bool=true) - GeneralizedPareto(float(μ), float(σ), float(ξ); check_args=check_args) +function GeneralizedPareto(μ::Integer, σ::Integer, ξ::Integer; check_args::Bool = true) + GeneralizedPareto(float(μ), float(σ), float(ξ); check_args = check_args) end -function GeneralizedPareto(σ::Real, ξ::Real; check_args::Bool=true) - GeneralizedPareto(zero(σ), σ, ξ; check_args=check_args) +function GeneralizedPareto(σ::Real, ξ::Real; check_args::Bool = true) + GeneralizedPareto(zero(σ), σ, ξ; check_args = check_args) end -function GeneralizedPareto(ξ::Real; check_args::Bool=true) - GeneralizedPareto(zero(ξ), one(ξ), ξ; check_args=check_args) +function GeneralizedPareto(ξ::Real; check_args::Bool = true) + GeneralizedPareto(zero(ξ), one(ξ), ξ; check_args = check_args) end GeneralizedPareto() = GeneralizedPareto{Float64}(0.0, 1.0, 1.0) @@ -64,7 +64,7 @@ minimum(d::GeneralizedPareto) = d.μ maximum(d::GeneralizedPareto{T}) where {T<:Real} = d.ξ < 0 ? d.μ - d.σ / d.ξ : Inf #### Conversions -function convert(::Type{GeneralizedPareto{T}}, μ::S, σ::S, ξ::S) where {T <: Real, S <: Real} +function convert(::Type{GeneralizedPareto{T}}, μ::S, σ::S, ξ::S) where {T<:Real,S<:Real} GeneralizedPareto(T(μ), T(σ), T(ξ)) end function Base.convert(::Type{GeneralizedPareto{T}}, d::GeneralizedPareto) where {T<:Real} @@ -82,7 +82,8 @@ partype(::GeneralizedPareto{T}) where {T} = T #### Statistics -median(d::GeneralizedPareto) = d.ξ == 0 ? 
d.μ + d.σ * logtwo : d.μ + d.σ * expm1(d.ξ * logtwo) / d.ξ +median(d::GeneralizedPareto) = + d.ξ == 0 ? d.μ + d.σ * logtwo : d.μ + d.σ * expm1(d.ξ * logtwo) / d.ξ function mean(d::GeneralizedPareto{T}) where {T<:Real} if d.ξ < 1 @@ -103,14 +104,14 @@ end function skewness(d::GeneralizedPareto{T}) where {T<:Real} (μ, σ, ξ) = params(d) - if ξ < (1/3) + if ξ < (1 / 3) return 2(1 + ξ) * sqrt(1 - 2ξ) / (1 - 3ξ) else return T(Inf) end end -function kurtosis(d::GeneralizedPareto{T}) where T<:Real +function kurtosis(d::GeneralizedPareto{T}) where {T<:Real} (μ, σ, ξ) = params(d) if ξ < 0.25 @@ -125,7 +126,7 @@ end #### Evaluation -function logpdf(d::GeneralizedPareto{T}, x::Real) where T<:Real +function logpdf(d::GeneralizedPareto{T}, x::Real) where {T<:Real} (μ, σ, ξ) = params(d) # The logpdf is log(0) outside the support range. @@ -160,7 +161,7 @@ ccdf(d::GeneralizedPareto, x::Real) = exp(logccdf(d, x)) cdf(d::GeneralizedPareto, x::Real) = -expm1(logccdf(d, x)) logcdf(d::GeneralizedPareto, x::Real) = log1mexp(logccdf(d, x)) -function quantile(d::GeneralizedPareto{T}, p::Real) where T<:Real +function quantile(d::GeneralizedPareto{T}, p::Real) where {T<:Real} (μ, σ, ξ) = params(d) if p == 0 @@ -174,7 +175,7 @@ function quantile(d::GeneralizedPareto{T}, p::Real) where T<:Real z = expm1(-ξ * log1p(-p)) / ξ end else - z = T(NaN) + z = T(NaN) end return μ + σ * z diff --git a/src/univariate/continuous/gumbel.jl b/src/univariate/continuous/gumbel.jl index 54090967fc..8f4cda871a 100644 --- a/src/univariate/continuous/gumbel.jl +++ b/src/univariate/continuous/gumbel.jl @@ -28,14 +28,16 @@ struct Gumbel{T<:Real} <: ContinuousUnivariateDistribution Gumbel{T}(µ::T, θ::T) where {T} = new{T}(µ, θ) end -function Gumbel(μ::T, θ::T; check_args::Bool=true) where {T <: Real} +function Gumbel(μ::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Gumbel (θ, θ > zero(θ)) return Gumbel{T}(μ, θ) end -Gumbel(μ::Real, θ::Real; check_args::Bool=true) = Gumbel(promote(μ, θ)...; check_args=check_args) -Gumbel(μ::Integer, θ::Integer; check_args::Bool=true) = Gumbel(float(μ), float(θ); check_args=check_args) -Gumbel(μ::Real=0.0) = Gumbel(μ, one(μ); check_args=false) +Gumbel(μ::Real, θ::Real; check_args::Bool = true) = + Gumbel(promote(μ, θ)...; check_args = check_args) +Gumbel(μ::Integer, θ::Integer; check_args::Bool = true) = + Gumbel(float(μ), float(θ); check_args = check_args) +Gumbel(μ::Real = 0.0) = Gumbel(μ, one(μ); check_args = false) @distr_support Gumbel -Inf Inf @@ -45,7 +47,7 @@ Base.eltype(::Type{Gumbel{T}}) where {T} = T #### Conversions -convert(::Type{Gumbel{T}}, μ::S, θ::S) where {T <: Real, S <: Real} = Gumbel(T(μ), T(θ)) +convert(::Type{Gumbel{T}}, μ::S, θ::S) where {T<:Real,S<:Real} = Gumbel(T(μ), T(θ)) Base.convert(::Type{Gumbel{T}}, d::Gumbel) where {T<:Real} = Gumbel{T}(T(d.μ), T(d.θ)) Base.convert(::Type{Gumbel{T}}, d::Gumbel{T}) where {T<:Real} = d @@ -73,11 +75,11 @@ median(d::Gumbel{T}) where {T<:Real} = d.μ - d.θ * log(T(logtwo)) mode(d::Gumbel) = d.μ -var(d::Gumbel{T}) where {T<:Real} = T(π)^2/6 * d.θ^2 +var(d::Gumbel{T}) where {T<:Real} = T(π)^2 / 6 * d.θ^2 -skewness(d::Gumbel{T}) where {T<:Real} = 12*sqrt(T(6))*zeta(T(3)) / π^3 +skewness(d::Gumbel{T}) where {T<:Real} = 12 * sqrt(T(6)) * zeta(T(3)) / π^3 -kurtosis(d::Gumbel{T}) where {T<:Real} = T(12)/5 +kurtosis(d::Gumbel{T}) where {T<:Real} = T(12) / 5 entropy(d::Gumbel) = log(d.θ) + 1 + MathConstants.γ @@ -94,7 +96,7 @@ end function logpdf(d::Gumbel, x::Real) z = zval(d, x) - - (z + exp(-z) + log(d.θ)) + -(z + exp(-z) + log(d.θ)) end 
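# Example: a minimal numerical sanity check of the Gumbel logpdf/cdf pair
# above (an illustrative sketch, assuming only the public Distributions API;
# not part of the formatted sources).
using Distributions
d = Gumbel(1.5, 2.0)                            # location μ = 1.5, scale θ = 2.0
logpdf(d, 3.0) ≈ log(pdf(d, 3.0))               # log-density is consistent
quantile(d, cdf(d, 3.0)) ≈ 3.0                  # quantile inverts the cdf
quantile(d, 0.3) ≈ 1.5 - 2.0 * log(-log(0.3))   # closed-form Gumbel quantile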
cdf(d::Gumbel, x::Real) = exp(-exp(-zval(d, x))) diff --git a/src/univariate/continuous/inversegamma.jl b/src/univariate/continuous/inversegamma.jl index 60bda1d740..de7f66e79f 100644 --- a/src/univariate/continuous/inversegamma.jl +++ b/src/univariate/continuous/inversegamma.jl @@ -28,23 +28,28 @@ External links struct InverseGamma{T<:Real} <: ContinuousUnivariateDistribution invd::Gamma{T} θ::T - InverseGamma{T}(α::T, θ::T) where {T<:Real} = new{T}(Gamma(α, inv(θ), check_args=false), θ) + InverseGamma{T}(α::T, θ::T) where {T<:Real} = + new{T}(Gamma(α, inv(θ), check_args = false), θ) end -function InverseGamma(α::T, θ::T; check_args::Bool=true) where {T <: Real} +function InverseGamma(α::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args InverseGamma (α, α > zero(α)) (θ, θ > zero(θ)) return InverseGamma{T}(α, θ) end -InverseGamma(α::Real, θ::Real; check_args::Bool=true) = InverseGamma(promote(α, θ)...; check_args=check_args) -InverseGamma(α::Integer, θ::Integer; check_args::Bool=true) = InverseGamma(float(α), float(θ); check_args=check_args) -InverseGamma(α::Real; check_args::Bool=true) = InverseGamma(α, one(α); check_args=check_args) +InverseGamma(α::Real, θ::Real; check_args::Bool = true) = + InverseGamma(promote(α, θ)...; check_args = check_args) +InverseGamma(α::Integer, θ::Integer; check_args::Bool = true) = + InverseGamma(float(α), float(θ); check_args = check_args) +InverseGamma(α::Real; check_args::Bool = true) = + InverseGamma(α, one(α); check_args = check_args) InverseGamma() = InverseGamma{Float64}(1.0, 1.0) @distr_support InverseGamma 0.0 Inf #### Conversions -convert(::Type{InverseGamma{T}}, α::S, θ::S) where {T <: Real, S <: Real} = InverseGamma(T(α), T(θ)) +convert(::Type{InverseGamma{T}}, α::S, θ::S) where {T<:Real,S<:Real} = + InverseGamma(T(α), T(θ)) function Base.convert(::Type{InverseGamma{T}}, d::InverseGamma) where {T<:Real} return InverseGamma{T}(T(shape(d)), T(d.θ)) end @@ -62,21 +67,22 @@ partype(::InverseGamma{T}) where {T} = T #### Parameters -mean(d::InverseGamma{T}) where {T} = ((α, θ) = params(d); α > 1 ? θ / (α - 1) : T(Inf)) +mean(d::InverseGamma{T}) where {T} = ((α, θ) = params(d); +α > 1 ? θ / (α - 1) : T(Inf)) mode(d::InverseGamma) = scale(d) / (shape(d) + 1) -function var(d::InverseGamma{T}) where T<:Real +function var(d::InverseGamma{T}) where {T<:Real} (α, θ) = params(d) α > 2 ? θ^2 / ((α - 1)^2 * (α - 2)) : T(Inf) end -function skewness(d::InverseGamma{T}) where T<:Real +function skewness(d::InverseGamma{T}) where {T<:Real} α = shape(d) α > 3 ? 4sqrt(α - 2) / (α - 3) : T(NaN) end -function kurtosis(d::InverseGamma{T}) where T<:Real +function kurtosis(d::InverseGamma{T}) where {T<:Real} α = shape(d) α > 4 ? (30α - 66) / ((α - 3) * (α - 4)) : T(NaN) end @@ -111,14 +117,15 @@ cquantile(d::InverseGamma, p::Real) = inv(quantile(d.invd, p)) invlogcdf(d::InverseGamma, p::Real) = inv(invlogccdf(d.invd, p)) invlogccdf(d::InverseGamma, p::Real) = inv(invlogcdf(d.invd, p)) -function mgf(d::InverseGamma{T}, t::Real) where T<:Real +function mgf(d::InverseGamma{T}, t::Real) where {T<:Real} (a, b) = params(d) - t == zero(t) ? one(T) : 2(-b*t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4*b*t)) + t == zero(t) ? one(T) : 2(-b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * b * t)) end -function cf(d::InverseGamma{T}, t::Real) where T<:Real +function cf(d::InverseGamma{T}, t::Real) where {T<:Real} (a, b) = params(d) - t == zero(t) ? one(T)+zero(T)*im : 2(-im*b*t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4*im*b*t)) + t == zero(t) ? 
one(T) + zero(T) * im : + 2(-im * b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * im * b * t)) end diff --git a/src/univariate/continuous/inversegaussian.jl b/src/univariate/continuous/inversegaussian.jl index b585ec3b3d..6579eff927 100644 --- a/src/univariate/continuous/inversegaussian.jl +++ b/src/univariate/continuous/inversegaussian.jl @@ -29,21 +29,24 @@ struct InverseGaussian{T<:Real} <: ContinuousUnivariateDistribution InverseGaussian{T}(μ::T, λ::T) where {T<:Real} = new{T}(μ, λ) end -function InverseGaussian(μ::T, λ::T; check_args::Bool=true) where {T<:Real} +function InverseGaussian(μ::T, λ::T; check_args::Bool = true) where {T<:Real} @check_args InverseGaussian (μ, μ > zero(μ)) (λ, λ > zero(λ)) return InverseGaussian{T}(μ, λ) end -InverseGaussian(μ::Real, λ::Real; check_args::Bool=true) = InverseGaussian(promote(μ, λ)...; check_args=check_args) -InverseGaussian(μ::Integer, λ::Integer; check_args::Bool=true) = InverseGaussian(float(μ), float(λ); check_args=check_args) -InverseGaussian(μ::Real; check_args::Bool=true) = InverseGaussian(μ, one(μ); check_args=check_args) +InverseGaussian(μ::Real, λ::Real; check_args::Bool = true) = + InverseGaussian(promote(μ, λ)...; check_args = check_args) +InverseGaussian(μ::Integer, λ::Integer; check_args::Bool = true) = + InverseGaussian(float(μ), float(λ); check_args = check_args) +InverseGaussian(μ::Real; check_args::Bool = true) = + InverseGaussian(μ, one(μ); check_args = check_args) InverseGaussian() = InverseGaussian{Float64}(1.0, 1.0) @distr_support InverseGaussian 0.0 Inf #### Conversions -function convert(::Type{InverseGaussian{T}}, μ::S, λ::S) where {T <: Real, S <: Real} +function convert(::Type{InverseGaussian{T}}, μ::S, λ::S) where {T<:Real,S<:Real} InverseGaussian(T(μ), T(λ)) end function Base.convert(::Type{InverseGaussian{T}}, d::InverseGaussian) where {T<:Real} @@ -70,13 +73,13 @@ kurtosis(d::InverseGaussian) = 15d.μ / d.λ function mode(d::InverseGaussian) μ, λ = params(d) r = μ / λ - μ * (sqrt(1 + (3r/2)^2) - (3r/2)) + μ * (sqrt(1 + (3r / 2)^2) - (3r / 2)) end #### Evaluation -function pdf(d::InverseGaussian{T}, x::Real) where T<:Real +function pdf(d::InverseGaussian{T}, x::Real) where {T<:Real} if x > 0 μ, λ = params(d) return sqrt(λ / (twoπ * x^3)) * exp(-λ * (x - μ)^2 / (2μ^2 * x)) @@ -85,10 +88,10 @@ function pdf(d::InverseGaussian{T}, x::Real) where T<:Real end end -function logpdf(d::InverseGaussian{T}, x::Real) where T<:Real +function logpdf(d::InverseGaussian{T}, x::Real) where {T<:Real} if x > 0 μ, λ = params(d) - return (log(λ) - (log2π + 3log(x)) - λ * (x - μ)^2 / (μ^2 * x))/2 + return (log(λ) - (log2π + 3log(x)) - λ * (x - μ)^2 / (μ^2 * x)) / 2 else return -T(Inf) end @@ -187,7 +190,11 @@ function suffstats(::Type{<:InverseGaussian}, x::AbstractVector{<:Real}) InverseGaussianStats(sx, sinvx, length(x)) end -function suffstats(::Type{<:InverseGaussian}, x::AbstractVector{<:Real}, w::AbstractVector{<:Real}) +function suffstats( + ::Type{<:InverseGaussian}, + x::AbstractVector{<:Real}, + w::AbstractVector{<:Real}, +) n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) @@ -197,8 +204,8 @@ function suffstats(::Type{<:InverseGaussian}, x::AbstractVector{<:Real}, w::Abst sinvx = zero(T) sw = zero(T) @inbounds @simd for i in eachindex(x) - sx += w[i]*x[i] - sinvx += w[i]/x[i] + sx += w[i] * x[i] + sinvx += w[i] / x[i] sw += w[i] end InverseGaussianStats(sx, sinvx, sw) @@ -206,6 +213,6 @@ end function fit_mle(::Type{<:InverseGaussian}, ss::InverseGaussianStats) mu = ss.sx / ss.sw - 
invlambda = ss.sinvx / ss.sw - inv(mu) + invlambda = ss.sinvx / ss.sw - inv(mu) InverseGaussian(mu, inv(invlambda)) end diff --git a/src/univariate/continuous/johnsonsu.jl b/src/univariate/continuous/johnsonsu.jl index 65597adc2d..ebabb5c077 100644 --- a/src/univariate/continuous/johnsonsu.jl +++ b/src/univariate/continuous/johnsonsu.jl @@ -30,19 +30,21 @@ struct JohnsonSU{T<:Real} <: ContinuousUnivariateDistribution JohnsonSU{T}(ξ::T, λ::T, γ::T, δ::T) where {T<:Real} = new{T}(ξ, λ, γ, δ) end -function JohnsonSU(ξ::T, λ::T, γ::T, δ::T; check_args::Bool=true) where {T<:Real} +function JohnsonSU(ξ::T, λ::T, γ::T, δ::T; check_args::Bool = true) where {T<:Real} @check_args JohnsonSU (λ, λ ≥ zero(λ)) (δ, δ ≥ zero(δ)) return JohnsonSU{T}(ξ, λ, γ, δ) end JohnsonSU() = JohnsonSU{Int}(0, 1, 0, 1) -JohnsonSU(ξ::Real, λ::Real, γ::Real, δ::Real; check_args::Bool=true) = JohnsonSU(promote(ξ, λ, γ, δ)...; check_args=check_args) +JohnsonSU(ξ::Real, λ::Real, γ::Real, δ::Real; check_args::Bool = true) = + JohnsonSU(promote(ξ, λ, γ, δ)...; check_args = check_args) @distr_support JohnsonSU -Inf Inf #### Conversions -Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU) where {T<:Real} = JohnsonSU{T}(T(d.ξ), T(d.λ), T(d.γ), T(d.δ)) +Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU) where {T<:Real} = + JohnsonSU{T}(T(d.ξ), T(d.λ), T(d.γ), T(d.δ)) Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU{T}) where {T<:Real} = d #### Parameters @@ -56,18 +58,18 @@ partype(d::JohnsonSU{T}) where {T<:Real} = T #### Statistics function mean(d::JohnsonSU) - a = exp(1/(2*d.δ^2)) - r = d.γ/d.δ + a = exp(1 / (2 * d.δ^2)) + r = d.γ / d.δ d.ξ - d.λ * a * sinh(r) end function median(d::JohnsonSU) - r = d.γ/d.δ + r = d.γ / d.δ d.ξ + d.λ * sinh(-r) end function var(d::JohnsonSU) a = d.δ^-2 - r = d.γ/d.δ - d.λ^2/2 * expm1(a) * (exp(a)*cosh(2r)+1) + r = d.γ / d.δ + d.λ^2 / 2 * expm1(a) * (exp(a) * cosh(2r) + 1) end #### Evaluation @@ -77,7 +79,8 @@ zval(d::JohnsonSU, x::Real) = d.γ + d.δ * asinh(yval(d, x)) xval(d::JohnsonSU, x::Real) = d.λ * sinh((x - d.γ) / d.δ) + d.ξ pdf(d::JohnsonSU, x::Real) = d.δ / (d.λ * hypot(1, yval(d, x))) * normpdf(zval(d, x)) -logpdf(d::JohnsonSU, x::Real) = log(d.δ) - log(d.λ) - log1psq(yval(d, x)) / 2 + normlogpdf(zval(d, x)) +logpdf(d::JohnsonSU, x::Real) = + log(d.δ) - log(d.λ) - log1psq(yval(d, x)) / 2 + normlogpdf(zval(d, x)) cdf(d::JohnsonSU, x::Real) = normcdf(zval(d, x)) logcdf(d::JohnsonSU, x::Real) = normlogcdf(zval(d, x)) ccdf(d::JohnsonSU, x::Real) = normccdf(zval(d, x)) diff --git a/src/univariate/continuous/kolmogorov.jl b/src/univariate/continuous/kolmogorov.jl index f7bb373ab8..835a2466b8 100644 --- a/src/univariate/continuous/kolmogorov.jl +++ b/src/univariate/continuous/kolmogorov.jl @@ -9,8 +9,7 @@ Kolmogorov distribution defined as where ``B(t)`` is a Brownian bridge used in the Kolmogorov--Smirnov test for large n. """ -struct Kolmogorov <: ContinuousUnivariateDistribution -end +struct Kolmogorov <: ContinuousUnivariateDistribution end @distr_support Kolmogorov 0.0 Inf @@ -19,8 +18,8 @@ params(d::Kolmogorov) = () #### Statistics -mean(d::Kolmogorov) = sqrt2π*log(2)/2 -var(d::Kolmogorov) = pi^2/12 - pi*log(2)^2/2 +mean(d::Kolmogorov) = sqrt2π * log(2) / 2 +var(d::Kolmogorov) = pi^2 / 12 - pi * log(2)^2 / 2 # TODO: higher-order moments also exist, can be obtained by differentiating series mode(d::Kolmogorov) = 0.735467907916572 @@ -39,61 +38,61 @@ median(d::Kolmogorov) = 0.8275735551899077 # both series so assume error in table. 
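# Example: a quick consistency check (a sketch, assuming only the public
# Distributions API) that the lower and upper series below hand off smoothly
# where cdf switches from cdf_raw to 1 - ccdf_raw at x = 1.
using Distributions
d = Kolmogorov()
cdf(d, 1.0) + ccdf(d, 1.0) ≈ 1.0                  # complementary at the seam
cdf(d, prevfloat(1.0)) ≈ cdf(d, nextfloat(1.0))   # continuity across the seam
mean(d) ≈ sqrt(2π) * log(2) / 2                   # matches the closed form above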
function cdf_raw(d::Kolmogorov, x::Real) - a = -(pi*pi)/(x*x) + a = -(pi * pi) / (x * x) f = exp(a) - f2 = f*f - u = (1 + f*(1 + f2)) - sqrt2π*exp(a/8)*u/x + f2 = f * f + u = (1 + f * (1 + f2)) + sqrt2π * exp(a / 8) * u / x end function ccdf_raw(d::Kolmogorov, x::Real) - f = exp(-2*x*x) - f2 = f*f - f3 = f2*f - f5 = f2*f3 - f7 = f2*f5 - u = (1 - f3*(1 - f5*(1 - f7))) - 2f*u + f = exp(-2 * x * x) + f2 = f * f + f3 = f2 * f + f5 = f2 * f3 + f7 = f2 * f5 + u = (1 - f3 * (1 - f5 * (1 - f7))) + 2f * u end -function cdf(d::Kolmogorov,x::Real) +function cdf(d::Kolmogorov, x::Real) if x <= 0 0 elseif x <= 1 - cdf_raw(d,x) + cdf_raw(d, x) else - 1-ccdf_raw(d,x) + 1 - ccdf_raw(d, x) end end -function ccdf(d::Kolmogorov,x::Real) +function ccdf(d::Kolmogorov, x::Real) if x <= 0 1 elseif x <= 1 - 1-cdf_raw(d,x) + 1 - cdf_raw(d, x) else - ccdf_raw(d,x) + ccdf_raw(d, x) end end # TODO: figure out how best to truncate series -function pdf(d::Kolmogorov,x::Real) +function pdf(d::Kolmogorov, x::Real) if x <= 0 return 0.0 elseif x <= 1 - c = π/(2*x) + c = π / (2 * x) s = 0.0 for i = 1:20 - k = ((2i - 1)*c)^2 - s += (k - 1)*exp(-k/2) + k = ((2i - 1) * c)^2 + s += (k - 1) * exp(-k / 2) end - return sqrt2π*s/x^2 + return sqrt2π * s / x^2 else s = 0.0 for i = 1:20 - s += (iseven(i) ? -1 : 1)*i^2*exp(-2(i*x)^2) + s += (iseven(i) ? -1 : 1) * i^2 * exp(-2(i * x)^2) end - return 8*x*s + return 8 * x * s end end @@ -113,40 +112,40 @@ function rand(rng::AbstractRNG, d::Kolmogorov) while true g = rand_trunc_gamma(rng) - x = pi/sqrt(8g) + x = pi / sqrt(8g) w = 0.0 - z = 1/(2g) + z = 1 / (2g) p = exp(-g) n = 1 q = 1.0 u = rand(rng) while u >= w - w += z*q + w += z * q if u >= w return x end n += 2 - nsq = n*n - q = p^(nsq-1) - w -= nsq*q + nsq = n * n + q = p^(nsq - 1) + w -= nsq * q end end else while true e = randexp(rng) u = rand(rng) - x = sqrt(t*t+e/2) + x = sqrt(t * t + e / 2) w = 0.0 n = 1 - z = exp(-2*x*x) + z = exp(-2 * x * x) while u > w n += 1 - w += n*n*z^(n*n-1) + w += n * n * z^(n * n - 1) if u >= w return x end n += 1 - w -= n*n*z^(n*n-1) + w -= n * n * z^(n * n - 1) end end end @@ -160,7 +159,7 @@ function rand_trunc_gamma(rng::AbstractRNG) e0 = rand(rng, Exponential(1.2952909208355123)) e1 = rand(rng, Exponential(2)) g = tp + e0 - if (e0*e0 <= tp*e1*(g+tp)) || (g/tp - 1 - log(g/tp) <= e1) + if (e0 * e0 <= tp * e1 * (g + tp)) || (g / tp - 1 - log(g / tp) <= e1) return g end end diff --git a/src/univariate/continuous/ksdist.jl b/src/univariate/continuous/ksdist.jl index 1de09651a3..3ae970e82f 100644 --- a/src/univariate/continuous/ksdist.jl +++ b/src/univariate/continuous/ksdist.jl @@ -20,111 +20,111 @@ end # TODO: implement Simard and L'Ecuyer (2011) meta-algorithm # requires Pomeranz and Pelz-Good algorithms -function cdf(d::KSDist,x::Float64) +function cdf(d::KSDist, x::Float64) n = d.n - b = x*n + b = x * n # known exact values - if b <= 1/2 + if b <= 1 / 2 return 0.0 elseif b <= 1 # accuracy could be improved - return exp(logfactorial(n)+n*(log(2*b-1)-log(n))) + return exp(logfactorial(n) + n * (log(2 * b - 1) - log(n))) elseif x >= 1 return 1.0 - elseif b >= n-1 - return 1 - 2*(1-x)^n + elseif b >= n - 1 + return 1 - 2 * (1 - x)^n end - a = b*x + a = b * x if a >= 18 return 1.0 elseif n <= 10_000 if a <= 4 - return cdf_durbin(d,x) + return cdf_durbin(d, x) else - return 1 - ccdf_miller(d,x) + return 1 - ccdf_miller(d, x) end else - return cdf(Kolmogorov(),sqrt(a)) + return cdf(Kolmogorov(), sqrt(a)) end end -function ccdf(d::KSDist,x::Float64) +function ccdf(d::KSDist, x::Float64) n = d.n - b = x*n + 
b = x * n # Ruben and Gambino (1982) known exact values if b <= 0.5 return 1.0 elseif b <= 1 - return 1-exp(logfactorial(n)+n*(log(2*b-1)-log(n))) + return 1 - exp(logfactorial(n) + n * (log(2 * b - 1) - log(n))) elseif x >= 1 return 0.0 - elseif b >= n-1 - return 2*(1-x)^n + elseif b >= n - 1 + return 2 * (1 - x)^n end - a = b*x + a = b * x if a >= 370 return 0.0 elseif a >= 4 || (n > 140 && a >= 2.2) - return ccdf_miller(d,x) + return ccdf_miller(d, x) else - return 1-cdf(d,x) + return 1 - cdf(d, x) end end # Durbin matrix CDF method, based on Marsaglia, Tsang and Wang (2003) # modified to avoid need for exponent tracking -function cdf_durbin(d::KSDist,x::Float64) +function cdf_durbin(d::KSDist, x::Float64) n = d.n - k, ch, h = ceil_rems_mult(n,x) + k, ch, h = ceil_rems_mult(n, x) - m = 2*k-1 + m = 2 * k - 1 H = Matrix{Float64}(undef, m, m) for i = 1:m, j = 1:m - H[i,j] = i-j+1 >= 0 ? 1 : 0 + H[i, j] = i - j + 1 >= 0 ? 1 : 0 end r = 1.0 for i = 1:m # (1-h^i) = (1-h)(1+h+...+h^(i-1)) - H[i,1] = H[m,m-i+1] = ch*r + H[i, 1] = H[m, m-i+1] = ch * r r += h^i end - H[m,1] += h <= 0.5 ? -h^m : -h^m+(h-ch) + H[m, 1] += h <= 0.5 ? -h^m : -h^m + (h - ch) for i = 1:m, j = 1:m - for g = 1:max(i-j+1,0) - H[i,j] /= g + for g = 1:max(i - j + 1, 0) + H[i, j] /= g end # we can avoid keeping track of the exponent by dividing by e # (from Stirling's approximation) - H[i,j] /= ℯ + H[i, j] /= ℯ end Q = H^n - s = Q[k,k] - s*stirling(n) + s = Q[k, k] + s * stirling(n) end # Miller (1956) approximation function ccdf_miller(d::KSDist, x::Real) - 2*ccdf(KSOneSided(d.n),x) + 2 * ccdf(KSOneSided(d.n), x) end ## these functions are used in durbin and pomeranz algorithms # calculate exact remainders the easy way -function floor_rems_mult(n,x) - t = big(x)*big(n) +function floor_rems_mult(n, x) + t = big(x) * big(n) fl = floor(t) lrem = t - fl - urem = (fl+one(fl)) - t - return convert(typeof(n),fl), convert(typeof(x),lrem), convert(typeof(x),urem) + urem = (fl + one(fl)) - t + return convert(typeof(n), fl), convert(typeof(x), lrem), convert(typeof(x), urem) end -function ceil_rems_mult(n,x) - t = big(x)*big(n) +function ceil_rems_mult(n, x) + t = big(x) * big(n) cl = ceil(t) lrem = t - (cl - one(cl)) urem = cl - t - return convert(typeof(n),cl), convert(typeof(x),lrem), convert(typeof(x),urem) + return convert(typeof(n), cl), convert(typeof(x), lrem), convert(typeof(x), urem) end # n!*(e/n)^n @@ -132,12 +132,12 @@ function stirling(n) if n < 500 s = 1.0 for i = 1:n - s *= i/n*ℯ + s *= i / n * ℯ end return s else # 3rd-order Stirling's approximation more accurate for large n - twn = 12*n - return sqrt(2*pi*n)*(1 + twn\(1 + (2*twn)\(1 - (15*twn)\139))) + twn = 12 * n + return sqrt(2 * pi * n) * (1 + twn \ (1 + (2 * twn) \ (1 - (15 * twn) \ 139))) end end diff --git a/src/univariate/continuous/ksonesided.jl b/src/univariate/continuous/ksonesided.jl index 82d1893929..6454946104 100644 --- a/src/univariate/continuous/ksonesided.jl +++ b/src/univariate/continuous/ksonesided.jl @@ -25,11 +25,11 @@ function ccdf(d::KSOneSided, x::Float64) end n = d.n s = 0.0 - for j = 0:floor(Int,n-n*x) - p = x+j/n - s += pdf(Binomial(n,p),j) / p + for j = 0:floor(Int, n - n * x) + p = x + j / n + s += pdf(Binomial(n, p), j) / p end - s*x + s * x end -cdf(d::KSOneSided, x::Float64) = 1 - ccdf(d,x) +cdf(d::KSOneSided, x::Float64) = 1 - ccdf(d, x) diff --git a/src/univariate/continuous/kumaraswamy.jl b/src/univariate/continuous/kumaraswamy.jl index 8a3fecfe71..83094e5373 100644 --- a/src/univariate/continuous/kumaraswamy.jl +++ 
b/src/univariate/continuous/kumaraswamy.jl @@ -27,7 +27,7 @@ struct Kumaraswamy{T<:Real} <: ContinuousUnivariateDistribution b::T end -function Kumaraswamy(a::Real, b::Real; check_args::Bool=true) +function Kumaraswamy(a::Real, b::Real; check_args::Bool = true) @check_args Kumaraswamy (a, a > zero(a)) (b, b > zero(b)) a′, b′ = promote(a, b) return Kumaraswamy{typeof(a′)}(a′, b′) @@ -35,7 +35,8 @@ end Kumaraswamy() = Kumaraswamy{Float64}(1.0, 1.0) -Base.convert(::Type{Kumaraswamy{T}}, d::Kumaraswamy) where {T} = Kumaraswamy{T}(T(d.a), T(d.b)) +Base.convert(::Type{Kumaraswamy{T}}, d::Kumaraswamy) where {T} = + Kumaraswamy{T}(T(d.a), T(d.b)) Base.convert(::Type{Kumaraswamy{T}}, d::Kumaraswamy{T}) where {T} = d @distr_support Kumaraswamy 0 1 diff --git a/src/univariate/continuous/laplace.jl b/src/univariate/continuous/laplace.jl index 2a7bf04a47..5fc63318b0 100644 --- a/src/univariate/continuous/laplace.jl +++ b/src/univariate/continuous/laplace.jl @@ -28,21 +28,23 @@ struct Laplace{T<:Real} <: ContinuousUnivariateDistribution Laplace{T}(µ::T, θ::T) where {T} = new{T}(µ, θ) end -function Laplace(μ::T, θ::T; check_args::Bool=true) where {T <: Real} +function Laplace(μ::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Laplace (θ, θ > zero(θ)) return Laplace{T}(μ, θ) end -Laplace(μ::Real, θ::Real; check_args::Bool=true) = Laplace(promote(μ, θ)...; check_args=check_args) -Laplace(μ::Integer, θ::Integer; check_args::Bool=true) = Laplace(float(μ), float(θ); check_args=check_args) -Laplace(μ::Real=0.0) = Laplace(μ, one(μ); check_args=false) +Laplace(μ::Real, θ::Real; check_args::Bool = true) = + Laplace(promote(μ, θ)...; check_args = check_args) +Laplace(μ::Integer, θ::Integer; check_args::Bool = true) = + Laplace(float(μ), float(θ); check_args = check_args) +Laplace(μ::Real = 0.0) = Laplace(μ, one(μ); check_args = false) const Biexponential = Laplace @distr_support Laplace -Inf Inf #### Conversions -function convert(::Type{Laplace{T}}, μ::S, θ::S) where {T <: Real, S <: Real} +function convert(::Type{Laplace{T}}, μ::S, θ::S) where {T<:Real,S<:Real} Laplace(T(μ), T(θ)) end function Base.convert(::Type{Laplace{T}}, d::Laplace) where {T<:Real} @@ -70,7 +72,7 @@ skewness(d::Laplace{T}) where {T<:Real} = zero(T) kurtosis(d::Laplace{T}) where {T<:Real} = 3one(T) entropy(d::Laplace) = log(2d.θ) + 1 - + function kldivergence(p::Laplace, q::Laplace) pμ, pθ = params(p) qμ, qθ = params(q) @@ -84,17 +86,19 @@ zval(d::Laplace, x::Real) = (x - d.μ) / d.θ xval(d::Laplace, z::Real) = d.μ + z * d.θ pdf(d::Laplace, x::Real) = exp(-abs(zval(d, x))) / 2scale(d) -logpdf(d::Laplace, x::Real) = - (abs(zval(d, x)) + log(2scale(d))) +logpdf(d::Laplace, x::Real) = -(abs(zval(d, x)) + log(2scale(d))) -cdf(d::Laplace, x::Real) = (z = zval(d, x); z < 0 ? exp(z)/2 : 1 - exp(-z)/2) -ccdf(d::Laplace, x::Real) = (z = zval(d, x); z > 0 ? exp(-z)/2 : 1 - exp(z)/2) +cdf(d::Laplace, x::Real) = (z = zval(d, x); z < 0 ? exp(z) / 2 : 1 - exp(-z) / 2) +ccdf(d::Laplace, x::Real) = (z = zval(d, x); z > 0 ? exp(-z) / 2 : 1 - exp(z) / 2) logcdf(d::Laplace, x::Real) = (z = zval(d, x); z < 0 ? loghalf + z : loghalf + log2mexp(-z)) logccdf(d::Laplace, x::Real) = (z = zval(d, x); z > 0 ? loghalf - z : loghalf + log2mexp(z)) -quantile(d::Laplace, p::Real) = p < 1/2 ? xval(d, log(2p)) : xval(d, -log(2(1 - p))) -cquantile(d::Laplace, p::Real) = p > 1/2 ? xval(d, log(2(1 - p))) : xval(d, -log(2p)) -invlogcdf(d::Laplace, lp::Real) = lp < loghalf ? 
xval(d, logtwo + lp) : xval(d, -(logtwo + log1mexp(lp))) -invlogccdf(d::Laplace, lp::Real) = lp > loghalf ? xval(d, logtwo + log1mexp(lp)) : xval(d, -(logtwo + lp)) +quantile(d::Laplace, p::Real) = p < 1 / 2 ? xval(d, log(2p)) : xval(d, -log(2(1 - p))) +cquantile(d::Laplace, p::Real) = p > 1 / 2 ? xval(d, log(2(1 - p))) : xval(d, -log(2p)) +invlogcdf(d::Laplace, lp::Real) = + lp < loghalf ? xval(d, logtwo + lp) : xval(d, -(logtwo + log1mexp(lp))) +invlogccdf(d::Laplace, lp::Real) = + lp > loghalf ? xval(d, logtwo + log1mexp(lp)) : xval(d, -(logtwo + lp)) function gradlogpdf(d::Laplace, x::Real) μ, θ = params(d) @@ -109,11 +113,11 @@ function mgf(d::Laplace, t::Real) end function cgf(d::Laplace, t) μ, θ = params(d) - t*μ - log1p(-(θ*t)^2) + t * μ - log1p(-(θ * t)^2) end function cf(d::Laplace, t::Real) st = d.θ * t - cis(t * d.μ) / (1+st*st) + cis(t * d.μ) / (1 + st * st) end #### Affine transformations @@ -124,7 +128,7 @@ Base.:*(c::Real, d::Laplace) = Laplace(c * d.μ, abs(c) * d.θ) #### Sampling rand(rng::AbstractRNG, d::Laplace) = - d.μ + d.θ*randexp(rng)*ifelse(rand(rng, Bool), 1, -1) + d.μ + d.θ * randexp(rng) * ifelse(rand(rng, Bool), 1, -1) #### Fitting diff --git a/src/univariate/continuous/levy.jl b/src/univariate/continuous/levy.jl index e1d80decfc..c242abc72f 100644 --- a/src/univariate/continuous/levy.jl +++ b/src/univariate/continuous/levy.jl @@ -27,20 +27,22 @@ struct Levy{T<:Real} <: ContinuousUnivariateDistribution Levy{T}(μ::T, σ::T) where {T<:Real} = new{T}(μ, σ) end -function Levy(μ::T, σ::T; check_args::Bool=true) where {T<:Real} +function Levy(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Levy (σ, σ > zero(σ)) return Levy{T}(μ, σ) end -Levy(μ::Real, σ::Real; check_args::Bool=true) = Levy(promote(μ, σ)...; check_args=check_args) -Levy(μ::Integer, σ::Integer; check_args::Bool=true) = Levy(float(μ), float(σ); check_args=check_args) -Levy(μ::Real=0.0) = Levy(μ, one(μ); check_args=false) +Levy(μ::Real, σ::Real; check_args::Bool = true) = + Levy(promote(μ, σ)...; check_args = check_args) +Levy(μ::Integer, σ::Integer; check_args::Bool = true) = + Levy(float(μ), float(σ); check_args = check_args) +Levy(μ::Real = 0.0) = Levy(μ, one(μ); check_args = false) @distr_support Levy d.μ Inf #### Conversions -convert(::Type{Levy{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = Levy(T(μ), T(σ)) +convert(::Type{Levy{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = Levy(T(μ), T(σ)) Base.convert(::Type{Levy{T}}, d::Levy) where {T<:Real} = Levy{T}(T(d.μ), T(d.σ)) Base.convert(::Type{Levy{T}}, d::Levy{T}) where {T<:Real} = d @@ -67,29 +69,31 @@ median(d::Levy{T}) where {T<:Real} = d.μ + d.σ / (2 * T(erfcinv(0.5))^2) #### Evaluation -function pdf(d::Levy{T}, x::Real) where T<:Real +function pdf(d::Levy{T}, x::Real) where {T<:Real} μ, σ = params(d) if x <= μ return zero(T) end z = x - μ - (sqrt(σ) / sqrt2π) * exp((-σ) / (2z)) / z^(3//2) + (sqrt(σ) / sqrt2π) * exp((-σ) / (2z)) / z^(3 // 2) end -function logpdf(d::Levy{T}, x::Real) where T<:Real +function logpdf(d::Levy{T}, x::Real) where {T<:Real} μ, σ = params(d) if x <= μ return T(-Inf) end z = x - μ - (log(σ) - log2π - σ / z - 3log(z))/2 + (log(σ) - log2π - σ / z - 3log(z)) / 2 end -cdf(d::Levy{T}, x::Real) where {T<:Real} = x <= d.μ ? zero(T) : erfc(sqrt(d.σ / (2(x - d.μ)))) -ccdf(d::Levy{T}, x::Real) where {T<:Real} = x <= d.μ ? one(T) : erf(sqrt(d.σ / (2(x - d.μ)))) +cdf(d::Levy{T}, x::Real) where {T<:Real} = + x <= d.μ ? zero(T) : erfc(sqrt(d.σ / (2(x - d.μ)))) +ccdf(d::Levy{T}, x::Real) where {T<:Real} = + x <= d.μ ? 
one(T) : erf(sqrt(d.σ / (2(x - d.μ)))) -quantile(d::Levy, p::Real) = d.μ + d.σ / (2*erfcinv(p)^2) -cquantile(d::Levy, p::Real) = d.μ + d.σ / (2*erfinv(p)^2) +quantile(d::Levy, p::Real) = d.μ + d.σ / (2 * erfcinv(p)^2) +cquantile(d::Levy, p::Real) = d.μ + d.σ / (2 * erfinv(p)^2) mgf(d::Levy{T}, t::Real) where {T<:Real} = t == zero(t) ? one(T) : T(NaN) diff --git a/src/univariate/continuous/lindley.jl b/src/univariate/continuous/lindley.jl index 9e6c3ee797..d47dacc2fa 100644 --- a/src/univariate/continuous/lindley.jl +++ b/src/univariate/continuous/lindley.jl @@ -24,12 +24,12 @@ struct Lindley{T<:Real} <: ContinuousUnivariateDistribution Lindley{T}(θ::T) where {T} = new{T}(θ) end -function Lindley(θ::Real; check_args::Bool=true) +function Lindley(θ::Real; check_args::Bool = true) @check_args Lindley (θ, θ > zero(θ)) return Lindley{typeof(θ)}(θ) end -Lindley(θ::Integer; check_args::Bool=true) = Lindley(float(θ); check_args=check_args) +Lindley(θ::Integer; check_args::Bool = true) = Lindley(float(θ); check_args = check_args) Lindley() = Lindley{Float64}(1.0) @@ -50,7 +50,7 @@ mean(d::Lindley) = (2 + d.θ) / d.θ / (1 + d.θ) var(d::Lindley) = 2 / d.θ^2 - 1 / (1 + d.θ)^2 -skewness(d::Lindley) = 2 * @evalpoly(d.θ, 2, 6, 6, 1) / @evalpoly(d.θ, 2, 4, 1)^(3//2) +skewness(d::Lindley) = 2 * @evalpoly(d.θ, 2, 6, 6, 1) / @evalpoly(d.θ, 2, 4, 1)^(3 // 2) kurtosis(d::Lindley) = 3 * @evalpoly(d.θ, 8, 32, 44, 24, 3) / @evalpoly(d.θ, 2, 4, 1)^2 - 3 @@ -140,8 +140,8 @@ end # Compute W₋₁(x) for x ∈ (-1/e, 0) using formula (27) in Lóczi. By Theorem 2.23, the # upper bound on the error for this algorithm is (1/2)^(2^n), where n is the number of # recursion steps. The default here is set such that this error is less than `eps()`. -function _lambertwm1(x, n=6) - if -exp(-one(x)) < x <= -1//4 +function _lambertwm1(x, n = 6) + if -exp(-one(x)) < x <= -1 // 4 β = -1 - sqrt2 * sqrt(1 + ℯ * x) elseif x < 0 lnmx = log(-x) @@ -149,7 +149,7 @@ function _lambertwm1(x, n=6) else throw(DomainError(x)) end - for i in 1:n + for i = 1:n β = β / (1 + β) * (1 + log(x / β)) end return β diff --git a/src/univariate/continuous/logistic.jl b/src/univariate/continuous/logistic.jl index 67e3608947..83ecc38c7d 100644 --- a/src/univariate/continuous/logistic.jl +++ b/src/univariate/continuous/logistic.jl @@ -30,19 +30,21 @@ struct Logistic{T<:Real} <: ContinuousUnivariateDistribution end -function Logistic(μ::T, θ::T; check_args::Bool=true) where {T <: Real} +function Logistic(μ::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Logistic (θ, θ > zero(θ)) return Logistic{T}(μ, θ) end -Logistic(μ::Real, θ::Real; check_args::Bool=true) = Logistic(promote(μ, θ)...; check_args=check_args) -Logistic(μ::Integer, θ::Integer; check_args::Bool=true) = Logistic(float(μ), float(θ); check_args=check_args) -Logistic(μ::Real=0.0) = Logistic(μ, one(μ); check_args=false) +Logistic(μ::Real, θ::Real; check_args::Bool = true) = + Logistic(promote(μ, θ)...; check_args = check_args) +Logistic(μ::Integer, θ::Integer; check_args::Bool = true) = + Logistic(float(μ), float(θ); check_args = check_args) +Logistic(μ::Real = 0.0) = Logistic(μ, one(μ); check_args = false) @distr_support Logistic -Inf Inf #### Conversions -function convert(::Type{Logistic{T}}, μ::S, θ::S) where {T <: Real, S <: Real} +function convert(::Type{Logistic{T}}, μ::S, θ::S) where {T<:Real,S<:Real} Logistic(T(μ), T(θ)) end function Base.convert(::Type{Logistic{T}}, d::Logistic) where {T<:Real} @@ -68,7 +70,7 @@ mode(d::Logistic) = d.μ std(d::Logistic) = π * d.θ / sqrt3 
var(d::Logistic) = (π * d.θ)^2 / 3 skewness(d::Logistic{T}) where {T<:Real} = zero(T) -kurtosis(d::Logistic{T}) where {T<:Real} = T(6)/5 +kurtosis(d::Logistic{T}) where {T<:Real} = T(6) / 5 entropy(d::Logistic) = log(d.θ) + 2 @@ -78,8 +80,8 @@ entropy(d::Logistic) = log(d.θ) + 2 zval(d::Logistic, x::Real) = (x - d.μ) / d.θ xval(d::Logistic, z::Real) = d.μ + z * d.θ -pdf(d::Logistic, x::Real) = (lz = logistic(-abs(zval(d, x))); lz*(1-lz)/d.θ) -logpdf(d::Logistic, x::Real) = (u = -abs(zval(d, x)); u - 2*log1pexp(u) - log(d.θ)) +pdf(d::Logistic, x::Real) = (lz = logistic(-abs(zval(d, x))); lz * (1 - lz) / d.θ) +logpdf(d::Logistic, x::Real) = (u = -abs(zval(d, x)); u - 2 * log1pexp(u) - log(d.θ)) cdf(d::Logistic, x::Real) = logistic(zval(d, x)) ccdf(d::Logistic, x::Real) = logistic(-zval(d, x)) @@ -99,7 +101,7 @@ end mgf(d::Logistic, t::Real) = exp(t * d.μ) / sinc(d.θ * t) function cgf(d::Logistic, t) μ, θ = params(d) - t*μ - log(sinc(θ*t)) + t * μ - log(sinc(θ * t)) end function cf(d::Logistic, t::Real) a = (π * t) * d.θ diff --git a/src/univariate/continuous/logitnormal.jl b/src/univariate/continuous/logitnormal.jl index cf0832d00c..4e74d4af36 100644 --- a/src/univariate/continuous/logitnormal.jl +++ b/src/univariate/continuous/logitnormal.jl @@ -58,14 +58,16 @@ struct LogitNormal{T<:Real} <: ContinuousUnivariateDistribution LogitNormal{T}(μ::T, σ::T) where {T} = new{T}(μ, σ) end -function LogitNormal(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function LogitNormal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args LogitNormal (σ, σ > zero(σ)) return LogitNormal{T}(μ, σ) end -LogitNormal(μ::Real, σ::Real; check_args::Bool=true) = LogitNormal(promote(μ, σ)...; check_args=check_args) -LogitNormal(μ::Integer, σ::Integer; check_args::Bool=true) = LogitNormal(float(μ), float(σ); check_args=check_args) -LogitNormal(μ::Real=0.0) = LogitNormal(μ, one(μ); check_args=false) +LogitNormal(μ::Real, σ::Real; check_args::Bool = true) = + LogitNormal(promote(μ, σ)...; check_args = check_args) +LogitNormal(μ::Integer, σ::Integer; check_args::Bool = true) = + LogitNormal(float(μ), float(σ); check_args = check_args) +LogitNormal(μ::Real = 0.0) = LogitNormal(μ, one(μ); check_args = false) # minimum and maximum not defined for logitnormal # but see https://github.com/JuliaStats/Distributions.jl/pull/457 @@ -74,8 +76,8 @@ LogitNormal(μ::Real=0.0) = LogitNormal(μ, one(μ); check_args=false) #### Conversions -convert(::Type{LogitNormal{T}}, μ::S, σ::S) where - {T <: Real, S <: Real} = LogitNormal(T(μ), T(σ)) +convert(::Type{LogitNormal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = + LogitNormal(T(μ), T(σ)) function Base.convert(::Type{LogitNormal{T}}, d::LogitNormal) where {T<:Real} LogitNormal{T}(T(d.μ), T(d.σ)) end @@ -112,15 +114,15 @@ end #### Evaluation #TODO check pd and logpdf -function pdf(d::LogitNormal{T}, x::Real) where T<:Real +function pdf(d::LogitNormal{T}, x::Real) where {T<:Real} if zero(x) < x < one(x) - return normpdf(d.μ, d.σ, logit(x)) / (x * (1-x)) + return normpdf(d.μ, d.σ, logit(x)) / (x * (1 - x)) else return T(0) end end -function logpdf(d::LogitNormal{T}, x::Real) where T<:Real +function logpdf(d::LogitNormal{T}, x::Real) where {T<:Real} if zero(x) < x < one(x) lx = logit(x) return normlogpdf(d.μ, d.σ, lx) - log(x) - log1p(-x) @@ -168,7 +170,7 @@ end ## Fitting -function fit_mle(::Type{<:LogitNormal}, x::AbstractArray{T}) where T<:Real +function fit_mle(::Type{<:LogitNormal}, x::AbstractArray{T}) where {T<:Real} lx = logit.(x) μ, σ = mean_and_std(lx) LogitNormal(μ, σ) 
diff --git a/src/univariate/continuous/lognormal.jl b/src/univariate/continuous/lognormal.jl index 70faceb736..ab92991937 100644 --- a/src/univariate/continuous/lognormal.jl +++ b/src/univariate/continuous/lognormal.jl @@ -32,20 +32,23 @@ struct LogNormal{T<:Real} <: ContinuousUnivariateDistribution LogNormal{T}(μ::T, σ::T) where {T} = new{T}(μ, σ) end -function LogNormal(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function LogNormal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args LogNormal (σ, σ ≥ zero(σ)) return LogNormal{T}(μ, σ) end -LogNormal(μ::Real, σ::Real; check_args::Bool=true) = LogNormal(promote(μ, σ)...; check_args=check_args) -LogNormal(μ::Integer, σ::Integer; check_args::Bool=true) = LogNormal(float(μ), float(σ); check_args=check_args) -LogNormal(μ::Real=0.0) = LogNormal(μ, one(μ); check_args=false) +LogNormal(μ::Real, σ::Real; check_args::Bool = true) = + LogNormal(promote(μ, σ)...; check_args = check_args) +LogNormal(μ::Integer, σ::Integer; check_args::Bool = true) = + LogNormal(float(μ), float(σ); check_args = check_args) +LogNormal(μ::Real = 0.0) = LogNormal(μ, one(μ); check_args = false) @distr_support LogNormal 0.0 Inf #### Conversions -convert(::Type{LogNormal{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = LogNormal(T(μ), T(σ)) -Base.convert(::Type{LogNormal{T}}, d::LogNormal) where {T<:Real} = LogNormal{T}(T(d.μ), T(d.σ)) +convert(::Type{LogNormal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = LogNormal(T(μ), T(σ)) +Base.convert(::Type{LogNormal{T}}, d::LogNormal) where {T<:Real} = + LogNormal{T}(T(d.μ), T(d.σ)) Base.convert(::Type{LogNormal{T}}, d::LogNormal{T}) where {T<:Real} = d #### Parameters @@ -59,9 +62,11 @@ meanlogx(d::LogNormal) = d.μ varlogx(d::LogNormal) = abs2(d.σ) stdlogx(d::LogNormal) = d.σ -mean(d::LogNormal) = ((μ, σ) = params(d); exp(μ + σ^2/2)) +mean(d::LogNormal) = ((μ, σ) = params(d); +exp(μ + σ^2 / 2)) median(d::LogNormal) = exp(d.μ) -mode(d::LogNormal) = ((μ, σ) = params(d); exp(μ - σ^2)) +mode(d::LogNormal) = ((μ, σ) = params(d); +exp(μ - σ^2)) partype(::LogNormal{T}) where {T<:Real} = T function var(d::LogNormal) @@ -82,12 +87,12 @@ function kurtosis(d::LogNormal) e2 = e * e e3 = e2 * e e4 = e3 * e - e4 + 2*e3 + 3*e2 - 6 + e4 + 2 * e3 + 3 * e2 - 6 end function entropy(d::LogNormal) (μ, σ) = params(d) - (1 + log(twoπ * σ^2))/2 + μ + (1 + log(twoπ * σ^2)) / 2 + μ end function kldivergence(p::LogNormal, q::LogNormal) @@ -127,7 +132,7 @@ end function ccdf(d::LogNormal, x::Real) logx = x ≤ zero(x) ? log(zero(x)) : log(x) return ccdf(Normal(d.μ, d.σ), logx) - end +end function logcdf(d::LogNormal, x::Real) logx = x ≤ zero(x) ? 
log(zero(x)) : log(x) @@ -168,7 +173,7 @@ end ## Fitting -function fit_mle(::Type{<:LogNormal}, x::AbstractArray{T}) where T<:Real +function fit_mle(::Type{<:LogNormal}, x::AbstractArray{T}) where {T<:Real} lx = log.(x) μ, σ = mean_and_std(lx) LogNormal(μ, σ) diff --git a/src/univariate/continuous/loguniform.jl b/src/univariate/continuous/loguniform.jl index a0ceb6bada..3581cf390f 100644 --- a/src/univariate/continuous/loguniform.jl +++ b/src/univariate/continuous/loguniform.jl @@ -13,17 +13,19 @@ External links struct LogUniform{T<:Real} <: ContinuousUnivariateDistribution a::T b::T - LogUniform{T}(a::T, b::T) where {T <: Real} = new{T}(a, b) + LogUniform{T}(a::T, b::T) where {T<:Real} = new{T}(a, b) end -function LogUniform(a::T, b::T; check_args::Bool=true) where {T <: Real} +function LogUniform(a::T, b::T; check_args::Bool = true) where {T<:Real} @check_args LogUniform (0 < a < b) LogUniform{T}(a, b) end -LogUniform(a::Real, b::Real; check_args::Bool=true) = LogUniform(promote(a, b)...; check_args=check_args) +LogUniform(a::Real, b::Real; check_args::Bool = true) = + LogUniform(promote(a, b)...; check_args = check_args) -Base.convert(::Type{LogUniform{T}}, d::LogUniform) where {T<:Real} = LogUniform{T}(T(d.a), T(d.b)) +Base.convert(::Type{LogUniform{T}}, d::LogUniform) where {T<:Real} = + LogUniform{T}(T(d.a), T(d.b)) Base.convert(::Type{LogUniform{T}}, d::LogUniform{T}) where {T<:Real} = d Base.minimum(d::LogUniform) = d.a @@ -37,18 +39,18 @@ partype(::LogUniform{T}) where {T<:Real} = T function mean(d::LogUniform) a, b = params(d) - (b - a) / log(b/a) + (b - a) / log(b / a) end function var(d::LogUniform) a, b = params(d) - log_ba = log(b/a) - (b^2 - a^2) / (2*log_ba) - ((b-a)/ log_ba)^2 + log_ba = log(b / a) + (b^2 - a^2) / (2 * log_ba) - ((b - a) / log_ba)^2 end -mode(d::LogUniform) = d.a -modes(d::LogUniform) = partype(d)[] +mode(d::LogUniform) = d.a +modes(d::LogUniform) = partype(d)[] function entropy(d::LogUniform) - a,b = params(d) + a, b = params(d) log(a * b) / 2 + log(log(b / a)) end #### Evaluation @@ -62,11 +64,11 @@ function cdf(d::LogUniform, x::Real) x1 = clamp(x1, a, b) return log(x1 / a) / log(b / a) end -logpdf(d::LogUniform, x::Real) = log(pdf(d,x)) +logpdf(d::LogUniform, x::Real) = log(pdf(d, x)) function quantile(d::LogUniform, p::Real) - p1,a,b = promote(p, params(d)...) # ensure e.g. quantile(LogUniform(1,2), 1f0)::Float32 - exp(p1 * log(b/a)) * a + p1, a, b = promote(p, params(d)...) # ensure e.g. 
quantile(LogUniform(1,2), 1f0)::Float32 + exp(p1 * log(b / a)) * a end function kldivergence(p::LogUniform, q::LogUniform) diff --git a/src/univariate/continuous/noncentralbeta.jl b/src/univariate/continuous/noncentralbeta.jl index 222963e472..5be09c8b07 100644 --- a/src/univariate/continuous/noncentralbeta.jl +++ b/src/univariate/continuous/noncentralbeta.jl @@ -10,13 +10,15 @@ struct NoncentralBeta{T<:Real} <: ContinuousUnivariateDistribution NoncentralBeta{T}(α::T, β::T, λ::T) where {T} = new{T}(α, β, λ) end -function NoncentralBeta(α::T, β::T, λ::T; check_args::Bool=true) where {T <: Real} +function NoncentralBeta(α::T, β::T, λ::T; check_args::Bool = true) where {T<:Real} @check_args NoncentralBeta (α, α > zero(α)) (β, β > zero(β)) (λ, λ >= zero(λ)) return NoncentralBeta{T}(α, β, λ) end -NoncentralBeta(α::Real, β::Real, λ::Real; check_args::Bool=true) = NoncentralBeta(promote(α, β, λ)...; check_args=check_args) -NoncentralBeta(α::Integer, β::Integer, λ::Integer; check_args::Bool=true) = NoncentralBeta(float(α), float(β), float(λ); check_args=check_args) +NoncentralBeta(α::Real, β::Real, λ::Real; check_args::Bool = true) = + NoncentralBeta(promote(α, β, λ)...; check_args = check_args) +NoncentralBeta(α::Integer, β::Integer, λ::Integer; check_args::Bool = true) = + NoncentralBeta(float(α), float(β), float(λ); check_args = check_args) @distr_support NoncentralBeta 0.0 1.0 diff --git a/src/univariate/continuous/noncentralchisq.jl b/src/univariate/continuous/noncentralchisq.jl index 24a7558b4a..3f51df4c2e 100644 --- a/src/univariate/continuous/noncentralchisq.jl +++ b/src/univariate/continuous/noncentralchisq.jl @@ -26,22 +26,24 @@ External links struct NoncentralChisq{T<:Real} <: ContinuousUnivariateDistribution ν::T λ::T - NoncentralChisq{T}(ν::T, λ::T) where {T <: Real} = new{T}(ν, λ) + NoncentralChisq{T}(ν::T, λ::T) where {T<:Real} = new{T}(ν, λ) end -function NoncentralChisq(ν::T, λ::T; check_args::Bool=true) where {T <: Real} +function NoncentralChisq(ν::T, λ::T; check_args::Bool = true) where {T<:Real} @check_args NoncentralChisq (ν, ν > zero(ν)) (λ, λ >= zero(λ)) return NoncentralChisq{T}(ν, λ) end -NoncentralChisq(ν::Real, λ::Real; check_args::Bool=true) = NoncentralChisq(promote(ν, λ)...; check_args=check_args) -NoncentralChisq(ν::Integer, λ::Integer; check_args::Bool=true) = NoncentralChisq(float(ν), float(λ); check_args=check_args) +NoncentralChisq(ν::Real, λ::Real; check_args::Bool = true) = + NoncentralChisq(promote(ν, λ)...; check_args = check_args) +NoncentralChisq(ν::Integer, λ::Integer; check_args::Bool = true) = + NoncentralChisq(float(ν), float(λ); check_args = check_args) @distr_support NoncentralChisq 0.0 Inf #### Conversions -function convert(::Type{NoncentralChisq{T}}, ν::S, λ::S) where {T <: Real, S <: Real} +function convert(::Type{NoncentralChisq{T}}, ν::S, λ::S) where {T<:Real,S<:Real} NoncentralChisq(T(ν), T(λ)) end function Base.convert(::Type{NoncentralChisq{T}}, d::NoncentralChisq) where {T<:Real} @@ -59,19 +61,19 @@ params(d::NoncentralChisq) = (d.ν, d.λ) mean(d::NoncentralChisq) = d.ν + d.λ var(d::NoncentralChisq) = 2(d.ν + 2d.λ) -skewness(d::NoncentralChisq) = 2sqrt2*(d.ν + 3d.λ)/sqrt(d.ν + 2d.λ)^3 -kurtosis(d::NoncentralChisq) = 12(d.ν + 4d.λ)/(d.ν + 2d.λ)^2 +skewness(d::NoncentralChisq) = 2sqrt2 * (d.ν + 3d.λ) / sqrt(d.ν + 2d.λ)^3 +kurtosis(d::NoncentralChisq) = 12(d.ν + 4d.λ) / (d.ν + 2d.λ)^2 function mgf(d::NoncentralChisq, t::Real) - exp(d.λ * t/(1 - 2t))*(1 - 2t)^(-d.ν/2) + exp(d.λ * t / (1 - 2t)) * (1 - 2t)^(-d.ν / 2) end function 
cgf(d::NoncentralChisq, t) ν, λ = params(d) - return λ*t/(1 - 2*t) + cgf(Chisq{typeof(ν)}(ν), t) + return λ * t / (1 - 2 * t) + cgf(Chisq{typeof(ν)}(ν), t) end function cf(d::NoncentralChisq, t::Real) - cis(d.λ * t/(1 - 2im*t))*(1 - 2im*t)^(-d.ν/2) + cis(d.λ * t / (1 - 2im * t)) * (1 - 2im * t)^(-d.ν / 2) end diff --git a/src/univariate/continuous/noncentralf.jl b/src/univariate/continuous/noncentralf.jl index 9a493f8d4e..2416a1504b 100644 --- a/src/univariate/continuous/noncentralf.jl +++ b/src/univariate/continuous/noncentralf.jl @@ -10,19 +10,21 @@ struct NoncentralF{T<:Real} <: ContinuousUnivariateDistribution NoncentralF{T}(ν1::T, ν2::T, λ::T) where {T} = new{T}(ν1, ν2, λ) end -function NoncentralF(ν1::T, ν2::T, λ::T; check_args::Bool=true) where {T <: Real} +function NoncentralF(ν1::T, ν2::T, λ::T; check_args::Bool = true) where {T<:Real} @check_args NoncentralF (ν1, ν1 > zero(ν1)) (ν2, ν2 > zero(ν2)) (λ, λ >= zero(λ)) return NoncentralF{T}(ν1, ν2, λ) end -NoncentralF(ν1::Real, ν2::Real, λ::Real; check_args::Bool=true) = NoncentralF(promote(ν1, ν2, λ)...; check_args=check_args) -NoncentralF(ν1::Integer, ν2::Integer, λ::Integer; check_args::Bool=true) = NoncentralF(float(ν1), float(ν2), float(λ); check_args=check_args) +NoncentralF(ν1::Real, ν2::Real, λ::Real; check_args::Bool = true) = + NoncentralF(promote(ν1, ν2, λ)...; check_args = check_args) +NoncentralF(ν1::Integer, ν2::Integer, λ::Integer; check_args::Bool = true) = + NoncentralF(float(ν1), float(ν2), float(λ); check_args = check_args) @distr_support NoncentralF 0.0 Inf #### Conversions -function convert(::Type{NoncentralF{T}}, ν1::S, ν2::S, λ::S) where {T <: Real, S <: Real} +function convert(::Type{NoncentralF{T}}, ν1::S, ν2::S, λ::S) where {T<:Real,S<:Real} NoncentralF(T(ν1), T(ν2), T(λ)) end function Base.convert(::Type{NoncentralF{T}}, d::NoncentralF) where {T<:Real} @@ -41,9 +43,10 @@ function mean(d::NoncentralF{T}) where {T<:Real} d.ν2 > 2 ? d.ν2 / (d.ν2 - 2) * (d.ν1 + d.λ) / d.ν1 : T(NaN) end -var(d::NoncentralF{T}) where {T<:Real} = d.ν2 > 4 ? 2d.ν2^2 * - ((d.ν1 + d.λ)^2 + (d.ν2 - 2)*(d.ν1 + 2d.λ)) / - (d.ν1 * (d.ν2 - 2)^2 * (d.ν2 - 4)) : T(NaN) +var(d::NoncentralF{T}) where {T<:Real} = + d.ν2 > 4 ? + 2d.ν2^2 * ((d.ν1 + d.λ)^2 + (d.ν2 - 2) * (d.ν1 + 2d.λ)) / + (d.ν1 * (d.ν2 - 2)^2 * (d.ν2 - 4)) : T(NaN) ### Evaluation & Sampling @@ -51,7 +54,7 @@ var(d::NoncentralF{T}) where {T<:Real} = d.ν2 > 4 ? 
2d.ν2^2 * @_delegate_statsfuns NoncentralF nfdist ν1 ν2 λ function rand(rng::AbstractRNG, d::NoncentralF) - r1 = rand(rng, NoncentralChisq(d.ν1,d.λ)) / d.ν1 + r1 = rand(rng, NoncentralChisq(d.ν1, d.λ)) / d.ν1 r2 = rand(rng, Chisq(d.ν2)) / d.ν2 r1 / r2 end @@ -59,7 +62,7 @@ end # TODO: remove RFunctions dependency once NoncentralChisq has its removed @rand_rdist(NoncentralF) function rand(d::NoncentralF) - r1 = rand(NoncentralChisq(d.ν1,d.λ)) / d.ν1 + r1 = rand(NoncentralChisq(d.ν1, d.λ)) / d.ν1 r2 = rand(Chisq(d.ν2)) / d.ν2 r1 / r2 end diff --git a/src/univariate/continuous/noncentralt.jl b/src/univariate/continuous/noncentralt.jl index dfcdb6e3ca..5b5d229e04 100644 --- a/src/univariate/continuous/noncentralt.jl +++ b/src/univariate/continuous/noncentralt.jl @@ -9,19 +9,23 @@ struct NoncentralT{T<:Real} <: ContinuousUnivariateDistribution NoncentralT{T}(ν::T, λ::T) where {T} = new{T}(ν, λ) end -function NoncentralT(ν::T, λ::T; check_args::Bool=true) where {T <: Real} +function NoncentralT(ν::T, λ::T; check_args::Bool = true) where {T<:Real} @check_args NoncentralT (ν, ν > zero(ν)) return NoncentralT{T}(ν, λ) end -NoncentralT(ν::Real, λ::Real; check_args::Bool=true) = NoncentralT(promote(ν, λ)...; check_args=check_args) -NoncentralT(ν::Integer, λ::Integer; check_args::Bool=true) = NoncentralT(float(ν), float(λ); check_args=check_args) +NoncentralT(ν::Real, λ::Real; check_args::Bool = true) = + NoncentralT(promote(ν, λ)...; check_args = check_args) +NoncentralT(ν::Integer, λ::Integer; check_args::Bool = true) = + NoncentralT(float(ν), float(λ); check_args = check_args) @distr_support NoncentralT -Inf Inf ### Conversions -convert(::Type{NoncentralT{T}}, ν::S, λ::S) where {T <: Real, S <: Real} = NoncentralT(T(ν), T(λ)) -Base.convert(::Type{NoncentralT{T}}, d::NoncentralT) where {T<:Real} = NoncentralT(T(d.ν), T(d.λ)) +convert(::Type{NoncentralT{T}}, ν::S, λ::S) where {T<:Real,S<:Real} = + NoncentralT(T(ν), T(λ)) +Base.convert(::Type{NoncentralT{T}}, d::NoncentralT) where {T<:Real} = + NoncentralT(T(d.ν), T(d.λ)) Base.convert(::Type{NoncentralT{T}}, d::NoncentralT{T}) where {T<:Real} = d ### Parameters @@ -32,17 +36,16 @@ partype(::NoncentralT{T}) where {T} = T ### Statistics -function mean(d::NoncentralT{T}) where T<:Real +function mean(d::NoncentralT{T}) where {T<:Real} if d.ν > 1 - isinf(d.ν) ? d.λ : - sqrt(d.ν/2) * d.λ * gamma((d.ν - 1)/2) / gamma(d.ν/2) + isinf(d.ν) ? d.λ : sqrt(d.ν / 2) * d.λ * gamma((d.ν - 1) / 2) / gamma(d.ν / 2) else T(NaN) end end -function var(d::NoncentralT{T}) where T<:Real - d.ν > 2 ? d.ν*(1 + d.λ^2) / (d.ν - 2) - mean(d)^2 : T(NaN) +function var(d::NoncentralT{T}) where {T<:Real} + d.ν > 2 ? 
d.ν * (1 + d.λ^2) / (d.ν - 2) - mean(d)^2 : T(NaN) end ### Evaluation & Sampling @@ -54,5 +57,5 @@ function rand(rng::AbstractRNG, d::NoncentralT) ν = d.ν z = randn(rng) v = rand(rng, Chisq(ν)) - (z+d.λ)/sqrt(v/ν) + (z + d.λ) / sqrt(v / ν) end diff --git a/src/univariate/continuous/normal.jl b/src/univariate/continuous/normal.jl index ef2b77bb58..ed20075389 100644 --- a/src/univariate/continuous/normal.jl +++ b/src/univariate/continuous/normal.jl @@ -33,20 +33,22 @@ struct Normal{T<:Real} <: ContinuousUnivariateDistribution Normal{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) end -function Normal(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function Normal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Normal (σ, σ >= zero(σ)) return Normal{T}(μ, σ) end #### Outer constructors -Normal(μ::Real, σ::Real; check_args::Bool=true) = Normal(promote(μ, σ)...; check_args=check_args) -Normal(μ::Integer, σ::Integer; check_args::Bool=true) = Normal(float(μ), float(σ); check_args=check_args) -Normal(μ::Real=0.0) = Normal(μ, one(μ); check_args=false) +Normal(μ::Real, σ::Real; check_args::Bool = true) = + Normal(promote(μ, σ)...; check_args = check_args) +Normal(μ::Integer, σ::Integer; check_args::Bool = true) = + Normal(float(μ), float(σ); check_args = check_args) +Normal(μ::Real = 0.0) = Normal(μ, one(μ); check_args = false) const Gaussian = Normal # #### Conversions -convert(::Type{Normal{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = Normal(T(μ), T(σ)) +convert(::Type{Normal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = Normal(T(μ), T(σ)) Base.convert(::Type{Normal{T}}, d::Normal) where {T<:Real} = Normal{T}(T(d.μ), T(d.σ)) Base.convert(::Type{Normal{T}}, d::Normal{T}) where {T<:Real} = d @@ -73,7 +75,7 @@ std(d::Normal) = d.σ skewness(d::Normal{T}) where {T<:Real} = zero(T) kurtosis(d::Normal{T}) where {T<:Real} = zero(T) -entropy(d::Normal) = (log2π + 1)/2 + log(d.σ) +entropy(d::Normal) = (log2π + 1) / 2 + log(d.σ) function kldivergence(p::Normal, q::Normal) μp = mean(p) @@ -102,8 +104,8 @@ gradlogpdf(d::Normal, x::Real) = (d.μ - x) / d.σ^2 mgf(d::Normal, t::Real) = exp(t * d.μ + d.σ^2 / 2 * t^2) function cgf(d::Normal, t) - μ,σ = params(d) - t*μ + (σ*t)^2/2 + μ, σ = params(d) + t * μ + (σ * t)^2 / 2 end cf(d::Normal, t::Real) = exp(im * t * d.μ - d.σ^2 / 2 * t^2) @@ -132,7 +134,7 @@ struct NormalStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where T<:Real +function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where {T<:Real} n = length(x) # compute s @@ -151,7 +153,11 @@ function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where T<:Real NormalStats(s, m, s2, n) end -function suffstats(::Type{<:Normal}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Real +function suffstats( + ::Type{<:Normal}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Real} n = length(x) # compute s @@ -185,7 +191,7 @@ struct NormalKnownMuStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where T<:Real +function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where {T<:Real} μ = g.μ s2 = zero(T) + zero(μ) for i in eachindex(x) @@ -194,7 +200,11 @@ function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where T<:Real NormalKnownMuStats(g.μ, s2, length(x)) end -function suffstats(g::NormalKnownMu, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Real +function suffstats( + g::NormalKnownMu, + x::AbstractArray{T}, 
+ w::AbstractArray{Float64}, +) where {T<:Real} μ = g.μ s2 = 0.0 * abs2(zero(T) - zero(μ)) tw = 0.0 @@ -221,11 +231,15 @@ struct NormalKnownSigmaStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(g::NormalKnownSigma, x::AbstractArray{T}) where T<:Real +function suffstats(g::NormalKnownSigma, x::AbstractArray{T}) where {T<:Real} NormalKnownSigmaStats(g.σ, sum(x), Float64(length(x))) end -function suffstats(g::NormalKnownSigma, x::AbstractArray{T}, w::AbstractArray{T}) where T<:Real +function suffstats( + g::NormalKnownSigma, + x::AbstractArray{T}, + w::AbstractArray{T}, +) where {T<:Real} NormalKnownSigmaStats(g.σ, dot(x, w), sum(w)) end @@ -237,7 +251,12 @@ fit_mle(g::NormalKnownSigma, ss::NormalKnownSigmaStats) = Normal(ss.sx / ss.tw, # generic fit_mle methods -function fit_mle(::Type{<:Normal}, x::AbstractArray{T}; mu::Float64=NaN, sigma::Float64=NaN) where T<:Real +function fit_mle( + ::Type{<:Normal}, + x::AbstractArray{T}; + mu::Float64 = NaN, + sigma::Float64 = NaN, +) where {T<:Real} if isnan(mu) if isnan(sigma) fit_mle(Normal, suffstats(Normal, x)) @@ -255,7 +274,13 @@ function fit_mle(::Type{<:Normal}, x::AbstractArray{T}; mu::Float64=NaN, sigma:: end end -function fit_mle(::Type{<:Normal}, x::AbstractArray{T}, w::AbstractArray{Float64}; mu::Float64=NaN, sigma::Float64=NaN) where T<:Real +function fit_mle( + ::Type{<:Normal}, + x::AbstractArray{T}, + w::AbstractArray{Float64}; + mu::Float64 = NaN, + sigma::Float64 = NaN, +) where {T<:Real} if isnan(mu) if isnan(sigma) fit_mle(Normal, suffstats(Normal, x, w)) diff --git a/src/univariate/continuous/normalcanon.jl b/src/univariate/continuous/normalcanon.jl index 89d45a4fa6..5d45f62170 100644 --- a/src/univariate/continuous/normalcanon.jl +++ b/src/univariate/continuous/normalcanon.jl @@ -11,28 +11,34 @@ struct NormalCanon{T<:Real} <: ContinuousUnivariateDistribution λ::T # σ^(-2) μ::T # μ - function NormalCanon{T}(η, λ; check_args::Bool=true) where T + function NormalCanon{T}(η, λ; check_args::Bool = true) where {T} @check_args NormalCanon (λ, λ > zero(λ)) new{T}(η, λ, η / λ) end end -NormalCanon(η::T, λ::T; check_args::Bool=true) where {T<:Real} = NormalCanon{typeof(η/λ)}(η, λ; check_args=check_args) -NormalCanon(η::Real, λ::Real; check_args::Bool=true) = NormalCanon(promote(η, λ)...; check_args=check_args) -NormalCanon(η::Integer, λ::Integer; check_args::Bool=true) = NormalCanon(float(η), float(λ); check_args=check_args) -NormalCanon() = NormalCanon{Float64}(0.0, 1.0; check_args=false) +NormalCanon(η::T, λ::T; check_args::Bool = true) where {T<:Real} = + NormalCanon{typeof(η / λ)}(η, λ; check_args = check_args) +NormalCanon(η::Real, λ::Real; check_args::Bool = true) = + NormalCanon(promote(η, λ)...; check_args = check_args) +NormalCanon(η::Integer, λ::Integer; check_args::Bool = true) = + NormalCanon(float(η), float(λ); check_args = check_args) +NormalCanon() = NormalCanon{Float64}(0.0, 1.0; check_args = false) @distr_support NormalCanon -Inf Inf #### Type Conversions -convert(::Type{NormalCanon{T}}, η::S, λ::S) where {T <: Real, S <: Real} = NormalCanon(T(η), T(λ)) -Base.convert(::Type{NormalCanon{T}}, d::NormalCanon) where {T<:Real} = NormalCanon{T}(T(d.η), T(d.λ); check_args=false) +convert(::Type{NormalCanon{T}}, η::S, λ::S) where {T<:Real,S<:Real} = + NormalCanon(T(η), T(λ)) +Base.convert(::Type{NormalCanon{T}}, d::NormalCanon) where {T<:Real} = + NormalCanon{T}(T(d.η), T(d.λ); check_args = false) Base.convert(::Type{NormalCanon{T}}, d::NormalCanon{T}) where {T<:Real} = d ## conversion between 
Normal and NormalCanon convert(::Type{Normal}, d::NormalCanon) = Normal(d.μ, 1 / sqrt(d.λ)) -convert(::Type{NormalCanon}, d::Normal) = (λ = 1 / d.σ^2; NormalCanon(λ * d.μ, λ)) +convert(::Type{NormalCanon}, d::Normal) = (λ = 1 / d.σ^2; +NormalCanon(λ * d.μ, λ)) meanform(d::NormalCanon) = convert(Normal, d) canonform(d::Normal) = convert(NormalCanon, d) @@ -68,16 +74,16 @@ end #### Evaluation -pdf(d::NormalCanon, x::Real) = (sqrt(d.λ) / sqrt2π) * exp(-d.λ * abs2(x - d.μ)/2) -logpdf(d::NormalCanon, x::Real) = (log(d.λ) - log2π - d.λ * abs2(x - d.μ))/2 +pdf(d::NormalCanon, x::Real) = (sqrt(d.λ) / sqrt2π) * exp(-d.λ * abs2(x - d.μ) / 2) +logpdf(d::NormalCanon, x::Real) = (log(d.λ) - log2π - d.λ * abs2(x - d.μ)) / 2 zval(d::NormalCanon, x::Real) = (x - d.μ) * sqrt(d.λ) xval(d::NormalCanon, z::Real) = d.μ + z / sqrt(d.λ) -cdf(d::NormalCanon, x::Real) = normcdf(zval(d,x)) -ccdf(d::NormalCanon, x::Real) = normccdf(zval(d,x)) -logcdf(d::NormalCanon, x::Real) = normlogcdf(zval(d,x)) -logccdf(d::NormalCanon, x::Real) = normlogccdf(zval(d,x)) +cdf(d::NormalCanon, x::Real) = normcdf(zval(d, x)) +ccdf(d::NormalCanon, x::Real) = normccdf(zval(d, x)) +logcdf(d::NormalCanon, x::Real) = normlogcdf(zval(d, x)) +logccdf(d::NormalCanon, x::Real) = normlogccdf(zval(d, x)) quantile(d::NormalCanon, p::Real) = xval(d, norminvcdf(p)) cquantile(d::NormalCanon, p::Real) = xval(d, norminvccdf(p)) diff --git a/src/univariate/continuous/normalinversegaussian.jl b/src/univariate/continuous/normalinversegaussian.jl index 4e2b89decb..7ef40dff9a 100644 --- a/src/univariate/continuous/normalinversegaussian.jl +++ b/src/univariate/continuous/normalinversegaussian.jl @@ -20,15 +20,17 @@ struct NormalInverseGaussian{T<:Real} <: ContinuousUnivariateDistribution β::T δ::T γ::T - function NormalInverseGaussian{T}(μ::T, α::T, β::T, δ::T) where T + function NormalInverseGaussian{T}(μ::T, α::T, β::T, δ::T) where {T} γ = sqrt(α^2 - β^2) new{T}(μ, α, β, δ, γ) end end -NormalInverseGaussian(μ::T, α::T, β::T, δ::T) where {T<:Real} = NormalInverseGaussian{T}(μ, α, β, δ) -NormalInverseGaussian(μ::Real, α::Real, β::Real, δ::Real) = NormalInverseGaussian(promote(μ, α, β, δ)...) +NormalInverseGaussian(μ::T, α::T, β::T, δ::T) where {T<:Real} = + NormalInverseGaussian{T}(μ, α, β, δ) +NormalInverseGaussian(μ::Real, α::Real, β::Real, δ::Real) = + NormalInverseGaussian(promote(μ, α, β, δ)...) 
function NormalInverseGaussian(μ::Integer, α::Integer, β::Integer, δ::Integer) return NormalInverseGaussian(float(μ), float(α), float(β), float(δ)) end @@ -36,13 +38,25 @@ end @distr_support NormalInverseGaussian -Inf Inf #### Conversions -function convert(::Type{NormalInverseGaussian{T}}, μ::Real, α::Real, β::Real, δ::Real) where T<:Real +function convert( + ::Type{NormalInverseGaussian{T}}, + μ::Real, + α::Real, + β::Real, + δ::Real, +) where {T<:Real} NormalInverseGaussian(T(μ), T(α), T(β), T(δ)) end -function Base.convert(::Type{NormalInverseGaussian{T}}, d::NormalInverseGaussian) where {T<:Real} +function Base.convert( + ::Type{NormalInverseGaussian{T}}, + d::NormalInverseGaussian, +) where {T<:Real} NormalInverseGaussian{T}(T(d.μ), T(d.α), T(d.β), T(d.δ)) end -Base.convert(::Type{NormalInverseGaussian{T}}, d::NormalInverseGaussian{T}) where {T<:Real} = d +Base.convert( + ::Type{NormalInverseGaussian{T}}, + d::NormalInverseGaussian{T}, +) where {T<:Real} = d params(d::NormalInverseGaussian) = (d.μ, d.α, d.β, d.δ) @inline partype(d::NormalInverseGaussian{T}) where {T<:Real} = T @@ -50,16 +64,20 @@ params(d::NormalInverseGaussian) = (d.μ, d.α, d.β, d.δ) mean(d::NormalInverseGaussian) = d.μ + d.δ * d.β / d.γ var(d::NormalInverseGaussian) = d.δ * d.α^2 / d.γ^3 skewness(d::NormalInverseGaussian) = 3d.β / (d.α * sqrt(d.δ * d.γ)) -kurtosis(d::NormalInverseGaussian) = 3 * (1 + 4*d.β^2/d.α^2) / (d.δ * d.γ) +kurtosis(d::NormalInverseGaussian) = 3 * (1 + 4 * d.β^2 / d.α^2) / (d.δ * d.γ) function pdf(d::NormalInverseGaussian, x::Real) μ, α, β, δ = params(d) - α * δ * besselk(1, α*sqrt(δ^2+(x - μ)^2)) / (π*sqrt(δ^2 + (x - μ)^2)) * exp(δ * d.γ + β*(x - μ)) + α * δ * besselk(1, α * sqrt(δ^2 + (x - μ)^2)) / (π * sqrt(δ^2 + (x - μ)^2)) * + exp(δ * d.γ + β * (x - μ)) end function logpdf(d::NormalInverseGaussian, x::Real) μ, α, β, δ = params(d) - log(α*δ) + log(besselk(1, α*sqrt(δ^2+(x-μ)^2))) - log(π*sqrt(δ^2+(x-μ)^2)) + δ*d.γ + β*(x-μ) + log(α * δ) + log(besselk(1, α * sqrt(δ^2 + (x - μ)^2))) - + log(π * sqrt(δ^2 + (x - μ)^2)) + + δ * d.γ + + β * (x - μ) end @@ -76,8 +94,8 @@ end function rand(rng::Random.AbstractRNG, d::NormalInverseGaussian) μ, α, β, δ = params(d) - Z = InverseGaussian(δ/d.γ, δ^2) + Z = InverseGaussian(δ / d.γ, δ^2) z = rand(rng, Z) - X = Normal(μ + β*z, sqrt(z)) + X = Normal(μ + β * z, sqrt(z)) return rand(rng, X) end diff --git a/src/univariate/continuous/pareto.jl b/src/univariate/continuous/pareto.jl index 249fa29eb7..1af251dd61 100644 --- a/src/univariate/continuous/pareto.jl +++ b/src/univariate/continuous/pareto.jl @@ -27,14 +27,16 @@ struct Pareto{T<:Real} <: ContinuousUnivariateDistribution Pareto{T}(α::T, θ::T) where {T} = new{T}(α, θ) end -function Pareto(α::T, θ::T; check_args::Bool=true) where {T <: Real} +function Pareto(α::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Pareto (α, α > zero(α)) (θ, θ > zero(θ)) return Pareto{T}(α, θ) end -Pareto(α::Real, θ::Real; check_args::Bool=true) = Pareto(promote(α, θ)...; check_args=check_args) -Pareto(α::Integer, θ::Integer; check_args::Bool=true) = Pareto(float(α), float(θ); check_args=check_args) -Pareto(α::Real; check_args::Bool=true) = Pareto(α, one(α); check_args=check_args) +Pareto(α::Real, θ::Real; check_args::Bool = true) = + Pareto(promote(α, θ)...; check_args = check_args) +Pareto(α::Integer, θ::Integer; check_args::Bool = true) = + Pareto(float(α), float(θ); check_args = check_args) +Pareto(α::Real; check_args::Bool = true) = Pareto(α, one(α); check_args = check_args) Pareto() = Pareto{Float64}(1.0, 
1.0) @distr_support Pareto d.θ Inf @@ -55,39 +57,41 @@ params(d::Pareto) = (d.α, d.θ) #### Statistics -function mean(d::Pareto{T}) where T<:Real +function mean(d::Pareto{T}) where {T<:Real} (α, θ) = params(d) α > 1 ? α * θ / (α - 1) : T(Inf) end -median(d::Pareto) = ((α, θ) = params(d); θ * 2^(1/α)) +median(d::Pareto) = ((α, θ) = params(d); +θ * 2^(1 / α)) mode(d::Pareto) = d.θ -function var(d::Pareto{T}) where T<:Real +function var(d::Pareto{T}) where {T<:Real} (α, θ) = params(d) α > 2 ? (θ^2 * α) / ((α - 1)^2 * (α - 2)) : T(Inf) end -function skewness(d::Pareto{T}) where T<:Real +function skewness(d::Pareto{T}) where {T<:Real} α = shape(d) α > 3 ? ((2(1 + α)) / (α - 3)) * sqrt((α - 2) / α) : T(NaN) end -function kurtosis(d::Pareto{T}) where T<:Real +function kurtosis(d::Pareto{T}) where {T<:Real} α = shape(d) α > 4 ? (6(α^3 + α^2 - 6α - 2)) / (α * (α - 3) * (α - 4)) : T(NaN) end -entropy(d::Pareto) = ((α, θ) = params(d); log(θ / α) + 1 / α + 1) +entropy(d::Pareto) = ((α, θ) = params(d); +log(θ / α) + 1 / α + 1) #### Evaluation -function pdf(d::Pareto{T}, x::Real) where T<:Real +function pdf(d::Pareto{T}, x::Real) where {T<:Real} (α, θ) = params(d) - x >= θ ? α * (θ / x)^α * (1/x) : zero(T) + x >= θ ? α * (θ / x)^α * (1 / x) : zero(T) end -function logpdf(d::Pareto{T}, x::Real) where T<:Real +function logpdf(d::Pareto{T}, x::Real) where {T<:Real} (α, θ) = params(d) x >= θ ? log(α) + α * log(θ) - (α + 1) * log(x) : -T(Inf) end @@ -121,7 +125,7 @@ end ## Fitting -function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where T<:Real +function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where {T<:Real} # Based on # https://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation @@ -130,10 +134,10 @@ function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where T<:Real n = length(x) lθ = log(θ) temp1 = zero(T) - for i=1:n + for i = 1:n temp1 += log(x[i]) - lθ end - α = n/temp1 + α = n / temp1 return Pareto(α, θ) end diff --git a/src/univariate/continuous/pgeneralizedgaussian.jl b/src/univariate/continuous/pgeneralizedgaussian.jl index e4e7b3c926..0faa11d25d 100644 --- a/src/univariate/continuous/pgeneralizedgaussian.jl +++ b/src/univariate/continuous/pgeneralizedgaussian.jl @@ -35,12 +35,12 @@ struct PGeneralizedGaussian{T<:Real} <: ContinuousUnivariateDistribution PGeneralizedGaussian{T}(μ::T, α::T, p::T) where {T<:Real} = new{T}(µ, α, p) end -function PGeneralizedGaussian(μ::T, α::T, p::T; check_args::Bool=true) where {T<:Real} +function PGeneralizedGaussian(μ::T, α::T, p::T; check_args::Bool = true) where {T<:Real} @check_args PGeneralizedGaussian (α, α > zero(α)) (p, p > zero(p)) return PGeneralizedGaussian{T}(μ, α, p) end -function PGeneralizedGaussian(μ::Real, α::Real, p::Real; check_args::Bool=true) - return PGeneralizedGaussian(promote(μ, α, p)...; check_args=check_args) +function PGeneralizedGaussian(μ::Real, α::Real, p::Real; check_args::Bool = true) + return PGeneralizedGaussian(promote(μ, α, p)...; check_args = check_args) end """ @@ -48,7 +48,7 @@ end Build a p-generalized Gaussian with `μ=0.0, α=1.0` """ -function PGeneralizedGaussian(p::Real; check_args::Bool=true) +function PGeneralizedGaussian(p::Real; check_args::Bool = true) @check_args PGeneralizedGaussian (p, p > zero(p)) return PGeneralizedGaussian{typeof(p)}(zero(p), oftype(p, 1), p) end @@ -63,10 +63,14 @@ PGeneralizedGaussian() = PGeneralizedGaussian{Float64}(0.0, √2, 2.0) # approxi #### Conversions -function Base.convert(::Type{PGeneralizedGaussian{T}}, d::PGeneralizedGaussian) where {T<:Real} +function 
Base.convert( + ::Type{PGeneralizedGaussian{T}}, + d::PGeneralizedGaussian, +) where {T<:Real} return PGeneralizedGaussian{T}(T(d.μ), T(d.α), T(d.p)) end -Base.convert(::Type{PGeneralizedGaussian{T}}, d::PGeneralizedGaussian{T}) where {T<:Real} = d +Base.convert(::Type{PGeneralizedGaussian{T}}, d::PGeneralizedGaussian{T}) where {T<:Real} = + d @distr_support PGeneralizedGaussian -Inf Inf @@ -97,7 +101,7 @@ entropy(d::PGeneralizedGaussian) = 1 / d.p - log(d.p / (2 * d.α * gamma(1 / d.p function pdf(d::PGeneralizedGaussian, x::Real) μ, α, p = params(d) - return (p / (2 * α * gamma(1 / p))) * exp(- (abs(x - μ) / α)^p) + return (p / (2 * α * gamma(1 / p))) * exp(-(abs(x - μ) / α)^p) end function logpdf(d::PGeneralizedGaussian, x::Real) μ, α, p = params(d) diff --git a/src/univariate/continuous/rayleigh.jl b/src/univariate/continuous/rayleigh.jl index 9b475bf125..df84e29778 100644 --- a/src/univariate/continuous/rayleigh.jl +++ b/src/univariate/continuous/rayleigh.jl @@ -28,19 +28,19 @@ struct Rayleigh{T<:Real} <: ContinuousUnivariateDistribution Rayleigh{T}(σ::T) where {T<:Real} = new{T}(σ) end -function Rayleigh(σ::Real; check_args::Bool=true) +function Rayleigh(σ::Real; check_args::Bool = true) @check_args Rayleigh (σ, σ > zero(σ)) return Rayleigh{typeof(σ)}(σ) end -Rayleigh(σ::Integer; check_args::Bool=true) = Rayleigh(float(σ); check_args=check_args) +Rayleigh(σ::Integer; check_args::Bool = true) = Rayleigh(float(σ); check_args = check_args) Rayleigh() = Rayleigh{Float64}(1.0) @distr_support Rayleigh 0.0 Inf #### Conversions -convert(::Type{Rayleigh{T}}, σ::S) where {T <: Real, S <: Real} = Rayleigh(T(σ)) +convert(::Type{Rayleigh{T}}, σ::S) where {T<:Real,S<:Real} = Rayleigh(T(σ)) Base.convert(::Type{Rayleigh{T}}, d::Rayleigh) where {T<:Real} = Rayleigh{T}(T(d.σ)) Base.convert(::Type{Rayleigh{T}}, d::Rayleigh{T}) where {T<:Real} = d @@ -57,29 +57,30 @@ mean(d::Rayleigh) = sqrthalfπ * d.σ median(d::Rayleigh{T}) where {T<:Real} = sqrt2 * sqrt(T(logtwo)) * d.σ # sqrt(log(4)) mode(d::Rayleigh) = d.σ -var(d::Rayleigh{T}) where {T<:Real} = (2 - T(π)/2) * d.σ^2 -std(d::Rayleigh{T}) where {T<:Real} = sqrt(2 - T(π)/2) * d.σ +var(d::Rayleigh{T}) where {T<:Real} = (2 - T(π) / 2) * d.σ^2 +std(d::Rayleigh{T}) where {T<:Real} = sqrt(2 - T(π) / 2) * d.σ -skewness(d::Rayleigh{T}) where {T<:Real} = 2 * sqrtπ * (T(π) - 3)/(4 - T(π))^(3/2) -kurtosis(d::Rayleigh{T}) where {T<:Real} = -(6*T(π)^2 - 24*T(π) +16)/(4 - T(π))^2 +skewness(d::Rayleigh{T}) where {T<:Real} = 2 * sqrtπ * (T(π) - 3) / (4 - T(π))^(3 / 2) +kurtosis(d::Rayleigh{T}) where {T<:Real} = -(6 * T(π)^2 - 24 * T(π) + 16) / (4 - T(π))^2 -entropy(d::Rayleigh{T}) where {T<:Real} = 1 - T(logtwo)/2 + T(MathConstants.γ)/2 + log(d.σ) +entropy(d::Rayleigh{T}) where {T<:Real} = + 1 - T(logtwo) / 2 + T(MathConstants.γ) / 2 + log(d.σ) #### Evaluation -function pdf(d::Rayleigh{T}, x::Real) where T<:Real +function pdf(d::Rayleigh{T}, x::Real) where {T<:Real} σ2 = d.σ^2 - x > 0 ? (x / σ2) * exp(- (x^2) / (2σ2)) : zero(T) + x > 0 ? (x / σ2) * exp(-(x^2) / (2σ2)) : zero(T) end -function logpdf(d::Rayleigh{T}, x::Real) where T<:Real +function logpdf(d::Rayleigh{T}, x::Real) where {T<:Real} σ2 = d.σ^2 x > 0 ? log(x / σ2) - (x^2) / (2σ2) : -T(Inf) end function logccdf(d::Rayleigh, x::Real) - z = - x^2 / (2 * d.σ^2) + z = -x^2 / (2 * d.σ^2) # return negative zero so that cdf is +0, not -0 return x > 0 ? z : isnan(x) ? 
oftype(z, NaN) : -zero(z) end @@ -105,6 +106,6 @@ function fit_mle(::Type{<:Rayleigh}, x::AbstractArray{T}) where {T<:Real} s2 += xi^2 end - s2 /= (2*length(x)) + s2 /= (2 * length(x)) return Rayleigh(sqrt(s2)) end diff --git a/src/univariate/continuous/rician.jl b/src/univariate/continuous/rician.jl index f5c354f705..f79abd872f 100644 --- a/src/univariate/continuous/rician.jl +++ b/src/univariate/continuous/rician.jl @@ -33,20 +33,22 @@ struct Rician{T<:Real} <: ContinuousUnivariateDistribution Rician{T}(ν, σ) where {T} = new{T}(ν, σ) end -function Rician(ν::T, σ::T; check_args::Bool=true) where {T<:Real} +function Rician(ν::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Rician (ν, ν ≥ zero(ν)) (σ, σ ≥ zero(σ)) return Rician{T}(ν, σ) end Rician() = Rician{Float64}(0.0, 1.0) -Rician(ν::Real, σ::Real; check_args::Bool=true) = Rician(promote(ν, σ)...; check_args=check_args) -Rician(ν::Integer, σ::Integer; check_args::Bool=true) = Rician(float(ν), float(σ); check_args=check_args) +Rician(ν::Real, σ::Real; check_args::Bool = true) = + Rician(promote(ν, σ)...; check_args = check_args) +Rician(ν::Integer, σ::Integer; check_args::Bool = true) = + Rician(float(ν), float(σ); check_args = check_args) @distr_support Rician 0.0 Inf #### Conversions -function convert(::Type{Rician{T}}, ν::Real, σ::Real) where T<:Real +function convert(::Type{Rician{T}}, ν::Real, σ::Real) where {T<:Real} Rician(T(ν), T(σ)) end @@ -64,10 +66,11 @@ partype(d::Rician{T}) where {T<:Real} = T #### Statistics # helper -_Lhalf(x) = exp(x/2) * ((1-x) * besseli(zero(x), -x/2) - x * besseli(oneunit(x), -x/2)) +_Lhalf(x) = + exp(x / 2) * ((1 - x) * besseli(zero(x), -x / 2) - x * besseli(oneunit(x), -x / 2)) -mean(d::Rician) = d.σ * sqrthalfπ * _Lhalf(-d.ν^2/(2 * d.σ^2)) -var(d::Rician) = 2 * d.σ^2 + d.ν^2 - halfπ * d.σ^2 * _Lhalf(-d.ν^2/(2 * d.σ^2))^2 +mean(d::Rician) = d.σ * sqrthalfπ * _Lhalf(-d.ν^2 / (2 * d.σ^2)) +var(d::Rician) = 2 * d.σ^2 + d.ν^2 - halfπ * d.σ^2 * _Lhalf(-d.ν^2 / (2 * d.σ^2))^2 function mode(d::Rician) m = mean(d) @@ -75,7 +78,7 @@ function mode(d::Rician) end # helper: 1D minimization using Golden-section search -function _minimize_gss(f, a, b; tol=1e-12) +function _minimize_gss(f, a, b; tol = 1e-12) ϕ = (√5 + 1) / 2 c = b - (b - a) / ϕ d = a + (b - a) / ϕ @@ -138,18 +141,22 @@ end #### Fitting # implementation based on the Koay inversion technique -function fit(::Type{<:Rician}, x::AbstractArray{T}; tol=1e-12, maxiters=500) where T +function fit(::Type{<:Rician}, x::AbstractArray{T}; tol = 1e-12, maxiters = 500) where {T} μ₁ = mean(x) μ₂ = var(x) r = μ₁ / √μ₂ - if r < sqrt(π/(4-π)) + if r < sqrt(π / (4 - π)) ν = zero(float(T)) σ = scale(fit(Rayleigh, x)) else - ξ(θ) = 2 + θ^2 - π/8 * exp(-θ^2 / 2) * ((2 + θ^2) * besseli(0, θ^2 / 4) + θ^2 * besseli(1, θ^2 / 4))^2 - g(θ) = sqrt(ξ(θ) * (1+r^2) - 2) + ξ(θ) = + 2 + θ^2 - + π / 8 * + exp(-θ^2 / 2) * + ((2 + θ^2) * besseli(0, θ^2 / 4) + θ^2 * besseli(1, θ^2 / 4))^2 + g(θ) = sqrt(ξ(θ) * (1 + r^2) - 2) θ = g(1) - for j in 1:maxiters + for j = 1:maxiters θ⁻ = θ θ = g(θ) abs(θ - θ⁻) < tol && break diff --git a/src/univariate/continuous/semicircle.jl b/src/univariate/continuous/semicircle.jl index 760164ea02..7543084ebb 100644 --- a/src/univariate/continuous/semicircle.jl +++ b/src/univariate/continuous/semicircle.jl @@ -20,16 +20,17 @@ External links """ struct Semicircle{T<:Real} <: ContinuousUnivariateDistribution r::T - Semicircle{T}(r::T) where {T <: Real} = new{T}(r) + Semicircle{T}(r::T) where {T<:Real} = new{T}(r) end -function 
Semicircle(r::Real; check_args::Bool=true) +function Semicircle(r::Real; check_args::Bool = true) @check_args Semicircle (r, r > zero(r)) return Semicircle{typeof(r)}(r) end -Semicircle(r::Integer; check_args::Bool=true) = Semicircle(float(r); check_args=check_args) +Semicircle(r::Integer; check_args::Bool = true) = + Semicircle(float(r); check_args = check_args) @distr_support Semicircle -d.r +d.r diff --git a/src/univariate/continuous/skewedexponentialpower.jl b/src/univariate/continuous/skewedexponentialpower.jl index d771511142..c144982867 100644 --- a/src/univariate/continuous/skewedexponentialpower.jl +++ b/src/univariate/continuous/skewedexponentialpower.jl @@ -29,7 +29,7 @@ location(d) # Get the location parameter, i.e. μ scale(d) # Get the scale parameter, i.e. σ ``` """ -struct SkewedExponentialPower{T <: Real} <: ContinuousUnivariateDistribution +struct SkewedExponentialPower{T<:Real} <: ContinuousUnivariateDistribution μ::T σ::T p::T @@ -37,23 +37,45 @@ struct SkewedExponentialPower{T <: Real} <: ContinuousUnivariateDistribution SkewedExponentialPower{T}(μ::T, σ::T, p::T, α::T) where {T} = new{T}(μ, σ, p, α) end -function SkewedExponentialPower(µ::T, σ::T, p::T, α::T; check_args::Bool=true) where {T <: Real} - @check_args SkewedExponentialPower (σ, σ > zero(σ)) (p, p > zero(p)) (α, zero(α) < α < one(α)) +function SkewedExponentialPower( + µ::T, + σ::T, + p::T, + α::T; + check_args::Bool = true, +) where {T<:Real} + @check_args SkewedExponentialPower (σ, σ > zero(σ)) (p, p > zero(p)) ( + α, + zero(α) < α < one(α), + ) return SkewedExponentialPower{T}(µ, σ, p, α) end -function SkewedExponentialPower(μ::Real, σ::Real, p::Real=2, α::Real=1//2; check_args::Bool=true) - return SkewedExponentialPower(promote(μ, σ, p, α)...; check_args=check_args) +function SkewedExponentialPower( + μ::Real, + σ::Real, + p::Real = 2, + α::Real = 1 // 2; + check_args::Bool = true, +) + return SkewedExponentialPower(promote(μ, σ, p, α)...; check_args = check_args) end -SkewedExponentialPower(μ::Real=0) = SkewedExponentialPower(μ, 1, 2, 1//2; check_args=false) +SkewedExponentialPower(μ::Real = 0) = + SkewedExponentialPower(μ, 1, 2, 1 // 2; check_args = false) @distr_support SkewedExponentialPower -Inf Inf ### Conversions -function Base.convert(::Type{SkewedExponentialPower{T}}, d::SkewedExponentialPower) where {T<:Real} +function Base.convert( + ::Type{SkewedExponentialPower{T}}, + d::SkewedExponentialPower, +) where {T<:Real} SkewedExponentialPower{T}(T(d.μ), T(d.σ), T(d.p), T(d.α)) end -Base.convert(::Type{SkewedExponentialPower{T}}, d::SkewedExponentialPower{T}) where {T<:Real} = d +Base.convert( + ::Type{SkewedExponentialPower{T}}, + d::SkewedExponentialPower{T}, +) where {T<:Real} = d ### Parameters @inline partype(::SkewedExponentialPower{T}) where {T<:Real} = T @@ -69,42 +91,50 @@ scale(d::SkewedExponentialPower) = d.σ function m_k(d::SkewedExponentialPower, k::Integer) _, σ, p, α = params(d) inv_p = inv(p) - return k * (logtwo + inv_p * log(p) + log(σ)) + loggamma((1 + k) * inv_p) - - loggamma(inv_p) + log(abs((-1)^k * α^(1 + k) + (1 - α)^(1 + k))) + return k * (logtwo + inv_p * log(p) + log(σ)) + loggamma((1 + k) * inv_p) - + loggamma(inv_p) + log(abs((-1)^k * α^(1 + k) + (1 - α)^(1 + k))) end # needed for odd moments in log scale -sgn(d::SkewedExponentialPower) = d.α > 1//2 ? -1 : 1 +sgn(d::SkewedExponentialPower) = d.α > 1 // 2 ? -1 : 1 -mean(d::SkewedExponentialPower) = d.α == 1//2 ? 
float(d.μ) : sgn(d)*exp(m_k(d, 1)) + d.μ -mode(d::SkewedExponentialPower) = mean(d) -var(d::SkewedExponentialPower) = exp(m_k(d, 2)) - exp(2*m_k(d, 1)) -skewness(d::SkewedExponentialPower) = d.α == 1//2 ? float(zero(partype(d))) : sgn(d)*exp(m_k(d, 3)) / (std(d))^3 -kurtosis(d::SkewedExponentialPower) = exp(m_k(d, 4))/var(d)^2 - 3 +mean(d::SkewedExponentialPower) = d.α == 1 // 2 ? float(d.μ) : sgn(d) * exp(m_k(d, 1)) + d.μ +mode(d::SkewedExponentialPower) = mean(d) +var(d::SkewedExponentialPower) = exp(m_k(d, 2)) - exp(2 * m_k(d, 1)) +skewness(d::SkewedExponentialPower) = + d.α == 1 // 2 ? float(zero(partype(d))) : sgn(d) * exp(m_k(d, 3)) / (std(d))^3 +kurtosis(d::SkewedExponentialPower) = exp(m_k(d, 4)) / var(d)^2 - 3 function logpdf(d::SkewedExponentialPower, x::Real) μ, σ, p, α = params(d) a = x < μ ? α : 1 - α inv_p = inv(p) - return -(logtwo + log(σ) + loggamma(inv_p) + ((1 - p) * log(p) + (abs(μ - x) / (2 * σ * a))^p) / p) + return -( + logtwo + + log(σ) + + loggamma(inv_p) + + ((1 - p) * log(p) + (abs(μ - x) / (2 * σ * a))^p) / p + ) end function cdf(d::SkewedExponentialPower, x::Real) μ, σ, p, α = params(d) inv_p = inv(p) if x <= μ - α * ccdf(Gamma(inv_p), inv_p * (abs((x-μ)/σ) / (2*α))^p) + α * ccdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * α))^p) else - α + (1-α) * cdf(Gamma(inv_p), inv_p * (abs((x-μ)/σ) / (2*(1-α)))^p) + α + (1 - α) * cdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * (1 - α)))^p) end end function logcdf(d::SkewedExponentialPower, x::Real) μ, σ, p, α = params(d) inv_p = inv(p) if x <= μ - log(α) + logccdf(Gamma(inv_p), inv_p * (abs((x-μ)/σ) / (2*α))^p) + log(α) + logccdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * α))^p) else - log1mexp(log1p(-α) + logccdf(Gamma(inv_p), inv_p * (abs((x-μ)/σ) / (2*(1-α)))^p)) + log1mexp( + log1p(-α) + logccdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * (1 - α)))^p), + ) end end @@ -112,19 +142,19 @@ function quantile(d::SkewedExponentialPower, p::Real) μ, σ, _, α = params(d) inv_p = inv(d.p) if p <= α - μ - 2*α*σ * (d.p * quantile(Gamma(inv_p), (α-p)/α))^inv_p + μ - 2 * α * σ * (d.p * quantile(Gamma(inv_p), (α - p) / α))^inv_p else - μ + 2*(1-α)*σ * (d.p * quantile(Gamma(inv_p), (p-α)/(1-α)))^inv_p + μ + 2 * (1 - α) * σ * (d.p * quantile(Gamma(inv_p), (p - α) / (1 - α)))^inv_p end end function rand(rng::AbstractRNG, d::SkewedExponentialPower) μ, σ, p, α = params(d) inv_p = inv(d.p) - z = 2*σ * (p * rand(rng, Gamma(inv_p, 1)))^inv_p + z = 2 * σ * (p * rand(rng, Gamma(inv_p, 1)))^inv_p if rand(rng) < d.α return μ - α * z else - return μ + (1-α) * z + return μ + (1 - α) * z end end diff --git a/src/univariate/continuous/skewnormal.jl b/src/univariate/continuous/skewnormal.jl index dd9215deae..a0756a221d 100644 --- a/src/univariate/continuous/skewnormal.jl +++ b/src/univariate/continuous/skewnormal.jl @@ -26,20 +26,24 @@ struct SkewNormal{T<:Real} <: ContinuousUnivariateDistribution SkewNormal{T}(ξ::T, ω::T, α::T) where {T} = new{T}(ξ, ω, α) end -function SkewNormal(ξ::T, ω::T, α::T; check_args::Bool=true) where {T <: Real} +function SkewNormal(ξ::T, ω::T, α::T; check_args::Bool = true) where {T<:Real} @check_args SkewNormal (ω, ω > zero(ω)) return SkewNormal{T}(ξ, ω, α) end -SkewNormal(ξ::Real, ω::Real, α::Real; check_args::Bool=true) = SkewNormal(promote(ξ, ω, α)...; check_args=check_args) -SkewNormal(ξ::Integer, ω::Integer, α::Integer; check_args::Bool=true) = SkewNormal(float(ξ), float(ω), float(α); check_args=check_args) -SkewNormal(α::Real=0.0) = SkewNormal(zero(α), one(α), α; check_args=false) +SkewNormal(ξ::Real, 
ω::Real, α::Real; check_args::Bool = true) = + SkewNormal(promote(ξ, ω, α)...; check_args = check_args) +SkewNormal(ξ::Integer, ω::Integer, α::Integer; check_args::Bool = true) = + SkewNormal(float(ξ), float(ω), float(α); check_args = check_args) +SkewNormal(α::Real = 0.0) = SkewNormal(zero(α), one(α), α; check_args = false) @distr_support SkewNormal -Inf Inf #### Conversions -convert(::Type{SkewNormal{T}}, ξ::S, ω::S, α::S) where {T <: Real, S <: Real} = SkewNormal(T(ξ), T(ω), T(α)) -Base.convert(::Type{SkewNormal{T}}, d::SkewNormal) where {T<:Real} = SkewNormal{T}(T(d.ξ), T(d.ω), T(d.α)) +convert(::Type{SkewNormal{T}}, ξ::S, ω::S, α::S) where {T<:Real,S<:Real} = + SkewNormal(T(ξ), T(ω), T(α)) +Base.convert(::Type{SkewNormal{T}}, d::SkewNormal) where {T<:Real} = + SkewNormal{T}(T(d.ξ), T(d.ω), T(d.α)) Base.convert(::Type{SkewNormal{T}}, d::SkewNormal{T}) where {T<:Real} = d #### Parameters @@ -48,35 +52,42 @@ params(d::SkewNormal) = (d.ξ, d.ω, d.α) #### Statistics delta(d::SkewNormal) = d.α / √(1 + d.α^2) -mean_z(d::SkewNormal) = √(2/π) * delta(d) -std_z(d::SkewNormal) = 1 - (2/π) * delta(d)^2 +mean_z(d::SkewNormal) = √(2 / π) * delta(d) +std_z(d::SkewNormal) = 1 - (2 / π) * delta(d)^2 mean(d::SkewNormal) = d.ξ + d.ω * mean_z(d) var(d::SkewNormal) = abs2(d.ω) * (1 - mean_z(d)^2) std(d::SkewNormal) = √var(d) -skewness(d::SkewNormal) = ((4 - π)/2) * (mean_z(d)^3/(1 - mean_z(d)^2)^(3/2)) -kurtosis(d::SkewNormal) = 2 * (π-3) * ((delta(d) * sqrt(2/π))^4/(1-2 * (delta(d)^2)/π)^2) +skewness(d::SkewNormal) = ((4 - π) / 2) * (mean_z(d)^3 / (1 - mean_z(d)^2)^(3 / 2)) +kurtosis(d::SkewNormal) = + 2 * (π - 3) * ((delta(d) * sqrt(2 / π))^4 / (1 - 2 * (delta(d)^2) / π)^2) # no analytic expression for max m_0(d) but accurate numerical approximation -m_0(d::SkewNormal) = mean_z(d) - (skewness(d) * std_z(d))/2 - (sign(d.α)/2) * exp(-2π/abs(d.α)) -mode(d::SkewNormal) = d.ξ + d.ω * m_0(d) +m_0(d::SkewNormal) = + mean_z(d) - (skewness(d) * std_z(d)) / 2 - (sign(d.α) / 2) * exp(-2π / abs(d.α)) +mode(d::SkewNormal) = d.ξ + d.ω * m_0(d) #### Evaluation -pdf(d::SkewNormal, x::Real) = (2/d.ω) * normpdf((x-d.ξ)/d.ω) * normcdf(d.α * (x-d.ξ)/d.ω) -logpdf(d::SkewNormal, x::Real) = log(2) - log(d.ω) + normlogpdf((x-d.ξ) / d.ω) + normlogcdf(d.α * (x-d.ξ) / d.ω) +pdf(d::SkewNormal, x::Real) = + (2 / d.ω) * normpdf((x - d.ξ) / d.ω) * normcdf(d.α * (x - d.ξ) / d.ω) +logpdf(d::SkewNormal, x::Real) = + log(2) - log(d.ω) + normlogpdf((x - d.ξ) / d.ω) + normlogcdf(d.α * (x - d.ξ) / d.ω) #cdf requires Owen's T function. 
#cdf/quantile etc -mgf(d::SkewNormal, t::Real) = 2 * exp(d.ξ * t + (d.ω^2 * t^2)/2 ) * normcdf(d.ω * delta(d) * t) +mgf(d::SkewNormal, t::Real) = + 2 * exp(d.ξ * t + (d.ω^2 * t^2) / 2) * normcdf(d.ω * delta(d) * t) -cf(d::SkewNormal, t::Real) = exp(im * t * d.ξ - (d.ω^2 * t^2)/2) * (1 + im * erfi((d.ω * delta(d) * t)/(sqrt(2))) ) +cf(d::SkewNormal, t::Real) = + exp(im * t * d.ξ - (d.ω^2 * t^2) / 2) * + (1 + im * erfi((d.ω * delta(d) * t) / (sqrt(2)))) #### Sampling function rand(rng::AbstractRNG, d::SkewNormal) u0 = randn(rng) v = randn(rng) δ = delta(d) - u1 = δ * u0 + √(1-δ^2) * v + u1 = δ * u0 + √(1 - δ^2) * v return d.ξ + d.ω * sign(u0) * u1 end diff --git a/src/univariate/continuous/studentizedrange.jl b/src/univariate/continuous/studentizedrange.jl index eea76386d0..743a05d32e 100644 --- a/src/univariate/continuous/studentizedrange.jl +++ b/src/univariate/continuous/studentizedrange.jl @@ -30,23 +30,25 @@ External links struct StudentizedRange{T<:Real} <: ContinuousUnivariateDistribution ν::T k::T - StudentizedRange{T}(ν::T, k::T) where {T <: Real} = new{T}(ν, k) + StudentizedRange{T}(ν::T, k::T) where {T<:Real} = new{T}(ν, k) end -function StudentizedRange(ν::T, k::T; check_args::Bool=true) where {T <: Real} +function StudentizedRange(ν::T, k::T; check_args::Bool = true) where {T<:Real} @check_args StudentizedRange (ν, ν > zero(ν)) (k, k > one(k)) return StudentizedRange{T}(ν, k) end -StudentizedRange(ν::Integer, k::Integer; check_args::Bool=true) = StudentizedRange(float(ν), float(k); check_args=check_args) -StudentizedRange(ν::Real, k::Real; check_args::Bool=true) = StudentizedRange(promote(ν, k)...; check_args=check_args) +StudentizedRange(ν::Integer, k::Integer; check_args::Bool = true) = + StudentizedRange(float(ν), float(k); check_args = check_args) +StudentizedRange(ν::Real, k::Real; check_args::Bool = true) = + StudentizedRange(promote(ν, k)...; check_args = check_args) @distr_support StudentizedRange 0.0 Inf ### Conversions -function convert(::Type{StudentizedRange{T}}, ν::S, k::S) where {T <: Real, S <: Real} +function convert(::Type{StudentizedRange{T}}, ν::S, k::S) where {T<:Real,S<:Real} StudentizedRange(T(ν), T(k)) end @@ -57,7 +59,7 @@ Base.convert(::Type{StudentizedRange{T}}, d::StudentizedRange{T}) where {T<:Real ### Parameters params(d::StudentizedRange) = (d.ν, d.k) -@inline partype(d::StudentizedRange{T}) where {T <: Real} = T +@inline partype(d::StudentizedRange{T}) where {T<:Real} = T ### Evaluation & Sampling diff --git a/src/univariate/continuous/symtriangular.jl b/src/univariate/continuous/symtriangular.jl index f94518b20d..df1217d992 100644 --- a/src/univariate/continuous/symtriangular.jl +++ b/src/univariate/continuous/symtriangular.jl @@ -20,23 +20,25 @@ scale(d) # Get the scale parameter, i.e. 
σ struct SymTriangularDist{T<:Real} <: ContinuousUnivariateDistribution μ::T σ::T - SymTriangularDist{T}(µ::T, σ::T) where {T <: Real} = new{T}(µ, σ) + SymTriangularDist{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) end -function SymTriangularDist(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function SymTriangularDist(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args SymTriangularDist (σ, σ > zero(σ)) return SymTriangularDist{T}(μ, σ) end -SymTriangularDist(μ::Real, σ::Real; check_args::Bool=true) = SymTriangularDist(promote(μ, σ)...; check_args=check_args) -SymTriangularDist(μ::Integer, σ::Integer; check_args::Bool=true) = SymTriangularDist(float(μ), float(σ); check_args=check_args) -SymTriangularDist(μ::Real=0.0) = SymTriangularDist(μ, one(μ); check_args=false) +SymTriangularDist(μ::Real, σ::Real; check_args::Bool = true) = + SymTriangularDist(promote(μ, σ)...; check_args = check_args) +SymTriangularDist(μ::Integer, σ::Integer; check_args::Bool = true) = + SymTriangularDist(float(μ), float(σ); check_args = check_args) +SymTriangularDist(μ::Real = 0.0) = SymTriangularDist(μ, one(μ); check_args = false) @distr_support SymTriangularDist d.μ - d.σ d.μ + d.σ #### Conversions -function convert(::Type{SymTriangularDist{T}}, μ::Real, σ::Real) where T<:Real +function convert(::Type{SymTriangularDist{T}}, μ::Real, σ::Real) where {T<:Real} SymTriangularDist(T(μ), T(σ)) end function Base.convert(::Type{SymTriangularDist{T}}, d::SymTriangularDist) where {T<:Real} @@ -61,9 +63,9 @@ mode(d::SymTriangularDist) = d.μ var(d::SymTriangularDist) = d.σ^2 / 6 skewness(d::SymTriangularDist{T}) where {T<:Real} = zero(T) -kurtosis(d::SymTriangularDist{T}) where {T<:Real} = T(-3)/5 +kurtosis(d::SymTriangularDist{T}) where {T<:Real} = T(-3) / 5 -entropy(d::SymTriangularDist) = 1//2 + log(d.σ) +entropy(d::SymTriangularDist) = 1 // 2 + log(d.σ) #### Evaluation @@ -75,37 +77,36 @@ pdf(d::SymTriangularDist, x::Real) = (1 - zval(d, x)) / scale(d) logpdf(d::SymTriangularDist, x::Real) = log(pdf(d, x)) function cdf(d::SymTriangularDist, x::Real) - r = (1 - zval(d, x))^2/2 + r = (1 - zval(d, x))^2 / 2 return x < d.μ ? r : 1 - r end function ccdf(d::SymTriangularDist, x::Real) - r = (1 - zval(d, x))^2/2 + r = (1 - zval(d, x))^2 / 2 return x < d.μ ? 1 - r : r end function logcdf(d::SymTriangularDist, x::Real) - log_r = 2 * log1p(- zval(d, x)) + loghalf + log_r = 2 * log1p(-zval(d, x)) + loghalf return x < d.μ ? log_r : log1mexp(log_r) end function logccdf(d::SymTriangularDist, x::Real) - log_r = 2 * log1p(- zval(d, x)) + loghalf + log_r = 2 * log1p(-zval(d, x)) + loghalf return x < d.μ ? log1mexp(log_r) : log_r end -quantile(d::SymTriangularDist, p::Real) = p < 1/2 ? xval(d, sqrt(2p) - 1) : - xval(d, 1 - sqrt(2(1 - p))) +quantile(d::SymTriangularDist, p::Real) = + p < 1 / 2 ? xval(d, sqrt(2p) - 1) : xval(d, 1 - sqrt(2(1 - p))) -cquantile(d::SymTriangularDist, p::Real) = p > 1/2 ? xval(d, sqrt(2(1-p)) - 1) : - xval(d, 1 - sqrt(2p)) +cquantile(d::SymTriangularDist, p::Real) = + p > 1 / 2 ? xval(d, sqrt(2(1 - p)) - 1) : xval(d, 1 - sqrt(2p)) -invlogcdf(d::SymTriangularDist, lp::Real) = lp < loghalf ? xval(d, expm1(1/2*(lp - loghalf))) : - xval(d, 1 - sqrt(-2expm1(lp))) +invlogcdf(d::SymTriangularDist, lp::Real) = + lp < loghalf ? xval(d, expm1(1 / 2 * (lp - loghalf))) : xval(d, 1 - sqrt(-2expm1(lp))) function invlogccdf(d::SymTriangularDist, lp::Real) - lp > loghalf ? xval(d, sqrt(-2*expm1(lp)) - 1) : - xval(d, -(expm1((lp - loghalf)/2))) + lp > loghalf ? 
xval(d, sqrt(-2 * expm1(lp)) - 1) : xval(d, -(expm1((lp - loghalf) / 2))) end @@ -113,14 +114,14 @@ function mgf(d::SymTriangularDist, t::Real) (μ, σ) = params(d) a = σ * t a == zero(a) && return one(a) - 4*exp(μ * t) * (sinh(a/2) / a)^2 + 4 * exp(μ * t) * (sinh(a / 2) / a)^2 end function cf(d::SymTriangularDist, t::Real) (μ, σ) = params(d) a = σ * t a == zero(a) && return complex(one(a)) - 4*cis(μ * t) * (sin(a/2) / a)^2 + 4 * cis(μ * t) * (sin(a / 2) / a)^2 end diff --git a/src/univariate/continuous/tdist.jl b/src/univariate/continuous/tdist.jl index 873925c3eb..90ba4e87e4 100644 --- a/src/univariate/continuous/tdist.jl +++ b/src/univariate/continuous/tdist.jl @@ -22,15 +22,15 @@ External links """ struct TDist{T<:Real} <: ContinuousUnivariateDistribution ν::T - TDist{T}(ν::T) where {T <: Real} = new{T}(ν) + TDist{T}(ν::T) where {T<:Real} = new{T}(ν) end -function TDist(ν::Real; check_args::Bool=true) +function TDist(ν::Real; check_args::Bool = true) @check_args TDist (ν, ν > zero(ν)) return TDist{typeof(ν)}(ν) end -TDist(ν::Integer; check_args::Bool=true) = TDist(float(ν); check_args=check_args) +TDist(ν::Integer; check_args::Bool = true) = TDist(float(ν); check_args = check_args) @distr_support TDist -Inf Inf @@ -52,26 +52,24 @@ mean(d::TDist{T}) where {T<:Real} = d.ν > 1 ? zero(T) : T(NaN) median(d::TDist{T}) where {T<:Real} = zero(T) mode(d::TDist{T}) where {T<:Real} = zero(T) -function var(d::TDist{T}) where T<:Real +function var(d::TDist{T}) where {T<:Real} ν = d.ν isinf(ν) && return one(T) - ν > 2 ? ν / (ν - 2) : - ν > 1 ? T(Inf) : T(NaN) + ν > 2 ? ν / (ν - 2) : ν > 1 ? T(Inf) : T(NaN) end skewness(d::TDist{T}) where {T<:Real} = d.ν > 3 ? zero(T) : T(NaN) -function kurtosis(d::TDist{T}) where T<:Real +function kurtosis(d::TDist{T}) where {T<:Real} ν = d.ν - ν > 4 ? 6 / (ν - 4) : - ν > 2 ? T(Inf) : T(NaN) + ν > 4 ? 6 / (ν - 4) : ν > 2 ? T(Inf) : T(NaN) end -function entropy(d::TDist{T}) where T <: Real - isinf(d.ν) && return entropy( Normal(zero(T), one(T)) ) - h = d.ν/2 - h1 = h + 1//2 - h1 * (digamma(h1) - digamma(h)) + log(d.ν)/2 + logbeta(h, 1//2) +function entropy(d::TDist{T}) where {T<:Real} + isinf(d.ν) && return entropy(Normal(zero(T), one(T))) + h = d.ν / 2 + h1 = h + 1 // 2 + h1 * (digamma(h1) - digamma(h)) + log(d.ν) / 2 + logbeta(h, 1 // 2) end @@ -85,12 +83,13 @@ function rand(rng::AbstractRNG, d::TDist) return randn(rng, typeof(z)) / (isinf(ν) ? one(z) : z) end -function cf(d::TDist{T}, t::Real) where T <: Real +function cf(d::TDist{T}, t::Real) where {T<:Real} isinf(d.ν) && return cf(Normal(zero(T), one(T)), t) t == 0 && return complex(1) - h = d.ν/2 - q = d.ν/4 - complex(2(q*t^2)^q * besselk(h, sqrt(d.ν) * abs(t)) / gamma(h)) + h = d.ν / 2 + q = d.ν / 4 + complex(2(q * t^2)^q * besselk(h, sqrt(d.ν) * abs(t)) / gamma(h)) end -gradlogpdf(d::TDist{T}, x::Real) where {T<:Real} = isinf(d.ν) ? gradlogpdf(Normal(zero(T), one(T)), x) : -((d.ν + 1) * x) / (x^2 + d.ν) +gradlogpdf(d::TDist{T}, x::Real) where {T<:Real} = + isinf(d.ν) ? 
gradlogpdf(Normal(zero(T), one(T)), x) : -((d.ν + 1) * x) / (x^2 + d.ν) diff --git a/src/univariate/continuous/triangular.jl b/src/univariate/continuous/triangular.jl index aed3d15d5f..6df23a11c8 100644 --- a/src/univariate/continuous/triangular.jl +++ b/src/univariate/continuous/triangular.jl @@ -31,26 +31,29 @@ struct TriangularDist{T<:Real} <: ContinuousUnivariateDistribution a::T b::T c::T - TriangularDist{T}(a::T, b::T, c::T) where {T <: Real} = new{T}(a, b, c) + TriangularDist{T}(a::T, b::T, c::T) where {T<:Real} = new{T}(a, b, c) end -function TriangularDist(a::T, b::T, c::T; check_args::Bool=true) where {T <: Real} +function TriangularDist(a::T, b::T, c::T; check_args::Bool = true) where {T<:Real} @check_args TriangularDist (a <= c <= b) return TriangularDist{T}(a, b, c) end -TriangularDist(a::Real, b::Real, c::Real; check_args::Bool=true) = TriangularDist(promote(a, b, c)...; check_args=check_args) -function TriangularDist(a::Integer, b::Integer, c::Integer; check_args::Bool=true) - TriangularDist(float(a), float(b), float(c); check_args=check_args) +TriangularDist(a::Real, b::Real, c::Real; check_args::Bool = true) = + TriangularDist(promote(a, b, c)...; check_args = check_args) +function TriangularDist(a::Integer, b::Integer, c::Integer; check_args::Bool = true) + TriangularDist(float(a), float(b), float(c); check_args = check_args) end -TriangularDist(a::Real, b::Real) = TriangularDist(a, b, middle(a, b); check_args=false) +TriangularDist(a::Real, b::Real) = TriangularDist(a, b, middle(a, b); check_args = false) @distr_support TriangularDist d.a d.b #### Conversions -convert(::Type{TriangularDist{T}}, a::Real, b::Real, c::Real) where {T<:Real} = TriangularDist(T(a), T(b), T(c)) -Base.convert(::Type{TriangularDist{T}}, d::TriangularDist) where {T<:Real} = TriangularDist{T}(T(d.a), T(d.b), T(d.c)) +convert(::Type{TriangularDist{T}}, a::Real, b::Real, c::Real) where {T<:Real} = + TriangularDist(T(a), T(b), T(c)) +Base.convert(::Type{TriangularDist{T}}, d::TriangularDist) where {T<:Real} = + TriangularDist{T}(T(d.a), T(d.b), T(d.c)) Base.convert(::Type{TriangularDist{T}}, d::TriangularDist{T}) where {T<:Real} = d #### Parameters @@ -68,25 +71,24 @@ mean(d::TriangularDist) = (d.a + d.b + d.c) / 3 function median(d::TriangularDist) (a, b, c) = params(d) m = middle(a, b) - c >= m ? a + sqrt((b - a) * (c - a)/2) : - b - sqrt((b - a) * (b - c)/2) + c >= m ? a + sqrt((b - a) * (c - a) / 2) : b - sqrt((b - a) * (b - c) / 2) end -_pretvar(a::Real, b::Real, c::Real) = a*a + b*b + c*c - a*b - a*c - b*c +_pretvar(a::Real, b::Real, c::Real) = a * a + b * b + c * c - a * b - a * c - b * c function var(d::TriangularDist) (a, b, c) = params(d) _pretvar(a, b, c) / 18 end -function skewness(d::TriangularDist{T}) where T<:Real +function skewness(d::TriangularDist{T}) where {T<:Real} (a, b, c) = params(d) - sqrt2 * (a + b - 2c) * (2a - b - c) * (a - 2b + c) / ( 5 * _pretvar(a, b, c)^(T(3)/2) ) + sqrt2 * (a + b - 2c) * (2a - b - c) * (a - 2b + c) / (5 * _pretvar(a, b, c)^(T(3) / 2)) end -kurtosis(d::TriangularDist{T}) where {T<:Real} = T(-3)/5 +kurtosis(d::TriangularDist{T}) where {T<:Real} = T(-3) / 5 -entropy(d::TriangularDist{T}) where {T<:Real} = one(T)/2 + log((d.b - d.a) / 2) +entropy(d::TriangularDist{T}) where {T<:Real} = one(T) / 2 + log((d.b - d.a) / 2) #### Evaluation @@ -121,8 +123,7 @@ function quantile(d::TriangularDist, p::Real) c_m_a = c - a b_m_a = b - a rl = c_m_a / b_m_a - p <= rl ? a + sqrt(b_m_a * c_m_a * p) : - b - sqrt(b_m_a * (b - c) * (1 - p)) + p <= rl ? 
a + sqrt(b_m_a * c_m_a * p) : b - sqrt(b_m_a * (b - c) * (1 - p)) end """ @@ -145,7 +146,8 @@ function mgf(d::TriangularDist, t::Real) if a < c if c < b # Case: a < c < b - return exp(c * t) * ((c - a) * _phi2((a - c) * t) + (b - c) * _phi2((b - c) * t)) / (b - a) + return exp(c * t) * + ((c - a) * _phi2((a - c) * t) + (b - c) * _phi2((b - c) * t)) / (b - a) else # Case: a < c = b return exp(c * t) * _phi2((a - c) * t) @@ -180,7 +182,9 @@ function cf(d::TriangularDist, t::Real) if a < c if c < b # Case: a < c < b - return cis(c * t) * ((c - a) * _cisphi2((a - c) * t) + (b - c) * _cisphi2((b - c) * t)) / (b - a) + return cis(c * t) * + ((c - a) * _cisphi2((a - c) * t) + (b - c) * _cisphi2((b - c) * t)) / + (b - a) else # Case: a < c = b return cis(c * t) * _cisphi2((a - c) * t) @@ -202,5 +206,5 @@ function rand(rng::AbstractRNG, d::TriangularDist) b_m_a = b - a u = rand(rng) b_m_a * u < (c - a) ? d.a + sqrt(u * b_m_a * (c - a)) : - d.b - sqrt((1 - u) * b_m_a * (b - c)) + d.b - sqrt((1 - u) * b_m_a * (b - c)) end diff --git a/src/univariate/continuous/triweight.jl b/src/univariate/continuous/triweight.jl index e84642e3c1..a9cb0aaf12 100644 --- a/src/univariate/continuous/triweight.jl +++ b/src/univariate/continuous/triweight.jl @@ -7,21 +7,24 @@ struct Triweight{T<:Real} <: ContinuousUnivariateDistribution Triweight{T}(µ::T, σ::T) where {T} = new{T}(µ, σ) end -function Triweight(μ::T, σ::T; check_args::Bool=true) where {T <: Real} +function Triweight(μ::T, σ::T; check_args::Bool = true) where {T<:Real} @check_args Triweight (σ, σ > zero(σ)) return Triweight{T}(μ, σ) end -Triweight(μ::Real, σ::Real; check_args::Bool=true) = Triweight(promote(μ, σ)...; check_args=check_args) -Triweight(μ::Integer, σ::Integer; check_args::Bool=true) = Triweight(float(μ), float(σ); check_args=check_args) -Triweight(μ::Real=0.0) = Triweight(μ, one(μ); check_args=false) +Triweight(μ::Real, σ::Real; check_args::Bool = true) = + Triweight(promote(μ, σ)...; check_args = check_args) +Triweight(μ::Integer, σ::Integer; check_args::Bool = true) = + Triweight(float(μ), float(σ); check_args = check_args) +Triweight(μ::Real = 0.0) = Triweight(μ, one(μ); check_args = false) @distr_support Triweight d.μ - d.σ d.μ + d.σ ## Conversions convert(::Type{Triweight{T}}, μ::Real, σ::Real) where {T<:Real} = Triweight(T(μ), T(σ)) -Base.convert(::Type{Triweight{T}}, d::Triweight) where {T<:Real} = Triweight{T}(T(d.μ), T(d.σ)) +Base.convert(::Type{Triweight{T}}, d::Triweight) where {T<:Real} = + Triweight{T}(T(d.μ), T(d.σ)) Base.convert(::Type{Triweight{T}}, d::Triweight{T}) where {T<:Real} = d ## Parameters @@ -39,35 +42,37 @@ mode(d::Triweight) = d.μ var(d::Triweight) = d.σ^2 / 9 skewness(d::Triweight{T}) where {T<:Real} = zero(T) -kurtosis(d::Triweight{T}) where {T<:Real} = T(1)/33 - 3 +kurtosis(d::Triweight{T}) where {T<:Real} = T(1) / 33 - 3 ## Functions -function pdf(d::Triweight{T}, x::Real) where T<:Real - u = abs(x - d.μ)/d.σ - u >= 1 ? zero(T) : 1.09375*(1 - u*u)^3/d.σ +function pdf(d::Triweight{T}, x::Real) where {T<:Real} + u = abs(x - d.μ) / d.σ + u >= 1 ? zero(T) : 1.09375 * (1 - u * u)^3 / d.σ end logpdf(d::Triweight, x::Real) = log(pdf(d, x)) -function cdf(d::Triweight{T}, x::Real) where T<:Real - u = (x - d.μ)/d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125*(1 + u)^4*@horner(u,16,-29,20,-5) +function cdf(d::Triweight{T}, x::Real) where {T<:Real} + u = (x - d.μ) / d.σ + u ≤ -1 ? zero(T) : u ≥ 1 ? 
one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) end -function ccdf(d::Triweight{T}, x::Real) where T<:Real - u = (d.μ - x)/d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125*(1 + u)^4*@horner(u,16,-29,20,-5) +function ccdf(d::Triweight{T}, x::Real) where {T<:Real} + u = (d.μ - x) / d.σ + u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) end @quantile_newton Triweight -function mgf(d::Triweight{T}, t::Float64) where T<:Real - a = d.σ*t - a2 = a*a - a == 0 ? one(T) : 105*exp(d.μ*t)*((15/a2+1)*cosh(a)-(15/a2-6)/a*sinh(a))/(a2*a2) +function mgf(d::Triweight{T}, t::Float64) where {T<:Real} + a = d.σ * t + a2 = a * a + a == 0 ? one(T) : + 105 * exp(d.μ * t) * ((15 / a2 + 1) * cosh(a) - (15 / a2 - 6) / a * sinh(a)) / (a2 * a2) end -function cf(d::Triweight{T}, t::Float64) where T<:Real - a = d.σ*t - a2 = a*a - a == 0 ? complex(one(T)) : 105*cis(d.μ*t)*((1-15/a2)*cos(a)+(15/a2-6)/a*sin(a))/(a2*a2) +function cf(d::Triweight{T}, t::Float64) where {T<:Real} + a = d.σ * t + a2 = a * a + a == 0 ? complex(one(T)) : + 105 * cis(d.μ * t) * ((1 - 15 / a2) * cos(a) + (15 / a2 - 6) / a * sin(a)) / (a2 * a2) end diff --git a/src/univariate/continuous/uniform.jl b/src/univariate/continuous/uniform.jl index d4beca7d79..fa8c599cbc 100644 --- a/src/univariate/continuous/uniform.jl +++ b/src/univariate/continuous/uniform.jl @@ -26,16 +26,18 @@ External links struct Uniform{T<:Real} <: ContinuousUnivariateDistribution a::T b::T - Uniform{T}(a::Real, b::Real) where {T <: Real} = new{T}(a, b) + Uniform{T}(a::Real, b::Real) where {T<:Real} = new{T}(a, b) end -function Uniform(a::T, b::T; check_args::Bool=true) where {T <: Real} +function Uniform(a::T, b::T; check_args::Bool = true) where {T<:Real} @check_args Uniform (a < b) return Uniform{T}(a, b) end -Uniform(a::Real, b::Real; check_args::Bool=true) = Uniform(promote(a, b)...; check_args=check_args) -Uniform(a::Integer, b::Integer; check_args::Bool=true) = Uniform(float(a), float(b); check_args=check_args) +Uniform(a::Real, b::Real; check_args::Bool = true) = + Uniform(promote(a, b)...; check_args = check_args) +Uniform(a::Integer, b::Integer; check_args::Bool = true) = + Uniform(float(a), float(b); check_args = check_args) Uniform() = Uniform{Float64}(0.0, 1.0) @distr_support Uniform d.a d.b @@ -64,7 +66,7 @@ modes(d::Uniform) = Float64[] var(d::Uniform) = (w = d.b - d.a; w^2 / 12) skewness(d::Uniform{T}) where {T<:Real} = zero(T) -kurtosis(d::Uniform{T}) where {T<:Real} = -6/5*one(T) +kurtosis(d::Uniform{T}) where {T<:Real} = -6 / 5 * one(T) entropy(d::Uniform) = log(d.b - d.a) @@ -80,7 +82,7 @@ end function logpdf(d::Uniform, x::Real) # include dependency on `x` for return type to be consistent with `logcdf` a, b, _ = promote(d.a, d.b, x) - val = - log(b - a) + val = -log(b - a) return insupport(d, x) ? 
val : oftype(val, -Inf) end gradlogpdf(d::Uniform, x::Real) = zero(partype(d)) / oneunit(x) @@ -112,13 +114,13 @@ function cgf_uniform_around_zero_kernel(x) a1 = inv(T(6)) a2 = inv(T(24)) a3 = inv(T(120)) - x*@evalpoly(x, a0, a1, a2, a3) + x * @evalpoly(x, a0, a1, a2, a3) end function cgf(d::Uniform, t) # log((exp(t*b) - exp(t*a))/ (t*(b-a))) - a,b = params(d) - x = t*(b-a) + a, b = params(d) + x = t * (b - a) if abs(x) <= sqrt(eps(float(one(x)))) cgf_around_zero(d, t) else @@ -126,14 +128,14 @@ function cgf(d::Uniform, t) end end function cgf_around_zero(d::Uniform, t) - a,b = params(d) - x = t*(b-a) - t*a + log1p(cgf_uniform_around_zero_kernel(x)) + a, b = params(d) + x = t * (b - a) + t * a + log1p(cgf_uniform_around_zero_kernel(x)) end function cgf_away_from_zero(d::Uniform, t) - a,b = params(d) - x = t*(b-a) - logsubexp(t*b, t*a) - log(abs(x)) + a, b = params(d) + x = t * (b - a) + logsubexp(t * b, t * a) - log(abs(x)) end function cf(d::Uniform, t::Real) diff --git a/src/univariate/continuous/vonmises.jl b/src/univariate/continuous/vonmises.jl index 38a20408e7..cb4adfb643 100644 --- a/src/univariate/continuous/vonmises.jl +++ b/src/univariate/continuous/vonmises.jl @@ -24,15 +24,17 @@ struct VonMises{T<:Real} <: ContinuousUnivariateDistribution I0κx::T # I0(κ) * exp(-κ), where I0 is the modified Bessel function of order 0 end -function VonMises(μ::T, κ::T; check_args::Bool=true) where {T <: Real} +function VonMises(μ::T, κ::T; check_args::Bool = true) where {T<:Real} @check_args VonMises (κ, κ > zero(κ)) return VonMises{T}(μ, κ, besselix(zero(T), κ)) end -VonMises(μ::Real, κ::Real; check_args::Bool=true) = VonMises(promote(μ, κ)...; check_args=check_args) -VonMises(μ::Integer, κ::Integer; check_args::Bool=true) = VonMises(float(μ), float(κ); check_args=check_args) -VonMises(κ::Real; check_args::Bool=true) = VonMises(zero(κ), κ; check_args=check_args) -VonMises() = VonMises(0.0, 1.0; check_args=false) +VonMises(μ::Real, κ::Real; check_args::Bool = true) = + VonMises(promote(μ, κ)...; check_args = check_args) +VonMises(μ::Integer, κ::Integer; check_args::Bool = true) = + VonMises(float(μ), float(κ); check_args = check_args) +VonMises(κ::Real; check_args::Bool = true) = VonMises(zero(κ), κ; check_args = check_args) +VonMises() = VonMises(0.0, 1.0; check_args = false) show(io::IO, d::VonMises) = show(io, d, (:μ, :κ)) @@ -41,7 +43,8 @@ show(io::IO, d::VonMises) = show(io, d, (:μ, :κ)) #### Conversions convert(::Type{VonMises{T}}, μ::Real, κ::Real) where {T<:Real} = VonMises(T(μ), T(κ)) -Base.convert(::Type{VonMises{T}}, d::VonMises) where {T<:Real} = VonMises{T}(T(d.μ), T(d.κ), T(d.I0κx)) +Base.convert(::Type{VonMises{T}}, d::VonMises) where {T<:Real} = + VonMises{T}(T(d.μ), T(d.κ), T(d.I0κx)) Base.convert(::Type{VonMises{T}}, d::VonMises{T}) where {T<:Real} = d #### Parameters @@ -66,9 +69,9 @@ cf(d::VonMises, t::Real) = (besselix(abs(t), d.κ) / d.I0κx) * cis(t * d.μ) #### Evaluations #pdf(d::VonMises, x::Real) = exp(d.κ * (cos(x - d.μ) - 1)) / (twoπ * d.I0κx) -pdf(d::VonMises{T}, x::Real) where T<:Real = +pdf(d::VonMises{T}, x::Real) where {T<:Real} = minimum(d) ≤ x ≤ maximum(d) ? exp(d.κ * (cos(x - d.μ) - 1)) / (twoπ * d.I0κx) : zero(T) -logpdf(d::VonMises{T}, x::Real) where T<:Real = +logpdf(d::VonMises{T}, x::Real) where {T<:Real} = minimum(d) ≤ x ≤ maximum(d) ? 
d.κ * (cos(x - d.μ) - 1) - log(d.I0κx) - log2π : -T(Inf) function cdf(d::VonMises, x::Real) @@ -87,7 +90,7 @@ function _vmcdf(κ::Real, I0κx::Real, x::Real, tol::Real) cj = besselix(j, κ) / j s += cj * sin(j * x) end - return (x + 2s / I0κx) / twoπ + 1//2 + return (x + 2s / I0κx) / twoπ + 1 // 2 end diff --git a/src/univariate/continuous/weibull.jl b/src/univariate/continuous/weibull.jl index 3542350f9f..c07a901245 100644 --- a/src/univariate/continuous/weibull.jl +++ b/src/univariate/continuous/weibull.jl @@ -27,19 +27,21 @@ struct Weibull{T<:Real} <: ContinuousUnivariateDistribution α::T # shape θ::T # scale - function Weibull{T}(α::T, θ::T) where {T <: Real} + function Weibull{T}(α::T, θ::T) where {T<:Real} new{T}(α, θ) end end -function Weibull(α::T, θ::T; check_args::Bool=true) where {T <: Real} +function Weibull(α::T, θ::T; check_args::Bool = true) where {T<:Real} @check_args Weibull (α, α > zero(α)) (θ, θ > zero(θ)) return Weibull{T}(α, θ) end -Weibull(α::Real, θ::Real; check_args::Bool=true) = Weibull(promote(α, θ)...; check_args=check_args) -Weibull(α::Integer, θ::Integer; check_args::Bool=true) = Weibull(float(α), float(θ); check_args=check_args) -Weibull(α::Real=1.0) = Weibull(α, one(α); check_args=false) +Weibull(α::Real, θ::Real; check_args::Bool = true) = + Weibull(promote(α, θ)...; check_args = check_args) +Weibull(α::Integer, θ::Integer; check_args::Bool = true) = + Weibull(float(α), float(θ); check_args = check_args) +Weibull(α::Real = 1.0) = Weibull(α, one(α); check_args = false) @distr_support Weibull 0.0 Inf @@ -60,18 +62,18 @@ partype(::Weibull{T}) where {T<:Real} = T #### Statistics -mean(d::Weibull) = d.θ * gamma(1 + 1/d.α) -median(d::Weibull) = d.θ * logtwo ^ (1/d.α) +mean(d::Weibull) = d.θ * gamma(1 + 1 / d.α) +median(d::Weibull) = d.θ * logtwo^(1 / d.α) mode(d::Weibull{T}) where {T<:Real} = d.α > 1 ? (iα = 1 / d.α; d.θ * (1 - iα)^iα) : zero(T) -var(d::Weibull) = d.θ^2 * gamma(1 + 2/d.α) - mean(d)^2 +var(d::Weibull) = d.θ^2 * gamma(1 + 2 / d.α) - mean(d)^2 function skewness(d::Weibull) μ = mean(d) σ2 = var(d) σ = sqrt(σ2) r = μ / σ - gamma(1 + 3/d.α) * (d.θ/σ)^3 - 3r - r^3 + gamma(1 + 3 / d.α) * (d.θ / σ)^3 - 3r - r^3 end function kurtosis(d::Weibull) @@ -82,12 +84,12 @@ function kurtosis(d::Weibull) r = μ / σ r2 = r^2 r4 = r2^2 - (θ/σ)^4 * gamma(1 + 4/α) - 4γ*r - 6r2 - r4 - 3 + (θ / σ)^4 * gamma(1 + 4 / α) - 4γ * r - 6r2 - r4 - 3 end function entropy(d::Weibull) α, θ = params(d) - 0.5772156649015328606 * (1 - 1/α) + log(θ/α) + 1 + 0.5772156649015328606 * (1 - 1 / α) + log(θ / α) + 1 end @@ -107,20 +109,20 @@ function logpdf(d::Weibull, x::Real) x < 0 || isinf(x) ? 
oftype(res, -Inf) : res end -zval(d::Weibull, x::Real) = (max(x, 0) / d.θ) ^ d.α -xval(d::Weibull, z::Real) = d.θ * z ^ (1 / d.α) +zval(d::Weibull, x::Real) = (max(x, 0) / d.θ)^d.α +xval(d::Weibull, z::Real) = d.θ * z^(1 / d.α) -cdf(d::Weibull, x::Real) = -expm1(- zval(d, x)) -ccdf(d::Weibull, x::Real) = exp(- zval(d, x)) -logcdf(d::Weibull, x::Real) = log1mexp(- zval(d, x)) -logccdf(d::Weibull, x::Real) = - zval(d, x) +cdf(d::Weibull, x::Real) = -expm1(-zval(d, x)) +ccdf(d::Weibull, x::Real) = exp(-zval(d, x)) +logcdf(d::Weibull, x::Real) = log1mexp(-zval(d, x)) +logccdf(d::Weibull, x::Real) = -zval(d, x) quantile(d::Weibull, p::Real) = xval(d, -log1p(-p)) cquantile(d::Weibull, p::Real) = xval(d, -log(p)) invlogcdf(d::Weibull, lp::Real) = xval(d, -log1mexp(lp)) invlogccdf(d::Weibull, lp::Real) = xval(d, -lp) -function gradlogpdf(d::Weibull{T}, x::Real) where T<:Real +function gradlogpdf(d::Weibull{T}, x::Real) where {T<:Real} if insupport(Weibull, x) α, θ = params(d) (α - 1) / x - α * x^(α - 1) / (θ^α) @@ -142,18 +144,23 @@ rand(rng::AbstractRNG, d::Weibull) = xval(d, randexp(rng)) Compute the maximum likelihood estimate of the [`Weibull`](@ref) distribution with Newton's method. """ -function fit_mle(::Type{<:Weibull}, x::AbstractArray{<:Real}; - alpha0::Real = 1, maxiter::Int = 1000, tol::Real = 1e-16) +function fit_mle( + ::Type{<:Weibull}, + x::AbstractArray{<:Real}; + alpha0::Real = 1, + maxiter::Int = 1000, + tol::Real = 1e-16, +) N = 0 lnx = map(log, x) - lnxsq = lnx.^2 + lnxsq = lnx .^ 2 mean_lnx = mean(lnx) # first iteration outside loop, prevents type instability in α, ϵ - xpow0 = x.^alpha0 + xpow0 = x .^ alpha0 sum_xpow0 = sum(xpow0) dot_xpowlnx0 = dot(xpow0, lnx) @@ -168,7 +175,7 @@ function fit_mle(::Type{<:Weibull}, x::AbstractArray{<:Real}; while ϵ > tol && N < maxiter - xpow = x.^α + xpow = x .^ α sum_xpow = sum(xpow) dot_xpowlnx = dot(xpow, lnx) @@ -182,6 +189,6 @@ function fit_mle(::Type{<:Weibull}, x::AbstractArray{<:Real}; N += 1 end - θ = mean(x.^α)^(1 / α) + θ = mean(x .^ α)^(1 / α) return Weibull(α, θ) end diff --git a/src/univariate/discrete/bernoulli.jl b/src/univariate/discrete/bernoulli.jl index c1dee968ce..0559136fe2 100644 --- a/src/univariate/discrete/bernoulli.jl +++ b/src/univariate/discrete/bernoulli.jl @@ -27,15 +27,16 @@ External links: struct Bernoulli{T<:Real} <: DiscreteUnivariateDistribution p::T - Bernoulli{T}(p::Real) where {T <: Real} = new{T}(p) + Bernoulli{T}(p::Real) where {T<:Real} = new{T}(p) end -function Bernoulli(p::Real; check_args::Bool=true) +function Bernoulli(p::Real; check_args::Bool = true) @check_args Bernoulli (p, zero(p) <= p <= one(p)) return Bernoulli{typeof(p)}(p) end -Bernoulli(p::Integer; check_args::Bool=true) = Bernoulli(float(p); check_args=check_args) +Bernoulli(p::Integer; check_args::Bool = true) = + Bernoulli(float(p); check_args = check_args) Bernoulli() = Bernoulli{Float64}(0.5) @distr_support Bernoulli false true @@ -59,20 +60,19 @@ partype(::Bernoulli{T}) where {T} = T #### Properties mean(d::Bernoulli) = succprob(d) -var(d::Bernoulli) = succprob(d) * failprob(d) +var(d::Bernoulli) = succprob(d) * failprob(d) skewness(d::Bernoulli) = (p0 = failprob(d); p1 = succprob(d); (p0 - p1) / sqrt(p0 * p1)) kurtosis(d::Bernoulli) = 1 / var(d) - 6 -mode(d::Bernoulli) = ifelse(succprob(d) > 1/2, 1, 0) +mode(d::Bernoulli) = ifelse(succprob(d) > 1 / 2, 1, 0) function modes(d::Bernoulli) p = succprob(d) - p < 1/2 ? [0] : - p > 1/2 ? [1] : [0, 1] + p < 1 / 2 ? [0] : p > 1 / 2 ? 
[1] : [0, 1] end -median(d::Bernoulli) = ifelse(succprob(d) <= 1/2, 0, 1) +median(d::Bernoulli) = ifelse(succprob(d) <= 1 / 2, 0, 1) function entropy(d::Bernoulli) p0 = failprob(d) @@ -83,23 +83,20 @@ end #### Evaluation pdf(d::Bernoulli, x::Bool) = x ? succprob(d) : failprob(d) -pdf(d::Bernoulli, x::Real) = x == 0 ? failprob(d) : - x == 1 ? succprob(d) : zero(d.p) +pdf(d::Bernoulli, x::Real) = x == 0 ? failprob(d) : x == 1 ? succprob(d) : zero(d.p) logpdf(d::Bernoulli, x::Real) = log(pdf(d, x)) cdf(d::Bernoulli, x::Bool) = x ? one(d.p) : failprob(d) -cdf(d::Bernoulli, x::Int) = x < 0 ? zero(d.p) : - x < 1 ? failprob(d) : one(d.p) +cdf(d::Bernoulli, x::Int) = x < 0 ? zero(d.p) : x < 1 ? failprob(d) : one(d.p) ccdf(d::Bernoulli, x::Bool) = x ? zero(d.p) : succprob(d) -ccdf(d::Bernoulli, x::Int) = x < 0 ? one(d.p) : - x < 1 ? succprob(d) : zero(d.p) +ccdf(d::Bernoulli, x::Int) = x < 0 ? one(d.p) : x < 1 ? succprob(d) : zero(d.p) -function quantile(d::Bernoulli{T}, p::Real) where T<:Real +function quantile(d::Bernoulli{T}, p::Real) where {T<:Real} 0 <= p <= 1 ? (p <= failprob(d) ? zero(T) : one(T)) : T(NaN) end -function cquantile(d::Bernoulli{T}, p::Real) where T<:Real +function cquantile(d::Bernoulli{T}, p::Real) where {T<:Real} 0 <= p <= 1 ? (p >= succprob(d) ? zero(T) : one(T)) : T(NaN) end @@ -107,7 +104,7 @@ mgf(d::Bernoulli, t::Real) = failprob(d) + succprob(d) * exp(t) function cgf(d::Bernoulli, t) p, = params(d) # log(1-p+p*exp(t)) - logaddexp(log1p(-p), t+log(p)) + logaddexp(log1p(-p), t + log(p)) end cf(d::Bernoulli, t::Real) = failprob(d) + succprob(d) * cis(t) @@ -125,7 +122,8 @@ end BernoulliStats(c0::Real, c1::Real) = BernoulliStats(promote(c0, c1)...) -fit_mle(::Type{T}, ss::BernoulliStats) where {T<:Bernoulli} = T(ss.cnt1 / (ss.cnt0 + ss.cnt1)) +fit_mle(::Type{T}, ss::BernoulliStats) where {T<:Bernoulli} = + T(ss.cnt1 / (ss.cnt0 + ss.cnt1)) function suffstats(::Type{<:Bernoulli}, x::AbstractArray{<:Integer}) c0 = c1 = 0 @@ -141,7 +139,11 @@ function suffstats(::Type{<:Bernoulli}, x::AbstractArray{<:Integer}) BernoulliStats(c0, c1) end -function suffstats(::Type{<:Bernoulli}, x::AbstractArray{<:Integer}, w::AbstractArray{<:Real}) +function suffstats( + ::Type{<:Bernoulli}, + x::AbstractArray{<:Integer}, + w::AbstractArray{<:Real}, +) length(x) == length(w) || throw(DimensionMismatch("inconsistent argument dimensions")) z = zero(eltype(w)) c0 = c1 = z + z # possibly widened and different from `z`, e.g., if `z = true` diff --git a/src/univariate/discrete/bernoullilogit.jl b/src/univariate/discrete/bernoullilogit.jl index f82059fed9..37ec5de1df 100644 --- a/src/univariate/discrete/bernoullilogit.jl +++ b/src/univariate/discrete/bernoullilogit.jl @@ -27,7 +27,8 @@ BernoulliLogit() = BernoulliLogit(0.0) Base.eltype(::Type{<:BernoulliLogit}) = Bool #### Conversions -Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit) where {T<:Real} = BernoulliLogit{T}(T(d.logitp)) +Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit) where {T<:Real} = + BernoulliLogit{T}(T(d.logitp)) Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit{T}) where {T<:Real} = d #### Parameters @@ -43,7 +44,7 @@ partype(::BernoulliLogit{T}) where {T} = T #### Properties mean(d::BernoulliLogit) = succprob(d) -var(d::BernoulliLogit) = succprob(d) * failprob(d) +var(d::BernoulliLogit) = succprob(d) * failprob(d) function skewness(d::BernoulliLogit) p0 = failprob(d) p1 = succprob(d) @@ -63,28 +64,38 @@ median(d::BernoulliLogit) = d.logitp > 0 function entropy(d::BernoulliLogit) logitp = d.logitp - 
(logitp == -Inf || logitp == Inf) ? float(zero(logitp)) : (logitp > 0 ? -(succprob(d) * logitp + logfailprob(d)) : -(logsuccprob(d) - failprob(d) * logitp)) + (logitp == -Inf || logitp == Inf) ? float(zero(logitp)) : + ( + logitp > 0 ? -(succprob(d) * logitp + logfailprob(d)) : + -(logsuccprob(d) - failprob(d) * logitp) + ) end #### Evaluation pdf(d::BernoulliLogit, x::Bool) = x ? succprob(d) : failprob(d) -pdf(d::BernoulliLogit, x::Real) = x == 0 ? failprob(d) : (x == 1 ? succprob(d) : zero(float(d.logitp))) +pdf(d::BernoulliLogit, x::Real) = + x == 0 ? failprob(d) : (x == 1 ? succprob(d) : zero(float(d.logitp))) logpdf(d::BernoulliLogit, x::Bool) = x ? logsuccprob(d) : logfailprob(d) -logpdf(d::BernoulliLogit, x::Real) = x == 0 ? logfailprob(d) : (x == 1 ? logsuccprob(d) : oftype(float(d.logitp), -Inf)) +logpdf(d::BernoulliLogit, x::Real) = + x == 0 ? logfailprob(d) : (x == 1 ? logsuccprob(d) : oftype(float(d.logitp), -Inf)) cdf(d::BernoulliLogit, x::Bool) = x ? one(float(d.logitp)) : failprob(d) -cdf(d::BernoulliLogit, x::Int) = x < 0 ? zero(float(d.logitp)) : (x < 1 ? failprob(d) : one(float(d.logitp))) +cdf(d::BernoulliLogit, x::Int) = + x < 0 ? zero(float(d.logitp)) : (x < 1 ? failprob(d) : one(float(d.logitp))) logcdf(d::BernoulliLogit, x::Bool) = x ? zero(float(d.logitp)) : logfailprob(d) -logcdf(d::BernoulliLogit, x::Int) = x < 0 ? oftype(float(d.logitp), -Inf) : (x < 1 ? logfailprob(d) : zero(float(d.logitp))) +logcdf(d::BernoulliLogit, x::Int) = + x < 0 ? oftype(float(d.logitp), -Inf) : (x < 1 ? logfailprob(d) : zero(float(d.logitp))) ccdf(d::BernoulliLogit, x::Bool) = x ? zero(float(d.logitp)) : succprob(d) -ccdf(d::BernoulliLogit, x::Int) = x < 0 ? one(float(d.logitp)) : (x < 1 ? succprob(d) : zero(float(d.logitp))) +ccdf(d::BernoulliLogit, x::Int) = + x < 0 ? one(float(d.logitp)) : (x < 1 ? succprob(d) : zero(float(d.logitp))) logccdf(d::BernoulliLogit, x::Bool) = x ? oftype(float(d.logitp), -Inf) : logsuccprob(d) -logccdf(d::BernoulliLogit, x::Int) = x < 0 ? zero(float(d.logitp)) : (x < 1 ? logsuccprob(d) : oftype(float(d.logitp), -Inf)) +logccdf(d::BernoulliLogit, x::Int) = + x < 0 ? zero(float(d.logitp)) : (x < 1 ? 
logsuccprob(d) : oftype(float(d.logitp), -Inf)) function quantile(d::BernoulliLogit, p::Real) T = float(partype(d)) diff --git a/src/univariate/discrete/betabinomial.jl b/src/univariate/discrete/betabinomial.jl index 7f2a0ac0dc..1a42b1eec4 100644 --- a/src/univariate/discrete/betabinomial.jl +++ b/src/univariate/discrete/betabinomial.jl @@ -23,21 +23,23 @@ struct BetaBinomial{T<:Real} <: DiscreteUnivariateDistribution α::T β::T - BetaBinomial{T}(n::Integer, α::T, β::T) where {T <: Real} = new{T}(n, α, β) + BetaBinomial{T}(n::Integer, α::T, β::T) where {T<:Real} = new{T}(n, α, β) end -function BetaBinomial(n::Integer, α::T, β::T; check_args::Bool=true) where {T <: Real} +function BetaBinomial(n::Integer, α::T, β::T; check_args::Bool = true) where {T<:Real} @check_args BetaBinomial (n, n >= zero(n)) (α, α > zero(α)) (β, β > zero(β)) return BetaBinomial{T}(n, α, β) end -BetaBinomial(n::Integer, α::Real, β::Real; check_args::Bool=true) = BetaBinomial(n, promote(α, β)...; check_args=check_args) -BetaBinomial(n::Integer, α::Integer, β::Integer; check_args::Bool=true) = BetaBinomial(n, float(α), float(β); check_args=check_args) +BetaBinomial(n::Integer, α::Real, β::Real; check_args::Bool = true) = + BetaBinomial(n, promote(α, β)...; check_args = check_args) +BetaBinomial(n::Integer, α::Integer, β::Integer; check_args::Bool = true) = + BetaBinomial(n, float(α), float(β); check_args = check_args) @distr_support BetaBinomial 0 d.n #### Conversions -function convert(::Type{BetaBinomial{T}}, n::Int, α::S, β::S) where {T <: Real, S <: Real} +function convert(::Type{BetaBinomial{T}}, n::Int, α::S, β::S) where {T<:Real,S<:Real} BetaBinomial(n, T(α), T(β)) end function Base.convert(::Type{BetaBinomial{T}}, d::BetaBinomial) where {T<:Real} @@ -65,7 +67,7 @@ end function skewness(d::BetaBinomial) n, α, β = d.n, d.α, d.β t1 = (α + β + 2n) * (β - α) / (α + β + 2) - t2 = sqrt((1 + α +β) / (n * α * β * (n + α + β))) + t2 = sqrt((1 + α + β) / (n * α * β * (n + α + β))) return t1 * t2 end @@ -74,11 +76,18 @@ function kurtosis(d::BetaBinomial) alpha_beta_sum = α + β alpha_beta_product = α * β numerator = ((alpha_beta_sum)^2) * (1 + alpha_beta_sum) - denominator = (n * alpha_beta_product) * (alpha_beta_sum + 2) * (alpha_beta_sum + 3) * (alpha_beta_sum + n) + denominator = + (n * alpha_beta_product) * + (alpha_beta_sum + 2) * + (alpha_beta_sum + 3) * + (alpha_beta_sum + n) left = numerator / denominator - right = (alpha_beta_sum) * (alpha_beta_sum - 1 + 6n) + 3*alpha_beta_product * (n - 2) + 6n^2 - right -= (3*alpha_beta_product * n * (6 - n)) / alpha_beta_sum - right -= (18*alpha_beta_product * n^2) / (alpha_beta_sum)^2 + right = + (alpha_beta_sum) * (alpha_beta_sum - 1 + 6n) + + 3 * alpha_beta_product * (n - 2) + + 6n^2 + right -= (3 * alpha_beta_product * n * (6 - n)) / alpha_beta_sum + right -= (18 * alpha_beta_product * n^2) / (alpha_beta_sum)^2 return (left * right) - 3 end @@ -86,8 +95,8 @@ function logpdf(d::BetaBinomial, k::Real) n, α, β = d.n, d.α, d.β _insupport = insupport(d, k) _k = _insupport ? round(Int, k) : 0 - logbinom = - log1p(n) - logbeta(_k + 1, n - _k + 1) - lognum = logbeta(_k + α, n - _k + β) + logbinom = -log1p(n) - logbeta(_k + 1, n - _k + 1) + lognum = logbeta(_k + α, n - _k + β) logdenom = logbeta(α, β) result = logbinom + lognum - logdenom return _insupport ? 
result : oftype(result, -Inf) diff --git a/src/univariate/discrete/binomial.jl b/src/univariate/discrete/binomial.jl index 74c90ee18c..f15174543c 100644 --- a/src/univariate/discrete/binomial.jl +++ b/src/univariate/discrete/binomial.jl @@ -26,16 +26,17 @@ struct Binomial{T<:Real} <: DiscreteUnivariateDistribution n::Int p::T - Binomial{T}(n, p) where {T <: Real} = new{T}(n, p) + Binomial{T}(n, p) where {T<:Real} = new{T}(n, p) end -function Binomial(n::Integer, p::Real; check_args::Bool=true) +function Binomial(n::Integer, p::Real; check_args::Bool = true) @check_args Binomial (n, n >= zero(n)) (p, zero(p) <= p <= one(p)) return Binomial{typeof(p)}(n, p) end -Binomial(n::Integer, p::Integer; check_args::Bool=true) = Binomial(n, float(p); check_args=check_args) -function Binomial(n::Integer; check_args::Bool=true) +Binomial(n::Integer, p::Integer; check_args::Bool = true) = + Binomial(n, float(p); check_args = check_args) +function Binomial(n::Integer; check_args::Bool = true) @check_args Binomial (n, n >= zero(n)) Binomial{Float64}(n, 0.5) end @@ -45,7 +46,7 @@ Binomial() = Binomial{Float64}(1, 0.5) #### Conversions -function convert(::Type{Binomial{T}}, n::Int, p::Real) where T<:Real +function convert(::Type{Binomial{T}}, n::Int, p::Real) where {T<:Real} return Binomial(n, T(p)) end function Base.convert(::Type{Binomial{T}}, d::Binomial) where {T<:Real} @@ -67,7 +68,7 @@ params(d::Binomial) = (d.n, d.p) mean(d::Binomial) = ntrials(d) * succprob(d) var(d::Binomial) = ntrials(d) * succprob(d) * failprob(d) -function mode(d::Binomial{T}) where T<:Real +function mode(d::Binomial{T}) where {T<:Real} (n, p) = params(d) n > 0 ? floor(Int, (n + 1) * d.p) : zero(T) end @@ -81,12 +82,12 @@ function median(dist::Binomial) # Thus if |k - mean| < bound for one of the two candidates if p = 1/2 and n odd # or |k - mean| <= bound for one of the two candidates otherwise, # the other candidate can't satisfy the condition and hence k must be the median - bound = max(min(dist.p, 1-dist.p), loghalf) + bound = max(min(dist.p, 1 - dist.p), loghalf) dist_mean = mean(dist) - + floor_mean = floor(Int, dist_mean) difference = dist_mean - floor_mean - + if difference <= bound # The only case where the median satisfies |median - mean| <= 1 - bound with equality # is p = 1/2 and n odd @@ -116,7 +117,7 @@ function kurtosis(d::Binomial) (1 - 6u) / (n * u) end -function entropy(d::Binomial; approx::Bool=false) +function entropy(d::Binomial; approx::Bool = false) n, p1 = params(d) (p1 == 0 || p1 == 1 || n == 0) && return zero(p1) p0 = 1 - p1 @@ -127,8 +128,8 @@ function entropy(d::Binomial; approx::Bool=false) lp = n * log(p0) s = exp(lp) * lp for k = 1:n - lp += log((n - k + 1) / k) + lg - s += exp(lp) * lp + lp += log((n - k + 1) / k) + lg + s += exp(lp) * lp end return -s end @@ -139,7 +140,8 @@ function kldivergence(p::Binomial, q::Binomial; kwargs...) nq = ntrials(q) succp = succprob(p) succq = succprob(q) - res = np * kldivergence(Bernoulli{typeof(succp)}(succp), Bernoulli{typeof(succq)}(succq)) + res = + np * kldivergence(Bernoulli{typeof(succp)}(succp), Bernoulli{typeof(succq)}(succq)) if np == nq iszero(np) && return zero(res) return res @@ -162,19 +164,19 @@ function rand(rng::AbstractRNG, d::Binomial) if p <= 0.5 r = p else - r = 1.0-p + r = 1.0 - p end - if r*n <= 10.0 - y = rand(rng, BinomialGeomSampler(n,r)) + if r * n <= 10.0 + y = rand(rng, BinomialGeomSampler(n, r)) else - y = rand(rng, BinomialTPESampler(n,r)) + y = rand(rng, BinomialTPESampler(n, r)) end - p <= 0.5 ? y : n-y + p <= 0.5 ? 
y : n - y end function mgf(d::Binomial, t::Real) n, p = params(d) - (one(p) - p + p * exp(t)) ^ n + (one(p) - p + p * exp(t))^n end function cgf(d::Binomial, t) n, p = params(d) @@ -183,7 +185,7 @@ end function cf(d::Binomial, t::Real) n, p = params(d) - (one(p) - p + p * cis(t)) ^ n + (one(p) - p + p * cis(t))^n end @@ -207,7 +209,12 @@ function suffstats(::Type{<:Binomial}, n::Integer, x::AbstractArray{<:Integer}) BinomialStats(ns, length(x), n) end -function suffstats(::Type{<:Binomial}, n::Integer, x::AbstractArray{<:Integer}, w::AbstractArray{<:Real}) +function suffstats( + ::Type{<:Binomial}, + n::Integer, + x::AbstractArray{<:Integer}, + w::AbstractArray{<:Real}, +) z = zero(eltype(x)) * zero(eltype(w)) ns = ne = z + z # possibly widened and different from `z`, e.g., if `z = true` for (xi, wi) in zip(x, w) @@ -218,17 +225,26 @@ function suffstats(::Type{<:Binomial}, n::Integer, x::AbstractArray{<:Integer}, BinomialStats(ns, ne, n) end -const BinomData = Tuple{Int, AbstractArray} +const BinomData = Tuple{Int,AbstractArray} suffstats(::Type{T}, data::BinomData) where {T<:Binomial} = suffstats(T, data...) -suffstats(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = suffstats(T, data..., w) +suffstats(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = + suffstats(T, data..., w) fit_mle(::Type{T}, ss::BinomialStats) where {T<:Binomial} = T(ss.n, ss.ns / (ss.ne * ss.n)) -fit_mle(::Type{T}, n::Integer, x::AbstractArray{<:Integer}) where {T<:Binomial}= fit_mle(T, suffstats(T, n, x)) -fit_mle(::Type{T}, n::Integer, x::AbstractArray{<:Integer}, w::AbstractArray{<:Real}) where {T<:Binomial} = fit_mle(T, suffstats(T, n, x, w)) +fit_mle(::Type{T}, n::Integer, x::AbstractArray{<:Integer}) where {T<:Binomial} = + fit_mle(T, suffstats(T, n, x)) +fit_mle( + ::Type{T}, + n::Integer, + x::AbstractArray{<:Integer}, + w::AbstractArray{<:Real}, +) where {T<:Binomial} = fit_mle(T, suffstats(T, n, x, w)) fit_mle(::Type{T}, data::BinomData) where {T<:Binomial} = fit_mle(T, suffstats(T, data)) -fit_mle(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = fit_mle(T, suffstats(T, data, w)) +fit_mle(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = + fit_mle(T, suffstats(T, data, w)) fit(::Type{T}, data::BinomData) where {T<:Binomial} = fit_mle(T, data) -fit(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = fit_mle(T, data, w) +fit(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = + fit_mle(T, data, w) diff --git a/src/univariate/discrete/categorical.jl b/src/univariate/discrete/categorical.jl index 1ac338f6f0..e6cf29e772 100644 --- a/src/univariate/discrete/categorical.jl +++ b/src/univariate/discrete/categorical.jl @@ -24,32 +24,43 @@ External links: * [Categorical distribution on Wikipedia](http://en.wikipedia.org/wiki/Categorical_distribution) """ -const Categorical{P<:Real,Ps<:AbstractVector{P}} = DiscreteNonParametric{Int,P,Base.OneTo{Int},Ps} +const Categorical{P<:Real,Ps<:AbstractVector{P}} = + DiscreteNonParametric{Int,P,Base.OneTo{Int},Ps} -function Categorical{P,Ps}(p::Ps; check_args::Bool=true) where {P<:Real, Ps<:AbstractVector{P}} +function Categorical{P,Ps}( + p::Ps; + check_args::Bool = true, +) where {P<:Real,Ps<:AbstractVector{P}} @check_args Categorical (p, isprobvec(p), "vector p is not a probability vector") - return Categorical{P,Ps}(Base.OneTo(length(p)), p; check_args=check_args) + return Categorical{P,Ps}(Base.OneTo(length(p)), p; 
check_args = check_args) end -Categorical(p::AbstractVector{P}; check_args::Bool=true) where {P<:Real} = - Categorical{P,typeof(p)}(p; check_args=check_args) +Categorical(p::AbstractVector{P}; check_args::Bool = true) where {P<:Real} = + Categorical{P,typeof(p)}(p; check_args = check_args) -function Categorical(k::Integer; check_args::Bool=true) +function Categorical(k::Integer; check_args::Bool = true) @check_args Categorical (k, k >= 1, "at least one category is required") - return Categorical{Float64,Vector{Float64}}(Base.OneTo(k), fill(1/k, k); check_args=false) + return Categorical{Float64,Vector{Float64}}( + Base.OneTo(k), + fill(1 / k, k); + check_args = false, + ) end -Categorical(probabilities::Real...; check_args::Bool=true) = Categorical([probabilities...]; check_args=check_args) +Categorical(probabilities::Real...; check_args::Bool = true) = + Categorical([probabilities...]; check_args = check_args) ### Conversions -convert(::Type{Categorical{P,Ps}}, x::AbstractVector{<:Real}) where { - P<:Real,Ps<:AbstractVector{P}} = Categorical{P,Ps}(Ps(x)) +convert( + ::Type{Categorical{P,Ps}}, + x::AbstractVector{<:Real}, +) where {P<:Real,Ps<:AbstractVector{P}} = Categorical{P,Ps}(Ps(x)) ### Parameters ncategories(d::Categorical) = support(d).stop -params(d::Categorical{P,Ps}) where {P<:Real, Ps<:AbstractVector{P}} = (probs(d),) +params(d::Categorical{P,Ps}) where {P<:Real,Ps<:AbstractVector{P}} = (probs(d),) partype(::Categorical{T}) where {T<:Real} = T function Base.isapprox(c1::Categorical, c2::Categorical; kwargs...) @@ -58,7 +69,7 @@ function Base.isapprox(c1::Categorical, c2::Categorical; kwargs...) # we explicitly redefine the method for `DiscreteNonParametric` which also compares # the support since `isapprox(::OneTo, ::OneTo)` is broken on Julia 1.6 (issue #1675) return length(support(c1)) == length(support(c2)) && - isapprox(probs(c1), probs(c2); kwargs...) + isapprox(probs(c1), probs(c2); kwargs...) end ### Statistics @@ -68,7 +79,7 @@ function median(d::Categorical{T}) where {T<:Real} p = probs(d) cp = zero(T) i = 0 - while cp < 1/2 && i <= k + while cp < 1 / 2 && i <= k i += 1 @inbounds cp += p[i] end @@ -96,16 +107,16 @@ function _pdf!(r::AbstractArray{<:Real}, d::Categorical{T}, rgn::UnitRange) wher vr = min(vlast, ncategories(d)) p = probs(d) if vl > vfirst - for i = 1:(vl - vfirst) + for i = 1:(vl-vfirst) r[i] = zero(T) end end fm1 = vfirst - 1 for v = vl:vr - r[v - fm1] = p[v] + r[v-fm1] = p[v] end if vr < vlast - for i = (vr - vfirst + 2):length(rgn) + for i = (vr-vfirst+2):length(rgn) r[i] = zero(T) end end @@ -115,8 +126,7 @@ end # sampling -sampler(d::Categorical{P,Ps}) where {P<:Real,Ps<:AbstractVector{P}} = - AliasTable(probs(d)) +sampler(d::Categorical{P,Ps}) where {P<:Real,Ps<:AbstractVector{P}} = AliasTable(probs(d)) ### sufficient statistics @@ -125,20 +135,24 @@ struct CategoricalStats <: SufficientStats h::Vector{Float64} end -function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}) where T<:Integer - for i = 1 : length(x) +function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}) where {T<:Integer} + for i = 1:length(x) @inbounds xi = x[i] - h[xi] += 1. 
# cannot use @inbounds, as no guarantee that x[i] is in bound + h[xi] += 1.0 # cannot use @inbounds, as no guarantee that x[i] is in bound end h end -function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer +function add_categorical_counts!( + h::Vector{Float64}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} n = length(x) if n != length(w) throw(DimensionMismatch("Inconsistent array lengths.")) end - for i = 1 : n + for i = 1:n @inbounds xi = x[i] @inbounds wi = w[i] h[xi] += wi # cannot use @inbounds, as no guarantee that x[i] is in bound @@ -146,18 +160,24 @@ function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}, w::Abs h end -function suffstats(::Type{<:Categorical}, k::Int, x::AbstractArray{T}) where T<:Integer +function suffstats(::Type{<:Categorical}, k::Int, x::AbstractArray{T}) where {T<:Integer} CategoricalStats(add_categorical_counts!(zeros(k), x)) end -function suffstats(::Type{<:Categorical}, k::Int, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer +function suffstats( + ::Type{<:Categorical}, + k::Int, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} CategoricalStats(add_categorical_counts!(zeros(k), x, w)) end -const CategoricalData = Tuple{Int, AbstractArray} +const CategoricalData = Tuple{Int,AbstractArray} suffstats(::Type{<:Categorical}, data::CategoricalData) = suffstats(Categorical, data...) -suffstats(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = suffstats(Categorical, data..., w) +suffstats(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = + suffstats(Categorical, data..., w) # Model fitting @@ -165,19 +185,31 @@ function fit_mle(::Type{<:Categorical}, ss::CategoricalStats) Categorical(normalize!(ss.h, 1)) end -function fit_mle(::Type{<:Categorical}, k::Integer, x::AbstractArray{T}) where T<:Integer - Categorical(normalize!(add_categorical_counts!(zeros(k), x), 1), check_args=false) +function fit_mle(::Type{<:Categorical}, k::Integer, x::AbstractArray{T}) where {T<:Integer} + Categorical(normalize!(add_categorical_counts!(zeros(k), x), 1), check_args = false) end -function fit_mle(::Type{<:Categorical}, k::Integer, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer - Categorical(normalize!(add_categorical_counts!(zeros(k), x, w), 1), check_args=false) +function fit_mle( + ::Type{<:Categorical}, + k::Integer, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} + Categorical(normalize!(add_categorical_counts!(zeros(k), x, w), 1), check_args = false) end fit_mle(::Type{<:Categorical}, data::CategoricalData) = fit_mle(Categorical, data...) 
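# A minimal usage sketch of the categorical MLE above (illustrative only; it
# assumes nothing beyond the `fit_mle` method and `probs` accessor that this
# file defines): the estimate is just the vector of normalized category counts.
using Distributions
x = [1, 2, 2, 3, 3, 3]          # observed labels from the categories 1:3
d = fit_mle(Categorical, 3, x)  # category counts [1, 2, 3], normalized
probs(d)                        # ≈ [1/6, 1/3, 1/2]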
-fit_mle(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = fit_mle(Categorical, data..., w) +fit_mle(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = + fit_mle(Categorical, data..., w) -fit_mle(::Type{<:Categorical}, x::AbstractArray{T}) where {T<:Integer} = fit_mle(Categorical, maximum(x), x) -fit_mle(::Type{<:Categorical}, x::AbstractArray{T}, w::AbstractArray{Float64}) where {T<:Integer} = fit_mle(Categorical, maximum(x), x, w) +fit_mle(::Type{<:Categorical}, x::AbstractArray{T}) where {T<:Integer} = + fit_mle(Categorical, maximum(x), x) +fit_mle( + ::Type{<:Categorical}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} = fit_mle(Categorical, maximum(x), x, w) fit(::Type{<:Categorical}, data::CategoricalData) = fit_mle(Categorical, data) -fit(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = fit_mle(Categorical, data, w) +fit(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = + fit_mle(Categorical, data, w) diff --git a/src/univariate/discrete/dirac.jl b/src/univariate/discrete/dirac.jl index 94d082b0fa..222dda35f4 100644 --- a/src/univariate/discrete/dirac.jl +++ b/src/univariate/discrete/dirac.jl @@ -50,7 +50,7 @@ logccdf(d::Dirac, x::Real) = x < d.value ? 0.0 : isnan(x) ? NaN : -Inf quantile(d::Dirac{T}, p::Real) where {T} = 0 <= p <= 1 ? d.value : T(NaN) mgf(d::Dirac, t) = exp(t * d.value) -cgf(d::Dirac, t) = t*d.value +cgf(d::Dirac, t) = t * d.value cf(d::Dirac, t) = cis(t * d.value) #### Sampling diff --git a/src/univariate/discrete/discretenonparametric.jl b/src/univariate/discrete/discretenonparametric.jl index 8e1eefab6e..dcd8ee7ca1 100644 --- a/src/univariate/discrete/discretenonparametric.jl +++ b/src/univariate/discrete/discretenonparametric.jl @@ -17,16 +17,23 @@ External links * [Probability mass function on Wikipedia](http://en.wikipedia.org/wiki/Probability_mass_function) """ -struct DiscreteNonParametric{T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} <: DiscreteUnivariateDistribution +struct DiscreteNonParametric{T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} <: + DiscreteUnivariateDistribution support::Ts p::Ps - function DiscreteNonParametric{T,P,Ts,Ps}(xs::Ts, ps::Ps; check_args::Bool=true) where { - T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} + function DiscreteNonParametric{T,P,Ts,Ps}( + xs::Ts, + ps::Ps; + check_args::Bool = true, + ) where {T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} check_args || return new{T,P,Ts,Ps}(xs, ps) @check_args( DiscreteNonParametric, - (length(xs) == length(ps), "length of support and probability vector must be equal"), + ( + length(xs) == length(ps), + "length of support and probability vector must be equal", + ), (ps, isprobvec(ps), "vector is not a probability vector"), (xs, allunique(xs), "support must contain only unique elements"), ) @@ -35,16 +42,28 @@ struct DiscreteNonParametric{T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractV end end -DiscreteNonParametric(vs::AbstractVector{T}, ps::AbstractVector{P}; check_args::Bool=true) where { - T<:Real,P<:Real} = - DiscreteNonParametric{T,P,typeof(vs),typeof(ps)}(vs, ps; check_args=check_args) +DiscreteNonParametric( + vs::AbstractVector{T}, + ps::AbstractVector{P}; + check_args::Bool = true, +) where {T<:Real,P<:Real} = + DiscreteNonParametric{T,P,typeof(vs),typeof(ps)}(vs, ps; check_args = check_args) -Base.eltype(::Type{<:DiscreteNonParametric{T}}) where T = T 
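# A minimal sketch of constructing the `DiscreteNonParametric` distribution
# defined above (assumes its constructor plus the `pdf`/`cdf` methods that
# appear later in this file). Support values must be unique and `ps` must be a
# probability vector, otherwise the inner constructor's `@check_args` throws.
using Distributions
d = DiscreteNonParametric([1.0, 3.0, 7.0], [0.2, 0.5, 0.3])
pdf(d, 3.0)  # 0.5, the probability mass placed on the atom at 3.0
cdf(d, 3.0)  # 0.7, accumulated over support values ≤ 3.0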
+Base.eltype(::Type{<:DiscreteNonParametric{T}}) where {T} = T # Conversion -convert(::Type{DiscreteNonParametric{T,P,Ts,Ps}}, d::DiscreteNonParametric) where {T,P,Ts,Ps} = - DiscreteNonParametric{T,P,Ts,Ps}(convert(Ts, support(d)), convert(Ps, probs(d)), check_args=false) -Base.convert(::Type{DiscreteNonParametric{T,P,Ts,Ps}}, d::DiscreteNonParametric{T,P,Ts,Ps}) where {T,P,Ts,Ps} = d +convert( + ::Type{DiscreteNonParametric{T,P,Ts,Ps}}, + d::DiscreteNonParametric, +) where {T,P,Ts,Ps} = DiscreteNonParametric{T,P,Ts,Ps}( + convert(Ts, support(d)), + convert(Ps, probs(d)), + check_args = false, +) +Base.convert( + ::Type{DiscreteNonParametric{T,P,Ts,Ps}}, + d::DiscreteNonParametric{T,P,Ts,Ps}, +) where {T,P,Ts,Ps} = d # Accessors params(d::DiscreteNonParametric) = (d.support, d.p) @@ -61,14 +80,14 @@ support(d::DiscreteNonParametric) = d.support Get the vector of probabilities associated with the support of `d`. """ -probs(d::DiscreteNonParametric) = d.p +probs(d::DiscreteNonParametric) = d.p function Base.isapprox(c1::DiscreteNonParametric, c2::DiscreteNonParametric; kwargs...) support_c1 = support(c1) support_c2 = support(c2) return length(support_c1) == length(support_c2) && - isapprox(support_c1, support_c2; kwargs...) && - isapprox(probs(c1), probs(c2); kwargs...) + isapprox(support_c1, support_c2; kwargs...) && + isapprox(probs(c1), probs(c2); kwargs...) end # Sampling @@ -81,13 +100,12 @@ function rand(rng::AbstractRNG, d::DiscreteNonParametric) cp = p[1] i = 1 while cp <= draw && i < n - @inbounds cp += p[i +=1] + @inbounds cp += p[i+=1] end return x[i] end -sampler(d::DiscreteNonParametric) = - DiscreteNonParametricSampler(support(d), probs(d)) +sampler(d::DiscreteNonParametric) = DiscreteNonParametricSampler(support(d), probs(d)) # Override the method in testutils.jl since it assumes # an evenly-spaced integer support @@ -122,11 +140,11 @@ function cdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i in 1:stop_idx + @inbounds for i = 1:stop_idx s += ps[i] end else - @inbounds for i in (stop_idx + 1):n + @inbounds for i = (stop_idx+1):n s += ps[i] end s = 1 - s @@ -148,12 +166,12 @@ function ccdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i in 1:stop_idx + @inbounds for i = 1:stop_idx s += ps[i] end s = 1 - s else - @inbounds for i in (stop_idx + 1):n + @inbounds for i = (stop_idx+1):n s += ps[i] end end @@ -177,15 +195,14 @@ end minimum(d::DiscreteNonParametric) = first(support(d)) maximum(d::DiscreteNonParametric) = last(support(d)) -insupport(d::DiscreteNonParametric, x::Real) = - length(searchsorted(support(d), x)) > 0 +insupport(d::DiscreteNonParametric, x::Real) = length(searchsorted(support(d), x)) > 0 mean(d::DiscreteNonParametric) = dot(probs(d), support(d)) function var(d::DiscreteNonParametric) x = support(d) p = probs(d) - return var(x, Weights(p, one(eltype(p))); corrected=false) + return var(x, Weights(p, one(eltype(p))); corrected = false) end function skewness(d::DiscreteNonParametric) @@ -218,8 +235,8 @@ function mgf(d::DiscreteNonParametric, t::Real) x = support(d) p = probs(d) s = zero(Float64) - for i in 1:length(x) - s += p[i] * exp(t*x[i]) + for i = 1:length(x) + s += p[i] * exp(t * x[i]) end s end @@ -228,16 +245,20 @@ function cf(d::DiscreteNonParametric, t::Real) x = support(d) p = probs(d) s = zero(Complex{Float64}) - for i in 1:length(x) - s += p[i] * cis(t*x[i]) + for i = 1:length(x) + 
s += p[i] * cis(t * x[i]) end s end # Sufficient statistics -struct DiscreteNonParametricStats{T<:Real,W<:Real,Ts<:AbstractVector{T}, - Ws<:AbstractVector{W}} <: SufficientStats +struct DiscreteNonParametricStats{ + T<:Real, + W<:Real, + Ts<:AbstractVector{T}, + Ws<:AbstractVector{W}, +} <: SufficientStats support::Ts freq::Ws end @@ -248,12 +269,12 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { N == 0 && return DiscreteNonParametricStats(T[], Float64[]) n = 1 - vs = Vector{T}(undef,N) + vs = Vector{T}(undef, N) ps = zeros(Float64, N) x = sort(vec(x)) vs[1] = x[1] - ps[1] += 1. + ps[1] += 1.0 xprev = x[1] @inbounds for i = 2:N @@ -262,7 +283,7 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { n += 1 vs[n] = xi end - ps[n] += 1. + ps[n] += 1.0 xprev = xi end @@ -272,8 +293,12 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { end -function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}, - w::AbstractArray{W}; check_args::Bool=true) where {T<:Real,W<:Real} +function suffstats( + ::Type{<:DiscreteNonParametric}, + x::AbstractArray{T}, + w::AbstractArray{W}; + check_args::Bool = true, +) where {T<:Real,W<:Real} @check_args DiscreteNonParametric (length(x) == length(w)) @@ -311,6 +336,11 @@ end # # Model fitting -fit_mle(::Type{<:DiscreteNonParametric}, - ss::DiscreteNonParametricStats{T,W,Ts,Ws}) where {T,W,Ts,Ws} = - DiscreteNonParametric{T,W,Ts,Ws}(ss.support, normalize!(copy(ss.freq), 1), check_args=false) +fit_mle( + ::Type{<:DiscreteNonParametric}, + ss::DiscreteNonParametricStats{T,W,Ts,Ws}, +) where {T,W,Ts,Ws} = DiscreteNonParametric{T,W,Ts,Ws}( + ss.support, + normalize!(copy(ss.freq), 1), + check_args = false, +) diff --git a/src/univariate/discrete/discreteuniform.jl b/src/univariate/discrete/discreteuniform.jl index 3ffcaaf048..98414c2a30 100644 --- a/src/univariate/discrete/discreteuniform.jl +++ b/src/univariate/discrete/discreteuniform.jl @@ -26,11 +26,12 @@ struct DiscreteUniform <: DiscreteUnivariateDistribution b::Int pv::Float64 # individual probabilities - function DiscreteUniform(a::Real, b::Real; check_args::Bool=true) + function DiscreteUniform(a::Real, b::Real; check_args::Bool = true) @check_args DiscreteUniform (a <= b) new(a, b, 1 / (b - a + 1)) end - DiscreteUniform(b::Real; check_args::Bool=true) = DiscreteUniform(0, b; check_args=check_args) + DiscreteUniform(b::Real; check_args::Bool = true) = + DiscreteUniform(0, b; check_args = check_args) DiscreteUniform() = new(0, 1, 0.5) end @@ -92,14 +93,14 @@ quantile(d::DiscreteUniform, p::Real) = iszero(p) ? d.a : d.a - 1 + ceil(Int, p function mgf(d::DiscreteUniform, t::Real) a, b = d.a, d.b u = b - a + 1 - result = (exp(t*a) * expm1(t*u)) / (u*expm1(t)) + result = (exp(t * a) * expm1(t * u)) / (u * expm1(t)) return iszero(t) ? one(result) : result end function cf(d::DiscreteUniform, t::Real) a, b = d.a, d.b u = b - a + 1 - result = (im*cos(t*(a+b)/2) + sin(t*(a-b-1)/2)) / (u*sin(t/2)) + result = (im * cos(t * (a + b) / 2) + sin(t * (a - b - 1) / 2)) / (u * sin(t / 2)) return iszero(t) ? 
one(result) : result end diff --git a/src/univariate/discrete/geometric.jl b/src/univariate/discrete/geometric.jl index 20043e60fe..e36db7e810 100644 --- a/src/univariate/discrete/geometric.jl +++ b/src/univariate/discrete/geometric.jl @@ -24,12 +24,12 @@ External links struct Geometric{T<:Real} <: DiscreteUnivariateDistribution p::T - function Geometric{T}(p::T) where {T <: Real} + function Geometric{T}(p::T) where {T<:Real} new{T}(p) end end -function Geometric(p::Real; check_args::Bool=true) +function Geometric(p::Real; check_args::Bool = true) @check_args Geometric (p, zero(p) < p <= one(p)) return Geometric{typeof(p)}(p) end @@ -89,21 +89,21 @@ end function cdf(d::Geometric, x::Int) p = succprob(d) n = max(x + 1, 0) - p < 1/2 ? -expm1(log1p(-p)*n) : 1 - (1 - p)^n + p < 1 / 2 ? -expm1(log1p(-p) * n) : 1 - (1 - p)^n end ccdf(d::Geometric, x::Real) = ccdf_int(d, x) function ccdf(d::Geometric, x::Int) p = succprob(d) n = max(x + 1, 0) - p < 1/2 ? exp(log1p(-p)*n) : (1 - p)^n + p < 1 / 2 ? exp(log1p(-p) * n) : (1 - p)^n end logcdf(d::Geometric, x::Real) = logcdf_int(d, x) logcdf(d::Geometric, x::Int) = log1mexp(log1p(-d.p) * max(x + 1, 0)) logccdf(d::Geometric, x::Real) = logccdf_int(d, x) -logccdf(d::Geometric, x::Int) = log1p(-d.p) * max(x + 1, 0) +logccdf(d::Geometric, x::Int) = log1p(-d.p) * max(x + 1, 0) quantile(d::Geometric, p::Real) = invlogccdf(d, log1p(-p)) @@ -111,7 +111,7 @@ cquantile(d::Geometric, p::Real) = invlogccdf(d, log(p)) invlogcdf(d::Geometric, lp::Real) = invlogccdf(d, log1mexp(lp)) -function invlogccdf(d::Geometric{T}, lp::Real) where T<:Real +function invlogccdf(d::Geometric{T}, lp::Real) where {T<:Real} if (lp > zero(d.p)) || isnan(lp) return T(NaN) elseif isinf(lp) @@ -119,7 +119,7 @@ function invlogccdf(d::Geometric{T}, lp::Real) where T<:Real elseif lp == zero(d.p) return zero(T) end - max(ceil(lp/log1p(-d.p)) - 1, zero(T)) + max(ceil(lp / log1p(-d.p)) - 1, zero(T)) end function laplace_transform(d::Geometric, t) @@ -132,16 +132,16 @@ function cgf(d::Geometric, t) # log(p / (1 - (1-p) * exp(t))) log(p) - log1mexp(t + log1p(-p)) end -cf(d::Geometric, t::Real) = laplace_transform(d, -t*im) +cf(d::Geometric, t::Real) = laplace_transform(d, -t * im) ### Sampling # Inlining is required to hoist the d.p == 1//2 check when generating in bulk @inline function rand(rng::AbstractRNG, d::Geometric) - if d.p == 1//2 + if d.p == 1 // 2 leading_zeros(rand(rng, UInt)) # This branch is a performance optimization else - floor(Int,-randexp(rng) / log1p(-d.p)) + floor(Int, -randexp(rng) / log1p(-d.p)) end end @@ -154,15 +154,20 @@ struct GeometricStats <: SufficientStats GeometricStats(sx::Real, tw::Real) = new(sx, tw) end -suffstats(::Type{<:Geometric}, x::AbstractArray{T}) where {T<:Integer} = GeometricStats(sum(x), length(x)) +suffstats(::Type{<:Geometric}, x::AbstractArray{T}) where {T<:Integer} = + GeometricStats(sum(x), length(x)) -function suffstats(::Type{<:Geometric}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer +function suffstats( + ::Type{<:Geometric}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) end - sx = 0. - tw = 0. 
+ sx = 0.0 + tw = 0.0 for i = 1:n wi = w[i] sx += wi * x[i] diff --git a/src/univariate/discrete/hypergeometric.jl b/src/univariate/discrete/hypergeometric.jl index a13127f2f6..55959a9b36 100644 --- a/src/univariate/discrete/hypergeometric.jl +++ b/src/univariate/discrete/hypergeometric.jl @@ -24,7 +24,7 @@ struct Hypergeometric <: DiscreteUnivariateDistribution nf::Int # number of failures in population n::Int # sample size - function Hypergeometric(ns::Real, nf::Real, n::Real; check_args::Bool=true) + function Hypergeometric(ns::Real, nf::Real, n::Real; check_args::Bool = true) @check_args( Hypergeometric, (ns, ns >= zero(ns)), @@ -58,22 +58,26 @@ mode(d::Hypergeometric) = floor(Int, (d.n + 1) * (d.ns + 1) / (d.ns + d.nf + 2)) function modes(d::Hypergeometric) if (d.ns == d.nf) && mod(d.n, 2) == 1 - [(d.n-1)/2, (d.n+1)/2] + [(d.n - 1) / 2, (d.n + 1) / 2] else [mode(d)] end end -skewness(d::Hypergeometric) = (d.nf-d.ns)*sqrt(d.ns+d.nf-1)*(d.ns+d.nf-2*d.n)/sqrt(d.n*d.ns*d.nf*(d.ns+d.nf-d.n))/(d.ns+d.nf-2) +skewness(d::Hypergeometric) = + (d.nf - d.ns) * sqrt(d.ns + d.nf - 1) * (d.ns + d.nf - 2 * d.n) / + sqrt(d.n * d.ns * d.nf * (d.ns + d.nf - d.n)) / (d.ns + d.nf - 2) function kurtosis(d::Hypergeometric) ns = float(d.ns) nf = float(d.nf) n = float(d.n) N = ns + nf - a = (N-1) * N^2 * (N * (N+1) - 6*ns * (N-ns) - 6*n*(N-n)) + 6*n*ns*(nf)*(N-n)*(5*N-6) - b = (n*ns*(N-ns) * (N-n)*(N-2)*(N-3)) - a/b + a = + (N - 1) * N^2 * (N * (N + 1) - 6 * ns * (N - ns) - 6 * n * (N - n)) + + 6 * n * ns * (nf) * (N - n) * (5 * N - 6) + b = (n * ns * (N - ns) * (N - n) * (N - 2) * (N - 3)) + a / b end entropy(d::Hypergeometric) = entropy(map(Base.Fix1(pdf, d), support(d))) @@ -90,5 +94,4 @@ entropy(d::Hypergeometric) = entropy(map(Base.Fix1(pdf, d), support(d))) # Journal of Statistical Computation and Simulation, 22(2):127-145 # doi:10.1080/00949658508810839 @rand_rdist(Hypergeometric) -rand(d::Hypergeometric) = - convert(Int, StatsFuns.RFunctions.hyperrand(d.ns, d.nf, d.n)) +rand(d::Hypergeometric) = convert(Int, StatsFuns.RFunctions.hyperrand(d.ns, d.nf, d.n)) diff --git a/src/univariate/discrete/negativebinomial.jl b/src/univariate/discrete/negativebinomial.jl index 0ca8f3c9bd..c957ff6785 100644 --- a/src/univariate/discrete/negativebinomial.jl +++ b/src/univariate/discrete/negativebinomial.jl @@ -30,19 +30,22 @@ struct NegativeBinomial{T<:Real} <: DiscreteUnivariateDistribution r::T p::T - function NegativeBinomial{T}(r::T, p::T) where {T <: Real} + function NegativeBinomial{T}(r::T, p::T) where {T<:Real} return new{T}(r, p) end end -function NegativeBinomial(r::T, p::T; check_args::Bool=true) where {T <: Real} +function NegativeBinomial(r::T, p::T; check_args::Bool = true) where {T<:Real} @check_args NegativeBinomial (r, r > zero(r)) (p, zero(p) < p <= one(p)) return NegativeBinomial{T}(r, p) end -NegativeBinomial(r::Real, p::Real; check_args::Bool=true) = NegativeBinomial(promote(r, p)...; check_args=check_args) -NegativeBinomial(r::Integer, p::Integer; check_args::Bool=true) = NegativeBinomial(float(r), float(p); check_args=check_args) -NegativeBinomial(r::Real; check_args::Bool=true) = NegativeBinomial(r, 0.5; check_args=check_args) +NegativeBinomial(r::Real, p::Real; check_args::Bool = true) = + NegativeBinomial(promote(r, p)...; check_args = check_args) +NegativeBinomial(r::Integer, p::Integer; check_args::Bool = true) = + NegativeBinomial(float(r), float(p); check_args = check_args) +NegativeBinomial(r::Real; check_args::Bool = true) = + NegativeBinomial(r, 0.5; check_args = check_args) 
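A small illustrative sketch of the promotion pattern used by the convenience constructors above: mixed `Real` arguments are promoted to a common parameter type before the inner constructor runs.

    using Distributions

    d1 = NegativeBinomial(2, 0.4)        # Int and Float64 promote to Float64
    d2 = NegativeBinomial(2.0f0, 0.4f0)  # Float32 parameters stay Float32
    partype(d1), partype(d2)             # (Float64, Float32)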
NegativeBinomial() = NegativeBinomial{Float64}(1.0, 0.5) @distr_support NegativeBinomial 0 Inf @@ -70,15 +73,18 @@ failprob(d::NegativeBinomial{T}) where {T} = one(T) - d.p mean(d::NegativeBinomial{T}) where {T} = (p = succprob(d); (one(T) - p) * d.r / p) -var(d::NegativeBinomial{T}) where {T} = (p = succprob(d); (one(T) - p) * d.r / (p * p)) +var(d::NegativeBinomial{T}) where {T} = (p = succprob(d); (one(T) - p) * d.r / (p * p)) -std(d::NegativeBinomial{T}) where {T} = (p = succprob(d); sqrt((one(T) - p) * d.r) / p) +std(d::NegativeBinomial{T}) where {T} = (p = succprob(d); sqrt((one(T) - p) * d.r) / p) -skewness(d::NegativeBinomial{T}) where {T} = (p = succprob(d); (T(2) - p) / sqrt((one(T) - p) * d.r)) +skewness(d::NegativeBinomial{T}) where {T} = + (p = succprob(d); (T(2) - p) / sqrt((one(T) - p) * d.r)) -kurtosis(d::NegativeBinomial{T}) where {T} = (p = succprob(d); T(6) / d.r + (p * p) / ((one(T) - p) * d.r)) +kurtosis(d::NegativeBinomial{T}) where {T} = + (p = succprob(d); T(6) / d.r + (p * p) / ((one(T) - p) * d.r)) -mode(d::NegativeBinomial{T}) where {T} = (p = succprob(d); floor(Int,(one(T) - p) * (d.r - one(T)) / p)) +mode(d::NegativeBinomial{T}) where {T} = + (p = succprob(d); floor(Int, (one(T) - p) * (d.r - one(T)) / p)) function kldivergence(p::NegativeBinomial, q::NegativeBinomial; kwargs...) if p.r == q.r @@ -86,7 +92,13 @@ function kldivergence(p::NegativeBinomial, q::NegativeBinomial; kwargs...) else # There does not appear to be an analytical formula for # this case. Hence we fall back to the numerical approximation. - return invoke(kldivergence, Tuple{UnivariateDistribution{Discrete},UnivariateDistribution{Discrete}}, p, q; kwargs...) + return invoke( + kldivergence, + Tuple{UnivariateDistribution{Discrete},UnivariateDistribution{Discrete}}, + p, + q; + kwargs..., + ) end end @@ -103,7 +115,7 @@ function logpdf(d::NegativeBinomial, k::Real) # but unfortunately not numerically, so we handle this case separately to improve accuracy return z end - + return insupport(d, k) ? 
z - log(k + r) - logbeta(r, k + 1) : oftype(z, -Inf) end @@ -122,13 +134,13 @@ function rand(rng::AbstractRNG, d::NegativeBinomial) if isone(d.p) return 0 else - return rand(rng, Poisson(rand(rng, Gamma(d.r, (1 - d.p)/d.p)))) + return rand(rng, Poisson(rand(rng, Gamma(d.r, (1 - d.p) / d.p)))) end end function laplace_transform(d::NegativeBinomial, t) r, p = params(d) - return laplace_transform(Geometric(p, check_args=false), t)^r + return laplace_transform(Geometric(p, check_args = false), t)^r end mgf(d::NegativeBinomial, t::Real) = laplace_transform(d, -t) @@ -136,4 +148,4 @@ function cgf(d::NegativeBinomial, t) r, p = params(d) r * cgf(Geometric{typeof(p)}(p), t) end -cf(d::NegativeBinomial, t::Real) = laplace_transform(d, -t*im) +cf(d::NegativeBinomial, t::Real) = laplace_transform(d, -t * im) diff --git a/src/univariate/discrete/noncentralhypergeometric.jl b/src/univariate/discrete/noncentralhypergeometric.jl index 52853cfc08..10c92a16c1 100644 --- a/src/univariate/discrete/noncentralhypergeometric.jl +++ b/src/univariate/discrete/noncentralhypergeometric.jl @@ -9,12 +9,12 @@ abstract type NoncentralHypergeometric{T<:Real} <: DiscreteUnivariateDistributio # Functions -function quantile(d::NoncentralHypergeometric{T}, q::Real) where T<:Real +function quantile(d::NoncentralHypergeometric{T}, q::Real) where {T<:Real} if !(zero(q) <= q <= one(q)) T(NaN) else range = support(d) - if q > 1/2 + if q > 1 / 2 q = 1 - q range = reverse(range) end @@ -39,7 +39,13 @@ struct FisherNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} n::Int # sample size ω::T # odds ratio - function FisherNoncentralHypergeometric{T}(ns::Real, nf::Real, n::Real, ω::T; check_args::Bool=true) where T + function FisherNoncentralHypergeometric{T}( + ns::Real, + nf::Real, + n::Real, + ω::T; + check_args::Bool = true, + ) where {T} @check_args( FisherNoncentralHypergeometric, (ns, ns >= zero(ns)), @@ -51,24 +57,46 @@ struct FisherNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} end end -FisherNoncentralHypergeometric(ns::Integer, nf::Integer, n::Integer, ω::Real; check_args::Bool=true) = - FisherNoncentralHypergeometric{typeof(ω)}(ns, nf, n, ω; check_args=check_args) - -FisherNoncentralHypergeometric(ns::Integer, nf::Integer, n::Integer, ω::Integer; check_args::Bool=true) = - FisherNoncentralHypergeometric(ns, nf, n, float(ω); check_args=check_args) +FisherNoncentralHypergeometric( + ns::Integer, + nf::Integer, + n::Integer, + ω::Real; + check_args::Bool = true, +) = FisherNoncentralHypergeometric{typeof(ω)}(ns, nf, n, ω; check_args = check_args) + +FisherNoncentralHypergeometric( + ns::Integer, + nf::Integer, + n::Integer, + ω::Integer; + check_args::Bool = true, +) = FisherNoncentralHypergeometric(ns, nf, n, float(ω); check_args = check_args) # Conversions -convert(::Type{FisherNoncentralHypergeometric{T}}, ns::Real, nf::Real, n::Real, ω::Real) where {T<:Real} = FisherNoncentralHypergeometric(ns, nf, n, T(ω)) -function Base.convert(::Type{FisherNoncentralHypergeometric{T}}, d::FisherNoncentralHypergeometric) where {T<:Real} +convert( + ::Type{FisherNoncentralHypergeometric{T}}, + ns::Real, + nf::Real, + n::Real, + ω::Real, +) where {T<:Real} = FisherNoncentralHypergeometric(ns, nf, n, T(ω)) +function Base.convert( + ::Type{FisherNoncentralHypergeometric{T}}, + d::FisherNoncentralHypergeometric, +) where {T<:Real} FisherNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) end -Base.convert(::Type{FisherNoncentralHypergeometric{T}}, d::FisherNoncentralHypergeometric{T}) where {T<:Real} = d 
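The `rand` method above draws a negative binomial as a Gamma-mixed Poisson. A hedged Monte-Carlo sanity sketch of that identity (sample size and tolerance are arbitrary choices):

    using Distributions, Random, Statistics

    Random.seed!(1)
    r, p = 3.0, 0.4
    xs = [rand(Poisson(rand(Gamma(r, (1 - p) / p)))) for _ in 1:100_000]
    isapprox(mean(xs), mean(NegativeBinomial(r, p)); rtol = 0.02)  # mean = r*(1-p)/p = 4.5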
+Base.convert( + ::Type{FisherNoncentralHypergeometric{T}}, + d::FisherNoncentralHypergeometric{T}, +) where {T<:Real} = d function _mode(d::FisherNoncentralHypergeometric) A = d.ω - 1 - B = d.n - d.nf - (d.ns + d.n + 2)*d.ω - C = (d.ns + 1)*(d.n + 1)*d.ω - return -2C / (B - sqrt(B^2 - 4A*C)) + B = d.n - d.nf - (d.ns + d.n + 2) * d.ω + C = (d.ns + 1) * (d.n + 1) * d.ω + return -2C / (B - sqrt(B^2 - 4A * C)) end mode(d::FisherNoncentralHypergeometric) = floor(Int, _mode(d)) @@ -93,8 +121,8 @@ function pdf(d::FisherNoncentralHypergeometric, k::Integer) s = one(ω) fᵢ = one(ω) fₖ = one(ω) - for i in (η + 1):u - rᵢ = (d.ns - i + 1)*ω/(i*(d.nf - d.n + i))*(d.n - i + 1) + for i = (η+1):u + rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ # break if terms no longer contribute to s @@ -109,8 +137,8 @@ function pdf(d::FisherNoncentralHypergeometric, k::Integer) end end fᵢ = one(ω) - for i in (η - 1):-1:l - rᵢ₊ = (d.ns - i)*ω/((i + 1)*(d.nf - d.n + i + 1))*(d.n - i) + for i = (η-1):-1:l + rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ # break if terms no longer contribute to s @@ -125,7 +153,7 @@ function pdf(d::FisherNoncentralHypergeometric, k::Integer) end end - return fₖ/s + return fₖ / s end logpdf(d::FisherNoncentralHypergeometric, k::Real) = log(pdf(d, k)) @@ -143,8 +171,8 @@ function cdf(d::FisherNoncentralHypergeometric, k::Integer) s = one(ω) fᵢ = one(ω) Fₖ = k >= η ? one(ω) : zero(ω) - for i in (η + 1):u - rᵢ = (d.ns - i + 1)*ω/(i*(d.nf - d.n + i))*(d.n - i + 1) + for i = (η+1):u + rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ # break if terms no longer contribute to s @@ -158,8 +186,8 @@ function cdf(d::FisherNoncentralHypergeometric, k::Integer) end end fᵢ = one(ω) - for i in (η - 1):-1:l - rᵢ₊ = (d.ns - i)*ω/((i + 1)*(d.nf - d.n + i + 1))*(d.n - i) + for i = (η-1):-1:l + rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ # break if terms no longer contribute to s @@ -173,7 +201,7 @@ function cdf(d::FisherNoncentralHypergeometric, k::Integer) end end - return Fₖ/s + return Fₖ / s end function _expectation(f, d::FisherNoncentralHypergeometric) @@ -182,10 +210,10 @@ function _expectation(f, d::FisherNoncentralHypergeometric) u = min(d.ns, d.n) η = mode(d) s = one(ω) - m = f(η)*s + m = f(η) * s fᵢ = one(ω) - for i in (η + 1):u - rᵢ = (d.ns - i + 1)*ω/(i*(d.nf - d.n + i))*(d.n - i + 1) + for i = (η+1):u + rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ # break if terms no longer contribute to s @@ -195,11 +223,11 @@ function _expectation(f, d::FisherNoncentralHypergeometric) end s = sfᵢ - m += f(i)*fᵢ + m += f(i) * fᵢ end fᵢ = one(ω) - for i in (η - 1):-1:l - rᵢ₊ = (d.ns - i)*ω/((i + 1)*(d.nf - d.n + i + 1))*(d.n - i) + for i = (η-1):-1:l + rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ # break if terms no longer contribute to s @@ -209,10 +237,10 @@ function _expectation(f, d::FisherNoncentralHypergeometric) end s = sfᵢ - m += f(i)*fᵢ + m += f(i) * fᵢ end - return m/s + return m / s end mean(d::FisherNoncentralHypergeometric) = _expectation(identity, d) @@ -230,7 +258,13 @@ struct WalleniusNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} n::Int # sample size ω::T # odds ratio - function WalleniusNoncentralHypergeometric{T}(ns::Real, nf::Real, n::Real, ω::T; check_args::Bool=true) where T + function WalleniusNoncentralHypergeometric{T}( + ns::Real, + nf::Real, + n::Real, + ω::T; + check_args::Bool = true, + ) 
where {T} @check_args( WalleniusNoncentralHypergeometric, (ns, ns >= zero(ns)), @@ -242,25 +276,47 @@ struct WalleniusNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} end end -WalleniusNoncentralHypergeometric(ns::Integer, nf::Integer, n::Integer, ω::Real; check_args::Bool=true) = - WalleniusNoncentralHypergeometric{typeof(ω)}(ns, nf, n, ω; check_args=check_args) - -WalleniusNoncentralHypergeometric(ns::Integer, nf::Integer, n::Integer, ω::Integer; check_args::Bool=true) = - WalleniusNoncentralHypergeometric(ns, nf, n, float(ω); check_args=check_args) +WalleniusNoncentralHypergeometric( + ns::Integer, + nf::Integer, + n::Integer, + ω::Real; + check_args::Bool = true, +) = WalleniusNoncentralHypergeometric{typeof(ω)}(ns, nf, n, ω; check_args = check_args) + +WalleniusNoncentralHypergeometric( + ns::Integer, + nf::Integer, + n::Integer, + ω::Integer; + check_args::Bool = true, +) = WalleniusNoncentralHypergeometric(ns, nf, n, float(ω); check_args = check_args) # Conversions -convert(::Type{WalleniusNoncentralHypergeometric{T}}, ns::Real, nf::Real, n::Real, ω::Real) where {T<:Real} = WalleniusNoncentralHypergeometric(ns, nf, n, T(ω)) -function Base.convert(::Type{WalleniusNoncentralHypergeometric{T}}, d::WalleniusNoncentralHypergeometric) where {T<:Real} +convert( + ::Type{WalleniusNoncentralHypergeometric{T}}, + ns::Real, + nf::Real, + n::Real, + ω::Real, +) where {T<:Real} = WalleniusNoncentralHypergeometric(ns, nf, n, T(ω)) +function Base.convert( + ::Type{WalleniusNoncentralHypergeometric{T}}, + d::WalleniusNoncentralHypergeometric, +) where {T<:Real} WalleniusNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) end -Base.convert(::Type{WalleniusNoncentralHypergeometric{T}}, d::WalleniusNoncentralHypergeometric{T}) where {T<:Real} = d +Base.convert( + ::Type{WalleniusNoncentralHypergeometric{T}}, + d::WalleniusNoncentralHypergeometric{T}, +) where {T<:Real} = d # Properties function _discretenonparametric(d::WalleniusNoncentralHypergeometric) return DiscreteNonParametric(support(d), map(Base.Fix1(pdf, d), support(d))) end mean(d::WalleniusNoncentralHypergeometric) = mean(_discretenonparametric(d)) -var(d::WalleniusNoncentralHypergeometric) = var(_discretenonparametric(d)) +var(d::WalleniusNoncentralHypergeometric) = var(_discretenonparametric(d)) mode(d::WalleniusNoncentralHypergeometric) = mode(_discretenonparametric(d)) entropy(d::WalleniusNoncentralHypergeometric) = 1 @@ -272,8 +328,8 @@ function logpdf(d::WalleniusNoncentralHypergeometric, k::Real) D = d.ω * (d.ns - k) + (d.nf - d.n + k) f(t) = (1 - t^(d.ω / D))^k * (1 - t^(1 / D))^(d.n - k) I, _ = quadgk(f, 0, 1) - return -log(d.ns + 1) - logbeta(d.ns - k + 1, k + 1) - - log(d.nf + 1) - logbeta(d.nf - d.n + k + 1, d.n - k + 1) + log(I) + return -log(d.ns + 1) - logbeta(d.ns - k + 1, k + 1) - log(d.nf + 1) - + logbeta(d.nf - d.n + k + 1, d.n - k + 1) + log(I) else return log(zero(k)) end diff --git a/src/univariate/discrete/poisson.jl b/src/univariate/discrete/poisson.jl index b794fe7a3b..3f1ddd83b9 100644 --- a/src/univariate/discrete/poisson.jl +++ b/src/univariate/discrete/poisson.jl @@ -23,21 +23,21 @@ External links: struct Poisson{T<:Real} <: DiscreteUnivariateDistribution λ::T - Poisson{T}(λ::Real) where {T <: Real} = new{T}(λ) + Poisson{T}(λ::Real) where {T<:Real} = new{T}(λ) end -function Poisson(λ::Real; check_args::Bool=true) +function Poisson(λ::Real; check_args::Bool = true) @check_args Poisson (λ, λ >= zero(λ)) return Poisson{typeof(λ)}(λ) end -Poisson(λ::Integer; check_args::Bool=true) = 
Poisson(float(λ); check_args=check_args) +Poisson(λ::Integer; check_args::Bool = true) = Poisson(float(λ); check_args = check_args) Poisson() = Poisson{Float64}(1.0) @distr_support Poisson 0 (d.λ == zero(typeof(d.λ)) ? 0 : Inf) #### Conversions -convert(::Type{Poisson{T}}, λ::S) where {T <: Real, S <: Real} = Poisson(T(λ)) +convert(::Type{Poisson{T}}, λ::S) where {T<:Real,S<:Real} = Poisson(T(λ)) Base.convert(::Type{Poisson{T}}, d::Poisson) where {T<:Real} = Poisson{T}(T(d.λ)) Base.convert(::Type{Poisson{T}}, d::Poisson{T}) where {T<:Real} = d @@ -52,7 +52,7 @@ rate(d::Poisson) = d.λ mean(d::Poisson) = d.λ -mode(d::Poisson) = floor(Int,d.λ) +mode(d::Poisson) = floor(Int, d.λ) function modes(d::Poisson) λ = d.λ @@ -65,7 +65,7 @@ skewness(d::Poisson) = one(typeof(d.λ)) / sqrt(d.λ) kurtosis(d::Poisson) = one(typeof(d.λ)) / d.λ -function entropy(d::Poisson{T}) where T<:Real +function entropy(d::Poisson{T}) where {T<:Real} λ = rate(d) if λ == zero(T) return zero(T) @@ -78,9 +78,7 @@ function entropy(d::Poisson{T}) where T<:Real end return λ * (1 - log(λ)) + exp(-λ) * s else - return log(2 * pi * ℯ * λ)/2 - - (1 / (12 * λ)) - - (1 / (24 * λ * λ)) - + return log(2 * pi * ℯ * λ) / 2 - (1 / (12 * λ)) - (1 / (24 * λ * λ)) - (19 / (360 * λ * λ * λ)) end end @@ -116,13 +114,18 @@ struct PoissonStats <: SufficientStats tw::Float64 # total sample weight end -suffstats(::Type{<:Poisson}, x::AbstractArray{T}) where {T<:Integer} = PoissonStats(sum(x), length(x)) +suffstats(::Type{<:Poisson}, x::AbstractArray{T}) where {T<:Integer} = + PoissonStats(sum(x), length(x)) -function suffstats(::Type{<:Poisson}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer +function suffstats( + ::Type{<:Poisson}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, +) where {T<:Integer} n = length(x) n == length(w) || throw(DimensionMismatch("Inconsistent array lengths.")) - sx = 0. - tw = 0. 
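For large λ the entropy method above switches to an asymptotic expansion. A rough standalone comparison against a directly truncated sum of -p log p (the truncation point 1000 is an assumption that comfortably covers the Poisson(100) mass):

    using Distributions

    λ = 100.0
    d = Poisson(λ)
    direct = -sum(k -> pdf(d, k) * logpdf(d, k), 0:1000)
    series = log(2π * ℯ * λ) / 2 - 1 / (12λ) - 1 / (24λ^2) - 19 / (360λ^3)
    abs(direct - series) < 1e-6  # the two branches agree at large λ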
+ sx = 0.0 + tw = 0.0 for i in eachindex(x, w) @inbounds wi = w[i] @inbounds sx += x[i] * wi diff --git a/src/univariate/discrete/poissonbinomial.jl b/src/univariate/discrete/poissonbinomial.jl index 0db82f07bd..9f0972814b 100644 --- a/src/univariate/discrete/poissonbinomial.jl +++ b/src/univariate/discrete/poissonbinomial.jl @@ -23,11 +23,15 @@ External links: * [Poisson-binomial distribution on Wikipedia](http://en.wikipedia.org/wiki/Poisson_binomial_distribution) """ -mutable struct PoissonBinomial{T<:Real,P<:AbstractVector{T}} <: DiscreteUnivariateDistribution +mutable struct PoissonBinomial{T<:Real,P<:AbstractVector{T}} <: + DiscreteUnivariateDistribution p::P pmf::Union{Nothing,Vector{T}} # lazy computation of the probability mass function - function PoissonBinomial{T}(p::AbstractVector{T}; check_args::Bool=true) where {T <: Real} + function PoissonBinomial{T}( + p::AbstractVector{T}; + check_args::Bool = true, + ) where {T<:Real} @check_args( PoissonBinomial, ( @@ -40,8 +44,8 @@ mutable struct PoissonBinomial{T<:Real,P<:AbstractVector{T}} <: DiscreteUnivaria end end -function PoissonBinomial(p::AbstractVector{T}; check_args::Bool=true) where {T<:Real} - return PoissonBinomial{T}(p; check_args=check_args) +function PoissonBinomial(p::AbstractVector{T}; check_args::Bool = true) where {T<:Real} + return PoissonBinomial{T}(p; check_args = check_args) end function Base.getproperty(d::PoissonBinomial, x::Symbol) @@ -64,11 +68,11 @@ end #### Conversions -function PoissonBinomial(::Type{PoissonBinomial{T}}, p::AbstractVector{S}) where {T, S} +function PoissonBinomial(::Type{PoissonBinomial{T}}, p::AbstractVector{S}) where {T,S} return PoissonBinomial(AbstractVector{T}(p)) end -function PoissonBinomial(::Type{PoissonBinomial{T}}, d::PoissonBinomial{S}) where {T, S} - return PoissonBinomial(AbstractVector{T}(d.p), check_args=false) +function PoissonBinomial(::Type{PoissonBinomial{T}}, d::PoissonBinomial{S}) where {T,S} + return PoissonBinomial(AbstractVector{T}(d.p), check_args = false) end #### Parameters @@ -88,7 +92,7 @@ var(d::PoissonBinomial) = sum(p * (1 - p) for p in succprob(d)) function skewness(d::PoissonBinomial{T}) where {T} v = zero(T) s = zero(T) - p, = params(d) + p, = params(d) for i in eachindex(p) v += p[i] * (one(T) - p[i]) s += p[i] * (one(T) - p[i]) * (one(T) - T(2) * p[i]) @@ -99,7 +103,7 @@ end function kurtosis(d::PoissonBinomial{T}) where {T} v = zero(T) s = zero(T) - p, = params(d) + p, = params(d) for i in eachindex(p) v += p[i] * (one(T) - p[i]) s += p[i] * (one(T) - p[i]) * (one(T) - T(6) * (one(T) - p[i]) * p[i]) @@ -130,7 +134,7 @@ function cf(d::PoissonBinomial, t::Real) end end -pdf(d::PoissonBinomial, k::Real) = insupport(d, k) ? d.pmf[Int(k+1)] : zero(eltype(d.pmf)) +pdf(d::PoissonBinomial, k::Real) = insupport(d, k) ? d.pmf[Int(k + 1)] : zero(eltype(d.pmf)) logpdf(d::PoissonBinomial, k::Real) = log(pdf(d, k)) cdf(d::PoissonBinomial, k::Int) = integerunitrange_cdf(d, k) @@ -155,8 +159,8 @@ function poissonbinomial_pdf(p) S[1] = 1 @inbounds for (col, p_col) in enumerate(p) q_col = 1 - p_col - for row in col:(-1):1 - S[row + 1] = q_col * S[row + 1] + p_col * S[row] + for row = col:(-1):1 + S[row+1] = q_col * S[row+1] + p_col * S[row] end S[1] *= q_col end @@ -170,25 +174,25 @@ end # On computing the distribution function for the Poisson binomial # distribution. Computational Statistics and Data Analysis, 59, 41–51. 
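`poissonbinomial_pdf` above is a standard in-place dynamic program over the success probabilities. A self-contained sketch of the same recurrence (simplified: each pass sweeps the full vector, which is equivalent because entries beyond the processed count are still zero):

    function pb_pdf(p::AbstractVector{<:Real})
        S = zeros(float(eltype(p)), length(p) + 1)  # S[k+1] holds P(K = k) so far
        S[1] = 1
        for pᵢ in p
            qᵢ = 1 - pᵢ
            for k = length(p):-1:1  # descend so each update reads old values
                S[k+1] = qᵢ * S[k+1] + pᵢ * S[k]
            end
            S[1] *= qᵢ
        end
        return S
    end

    pb_pdf([0.5, 0.5])  # [0.25, 0.5, 0.25], matching Binomial(2, 0.5)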
# -function poissonbinomial_pdf_fft(p::AbstractArray{T}) where {T <: Real} +function poissonbinomial_pdf_fft(p::AbstractArray{T}) where {T<:Real} n = length(p) ω = 2 * one(T) / (n + 1) - x = Vector{Complex{T}}(undef, n+1) - lmax = ceil(Int, n/2) - x[1] = one(T)/(n + 1) - for l=1:lmax + x = Vector{Complex{T}}(undef, n + 1) + lmax = ceil(Int, n / 2) + x[1] = one(T) / (n + 1) + for l = 1:lmax logz = zero(T) argz = zero(T) - for j=1:n - zjl = 1 - p[j] + p[j] * cospi(ω*l) + im * p[j] * sinpi(ω * l) + for j = 1:n + zjl = 1 - p[j] + p[j] * cospi(ω * l) + im * p[j] * sinpi(ω * l) logz += log(abs(zjl)) argz += atan(imag(zjl), real(zjl)) end dl = exp(logz) - x[l + 1] = dl * cos(argz) / (n + 1) + dl * sin(argz) * im / (n + 1) + x[l+1] = dl * cos(argz) / (n + 1) + dl * sin(argz) * im / (n + 1) if n + 1 - l > l - x[n + 1 - l + 1] = conj(x[l + 1]) + x[n+1-l+1] = conj(x[l+1]) end end [max(0, real(xi)) for xi in _dft(x)] @@ -196,7 +200,7 @@ end # A simple implementation of a DFT to avoid introducing a dependency # on an external FFT package just for this one distribution -function _dft(x::Vector{T}) where T +function _dft(x::Vector{T}) where {T} n = length(x) y = zeros(complex(float(T)), n) @inbounds for j = 0:n-1, k = 0:n-1 @@ -222,28 +226,28 @@ sampler(d::PoissonBinomial) = PoissBinAliasSampler(d) function poissonbinomial_pdf_partialderivatives(p::AbstractVector{<:Real}) n = length(p) A = zeros(eltype(p), n, n + 1) - @inbounds for j in 1:n + @inbounds for j = 1:n A[j, end] = 1 end @inbounds for (i, pi) in enumerate(p) qi = 1 - pi - for k in (n - i + 1):n + for k = (n-i+1):n kp1 = k + 1 - for j in 1:(i - 1) + for j = 1:(i-1) A[j, k] = pi * A[j, k] + qi * A[j, kp1] end - for j in (i+1):n + for j = (i+1):n A[j, k] = pi * A[j, k] + qi * A[j, kp1] end end - for j in 1:(i-1) + for j = 1:(i-1) A[j, end] *= pi end - for j in (i+1):n + for j = (i+1):n A[j, end] *= pi end end - @inbounds for j in 1:n, i in 1:n + @inbounds for j = 1:n, i = 1:n A[i, j] -= A[i, j+1] end return A diff --git a/src/univariate/discrete/skellam.jl b/src/univariate/discrete/skellam.jl index e55fa4723e..64aa9acb7b 100644 --- a/src/univariate/discrete/skellam.jl +++ b/src/univariate/discrete/skellam.jl @@ -26,22 +26,24 @@ struct Skellam{T<:Real} <: DiscreteUnivariateDistribution μ1::T μ2::T - function Skellam{T}(μ1::T, μ2::T) where {T <: Real} + function Skellam{T}(μ1::T, μ2::T) where {T<:Real} return new{T}(μ1, μ2) end end -function Skellam(μ1::T, μ2::T; check_args::Bool=true) where {T <: Real} +function Skellam(μ1::T, μ2::T; check_args::Bool = true) where {T<:Real} @check_args Skellam (μ1, μ1 > zero(μ1)) (μ2, μ2 > zero(μ2)) return Skellam{T}(μ1, μ2) end -Skellam(μ1::Real, μ2::Real; check_args::Bool=true) = Skellam(promote(μ1, μ2)...; check_args=check_args) -Skellam(μ1::Integer, μ2::Integer; check_args::Bool=true) = Skellam(float(μ1), float(μ2); check_args=check_args) -function Skellam(μ::Real; check_args::Bool=true) +Skellam(μ1::Real, μ2::Real; check_args::Bool = true) = + Skellam(promote(μ1, μ2)...; check_args = check_args) +Skellam(μ1::Integer, μ2::Integer; check_args::Bool = true) = + Skellam(float(μ1), float(μ2); check_args = check_args) +function Skellam(μ::Real; check_args::Bool = true) @check_args Skellam (μ, μ > zero(μ)) - Skellam(μ, μ; check_args=false) + Skellam(μ, μ; check_args = false) end Skellam() = Skellam{Float64}(1.0, 1.0) @@ -49,7 +51,7 @@ Skellam() = Skellam{Float64}(1.0, 1.0) #### Conversions -convert(::Type{Skellam{T}}, μ1::S, μ2::S) where {T<:Real, S<:Real} = Skellam(T(μ1), T(μ2)) +convert(::Type{Skellam{T}}, μ1::S, 
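A quick consistency sketch relating the two pmf routines above (both are internal helpers, so the qualified names below may change between package versions):

    using Distributions

    p = [0.1, 0.4, 0.7]
    Distributions.poissonbinomial_pdf(p) ≈ Distributions.poissonbinomial_pdf_fft(p)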
μ2::S) where {T<:Real,S<:Real} = Skellam(T(μ1), T(μ2)) Base.convert(::Type{Skellam{T}}, d::Skellam) where {T<:Real} = Skellam{T}(T(d.μ1), T(d.μ2)) Base.convert(::Type{Skellam{T}}, d::Skellam{T}) where {T<:Real} = d @@ -65,7 +67,7 @@ mean(d::Skellam) = d.μ1 - d.μ2 var(d::Skellam) = d.μ1 + d.μ2 -skewness(d::Skellam) = mean(d) / (var(d)^(3//2)) +skewness(d::Skellam) = mean(d) / (var(d)^(3 // 2)) kurtosis(d::Skellam) = 1 / var(d) @@ -75,9 +77,11 @@ kurtosis(d::Skellam) = 1 / var(d) function logpdf(d::Skellam, x::Real) μ1, μ2 = params(d) if insupport(d, x) - return - (μ1 + μ2) + (x/2) * log(μ1/μ2) + log(besseli(x, 2*sqrt(μ1)*sqrt(μ2))) + return -(μ1 + μ2) + + (x / 2) * log(μ1 / μ2) + + log(besseli(x, 2 * sqrt(μ1) * sqrt(μ2))) else - return one(x) / 2 * log(zero(μ1/μ2)) + return one(x) / 2 * log(zero(μ1 / μ2)) end end @@ -105,12 +109,11 @@ Computing cdf of the Skellam distribution. function cdf(d::Skellam, t::Integer) μ1, μ2 = params(d) return if t < 0 - nchisqcdf(-2*t, 2*μ1, 2*μ2) + nchisqcdf(-2 * t, 2 * μ1, 2 * μ2) else - 1 - nchisqcdf(2*(t+1), 2*μ2, 2*μ1) + 1 - nchisqcdf(2 * (t + 1), 2 * μ2, 2 * μ1) end end #### Sampling -rand(rng::AbstractRNG, d::Skellam) = - rand(rng, Poisson(d.μ1)) - rand(rng, Poisson(d.μ2)) +rand(rng::AbstractRNG, d::Skellam) = rand(rng, Poisson(d.μ1)) - rand(rng, Poisson(d.μ2)) diff --git a/src/univariate/discrete/soliton.jl b/src/univariate/discrete/soliton.jl index f13d9e3df2..db9fce7609 100644 --- a/src/univariate/discrete/soliton.jl +++ b/src/univariate/discrete/soliton.jl @@ -33,21 +33,22 @@ struct Soliton <: DiscreteUnivariateDistribution degrees::Vector{Int} # Degrees with non-zero probability CDF::Vector{Float64} # CDF evaluated at each element in degrees - function Soliton(K::Integer, M::Integer, δ::Real, atol::Real=0) + function Soliton(K::Integer, M::Integer, δ::Real, atol::Real = 0) 0 < K || throw(DomainError(K, "Expected 0 < K.")) 0 < δ < 1 || throw(DomainError(δ, "Expected 0 < δ < 1.")) 0 < M <= K || throw(DomainError(M, "Expected 0 < M <= K.")) 0 <= atol < 1 || throw(DomainError(atol, "Expected 0 <= atol < 1.")) - PDF = [soliton_τ(K, M, δ, i)+soliton_ρ(K, i) for i in 1:K] + PDF = [soliton_τ(K, M, δ, i) + soliton_ρ(K, i) for i = 1:K] PDF ./= sum(PDF) - degrees = [i for i in 1:K if PDF[i] > atol] + degrees = [i for i = 1:K if PDF[i] > atol] CDF = cumsum([PDF[i] for i in degrees]) CDF ./= CDF[end] new(K, M, δ, atol, degrees, CDF) end end -Base.show(io::IO, Ω::Soliton) = print(io, "Soliton(K=$(Ω.K), M=$(Ω.M), δ=$(Ω.δ), atol=$(Ω.atol))") +Base.show(io::IO, Ω::Soliton) = + print(io, "Soliton(K=$(Ω.K), M=$(Ω.M), δ=$(Ω.δ), atol=$(Ω.atol))") """ degrees(Ω) @@ -105,13 +106,13 @@ function cdf(Ω::Soliton, i::Integer) return Ω.CDF[j-1] end -Statistics.mean(Ω::Soliton) = sum(i -> i*pdf(Ω, i), Ω.degrees) +Statistics.mean(Ω::Soliton) = sum(i -> i * pdf(Ω, i), Ω.degrees) function Statistics.var(Ω::Soliton) μ = mean(Ω) rv = 0.0 for d in Ω.degrees - rv += pdf(Ω, d)*(d-μ)^2 + rv += pdf(Ω, d) * (d - μ)^2 end return rv end diff --git a/src/univariate/locationscale.jl b/src/univariate/locationscale.jl index ded6c2b2ac..7d3ff8ca16 100644 --- a/src/univariate/locationscale.jl +++ b/src/univariate/locationscale.jl @@ -33,14 +33,20 @@ d = μ + σ * ρ # Create location-scale transformed distribution params(d) # Get the parameters, i.e. 
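The Skellam sampler above is literally a difference of two independent Poisson draws. A hedged Monte-Carlo sketch of the first two moments (tolerances chosen loosely by assumption):

    using Distributions, Random, Statistics

    Random.seed!(2)
    d = Skellam(2.0, 3.0)
    xs = rand(d, 100_000)
    isapprox(mean(xs), mean(d); atol = 0.05)  # mean = μ1 - μ2 = -1
    isapprox(var(xs), var(d); atol = 0.1)     # var  = μ1 + μ2 = 5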
(μ, σ, ρ) ``` """ -struct AffineDistribution{T<:Real, S<:ValueSupport, D<:UnivariateDistribution{S}} <: UnivariateDistribution{S} +struct AffineDistribution{T<:Real,S<:ValueSupport,D<:UnivariateDistribution{S}} <: + UnivariateDistribution{S} μ::T σ::T ρ::D # TODO: Remove? It is not used in Distributions anymore - function AffineDistribution{T,S,D}(μ::T, σ::T, ρ::D; check_args::Bool=true) where {T<:Real, S<:ValueSupport, D<:UnivariateDistribution{S}} + function AffineDistribution{T,S,D}( + μ::T, + σ::T, + ρ::D; + check_args::Bool = true, + ) where {T<:Real,S<:ValueSupport,D<:UnivariateDistribution{S}} @check_args AffineDistribution (σ, !iszero(σ)) - new{T, S, D}(μ, σ, ρ) + new{T,S,D}(μ, σ, ρ) end function AffineDistribution{T}(μ::T, σ::T, ρ::UnivariateDistribution) where {T<:Real} D = typeof(ρ) @@ -49,28 +55,40 @@ struct AffineDistribution{T<:Real, S<:ValueSupport, D<:UnivariateDistribution{S} end end -function AffineDistribution(μ::T, σ::T, ρ::UnivariateDistribution; check_args::Bool=true) where {T<:Real} +function AffineDistribution( + μ::T, + σ::T, + ρ::UnivariateDistribution; + check_args::Bool = true, +) where {T<:Real} @check_args AffineDistribution (σ, !iszero(σ)) # μ and σ act on both random numbers and parameter-like quantities like mean # hence do not promote: but take care in eltype and partype return AffineDistribution{T}(μ, σ, ρ) end -function AffineDistribution(μ::Real, σ::Real, ρ::UnivariateDistribution; check_args::Bool=true) - return AffineDistribution(promote(μ, σ)..., ρ; check_args=check_args) +function AffineDistribution( + μ::Real, + σ::Real, + ρ::UnivariateDistribution; + check_args::Bool = true, +) + return AffineDistribution(promote(μ, σ)..., ρ; check_args = check_args) end # aliases const LocationScale{T,S,D} = AffineDistribution{T,S,D} -function LocationScale(μ::Real, σ::Real, ρ::UnivariateDistribution; check_args::Bool=true) +function LocationScale(μ::Real, σ::Real, ρ::UnivariateDistribution; check_args::Bool = true) Base.depwarn("`LocationScale` is deprecated. Use `+` and `*` instead", :LocationScale) # preparation for future PR where I remove σ > 0 check @check_args LocationScale (σ, σ > zero(σ)) - return AffineDistribution(μ, σ, ρ; check_args=false) + return AffineDistribution(μ, σ, ρ; check_args = false) end -const ContinuousAffineDistribution{T<:Real,D<:ContinuousUnivariateDistribution} = AffineDistribution{T,Continuous,D} -const DiscreteAffineDistribution{T<:Real,D<:DiscreteUnivariateDistribution} = AffineDistribution{T,Discrete,D} +const ContinuousAffineDistribution{T<:Real,D<:ContinuousUnivariateDistribution} = + AffineDistribution{T,Continuous,D} +const DiscreteAffineDistribution{T<:Real,D<:DiscreteUnivariateDistribution} = + AffineDistribution{T,Discrete,D} Base.eltype(::Type{<:AffineDistribution{T,S,D}}) where {T,S,D} = promote_type(eltype(D), T) @@ -86,13 +104,20 @@ function affinedistribution_support(μ::Real, σ::Real, support::RealInterval) return RealInterval(μ + σ * support.ub, μ + σ * support.lb) end end -affinedistribution_support(μ::Real, σ::Real, support) = σ > 0 ? μ .+ σ .* support : μ .+ σ .* reverse(support) +affinedistribution_support(μ::Real, σ::Real, support) = + σ > 0 ? 
μ .+ σ .* support : μ .+ σ .* reverse(support) -AffineDistribution(μ::Real, σ::Real, d::AffineDistribution) = AffineDistribution(μ + d.μ * σ, σ * d.σ, d.ρ) +AffineDistribution(μ::Real, σ::Real, d::AffineDistribution) = + AffineDistribution(μ + d.μ * σ, σ * d.σ, d.ρ) #### Conversions -convert(::Type{AffineDistribution{T}}, μ::Real, σ::Real, ρ::D) where {T<:Real, D<:UnivariateDistribution} = AffineDistribution(T(μ),T(σ),ρ) +convert( + ::Type{AffineDistribution{T}}, + μ::Real, + σ::Real, + ρ::D, +) where {T<:Real,D<:UnivariateDistribution} = AffineDistribution(T(μ), T(σ), ρ) function Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution) where {T<:Real} AffineDistribution{T}(T(d.μ), T(d.σ), d.ρ) end @@ -102,7 +127,7 @@ Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution{T}) where {T<: location(d::AffineDistribution) = d.μ scale(d::AffineDistribution) = d.σ -params(d::AffineDistribution) = (d.μ,d.σ,d.ρ) +params(d::AffineDistribution) = (d.μ, d.σ, d.ρ) partype(d::AffineDistribution{T}) where {T} = promote_type(partype(d.ρ), T) #### Statistics @@ -124,15 +149,16 @@ ismesokurtic(d::AffineDistribution) = ismesokurtic(d.ρ) entropy(d::ContinuousAffineDistribution) = entropy(d.ρ) + log(abs(d.σ)) entropy(d::DiscreteAffineDistribution) = entropy(d.ρ) -mgf(d::AffineDistribution,t::Real) = exp(d.μ*t) * mgf(d.ρ,d.σ*t) +mgf(d::AffineDistribution, t::Real) = exp(d.μ * t) * mgf(d.ρ, d.σ * t) #### Evaluation & Sampling -pdf(d::ContinuousAffineDistribution, x::Real) = pdf(d.ρ,(x-d.μ)/d.σ) / abs(d.σ) -pdf(d::DiscreteAffineDistribution, x::Real) = pdf(d.ρ,(x-d.μ)/d.σ) +pdf(d::ContinuousAffineDistribution, x::Real) = pdf(d.ρ, (x - d.μ) / d.σ) / abs(d.σ) +pdf(d::DiscreteAffineDistribution, x::Real) = pdf(d.ρ, (x - d.μ) / d.σ) -logpdf(d::ContinuousAffineDistribution,x::Real) = logpdf(d.ρ,(x-d.μ)/d.σ) - log(abs(d.σ)) -logpdf(d::DiscreteAffineDistribution, x::Real) = logpdf(d.ρ,(x-d.μ)/d.σ) +logpdf(d::ContinuousAffineDistribution, x::Real) = + logpdf(d.ρ, (x - d.μ) / d.σ) - log(abs(d.σ)) +logpdf(d::DiscreteAffineDistribution, x::Real) = logpdf(d.ρ, (x - d.μ) / d.σ) # CDF methods @@ -165,8 +191,9 @@ end quantile(d::AffineDistribution, q::Real) = d.μ + d.σ * quantile(d.ρ, d.σ > 0 ? 
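The pdf/logpdf methods above implement the usual change of variables for x = μ + σz. A minimal sketch (the wrapper is constructed directly and via its qualified name, since `+`/`*` on many distributions short-circuit to specialized types):

    using Distributions

    ρ = Normal()
    d = Distributions.AffineDistribution(2.0, 3.0, ρ)
    pdf(d, 5.0) ≈ pdf(ρ, (5.0 - 2.0) / 3.0) / 3.0  # density rescales by 1/|σ|
    pdf(d, 5.0) ≈ pdf(Normal(2.0, 3.0), 5.0)       # agrees with the closed form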
q : 1 - q) rand(rng::AbstractRNG, d::AffineDistribution) = d.μ + d.σ * rand(rng, d.ρ) -cf(d::AffineDistribution, t::Real) = cf(d.ρ,t*d.σ) * exp(1im*t*d.μ) -gradlogpdf(d::ContinuousAffineDistribution, x::Real) = gradlogpdf(d.ρ,(x-d.μ)/d.σ) / d.σ +cf(d::AffineDistribution, t::Real) = cf(d.ρ, t * d.σ) * exp(1im * t * d.μ) +gradlogpdf(d::ContinuousAffineDistribution, x::Real) = + gradlogpdf(d.ρ, (x - d.μ) / d.σ) / d.σ #### Syntactic sugar for simple transforms of distributions, e.g., d + x, d - x, and so on diff --git a/src/univariate/orderstatistic.jl b/src/univariate/orderstatistic.jl index 1a7055ef91..36e067fd0c 100644 --- a/src/univariate/orderstatistic.jl +++ b/src/univariate/orderstatistic.jl @@ -45,7 +45,10 @@ struct OrderStatistic{D<:UnivariateDistribution,S<:ValueSupport} <: n::Int rank::Int function OrderStatistic( - dist::UnivariateDistribution, n::Int, rank::Int; check_args::Bool=true + dist::UnivariateDistribution, + n::Int, + rank::Int; + check_args::Bool = true, ) @check_args(OrderStatistic, 1 ≤ rank ≤ n) return new{typeof(dist),value_support(typeof(dist))}(dist, n, rank) diff --git a/src/univariates.jl b/src/univariates.jl index b60e5a2949..d43e304c90 100644 --- a/src/univariates.jl +++ b/src/univariates.jl @@ -12,12 +12,14 @@ Base.maximum(r::RealInterval) = r.ub Base.extrema(r::RealInterval) = (r.lb, r.ub) Base.in(x::Real, r::RealInterval) = r.lb <= x <= r.ub -isbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = isupperbounded(d) && islowerbounded(d) +isbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = + isupperbounded(d) && islowerbounded(d) islowerbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = minimum(d) > -Inf isupperbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = maximum(d) < +Inf -hasfinitesupport(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = isbounded(d) +hasfinitesupport(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = + isbounded(d) hasfinitesupport(d::Union{D,Type{D}}) where {D<:ContinuousUnivariateDistribution} = false Base.:(==)(r1::RealInterval, r2::RealInterval) = r1.lb == r2.lb && r1.ub == r2.ub @@ -90,12 +92,15 @@ Generic fallback methods are provided, but it is often the case that `insupport` done more efficiently, and a specialized `insupport` is thus desirable. You should also override this function if the support is composed of multiple disjoint intervals. 
""" -insupport{D<:UnivariateDistribution}(d::Union{D, Type{D}}, x::Any) +insupport{D<:UnivariateDistribution}(d::Union{D,Type{D}}, x::Any) -function insupport!(r::AbstractArray, d::Union{D,Type{D}}, X::AbstractArray) where D<:UnivariateDistribution - length(r) == length(X) || - throw(DimensionMismatch("Inconsistent array dimensions.")) - for i in 1 : length(X) +function insupport!( + r::AbstractArray, + d::Union{D,Type{D}}, + X::AbstractArray, +) where {D<:UnivariateDistribution} + length(r) == length(X) || throw(DimensionMismatch("Inconsistent array dimensions.")) + for i = 1:length(X) @inbounds r[i] = insupport(d, X[i]) end return r @@ -103,13 +108,17 @@ end insupport(d::Union{D,Type{D}}, X::AbstractArray) where {D<:UnivariateDistribution} = - insupport!(BitArray(undef, size(X)), d, X) + insupport!(BitArray(undef, size(X)), d, X) -insupport(d::Union{D,Type{D}},x::Real) where {D<:ContinuousUnivariateDistribution} = minimum(d) <= x <= maximum(d) -insupport(d::Union{D,Type{D}},x::Real) where {D<:DiscreteUnivariateDistribution} = isinteger(x) && minimum(d) <= x <= maximum(d) +insupport(d::Union{D,Type{D}}, x::Real) where {D<:ContinuousUnivariateDistribution} = + minimum(d) <= x <= maximum(d) +insupport(d::Union{D,Type{D}}, x::Real) where {D<:DiscreteUnivariateDistribution} = + isinteger(x) && minimum(d) <= x <= maximum(d) -support(d::Union{D,Type{D}}) where {D<:ContinuousUnivariateDistribution} = RealInterval(minimum(d), maximum(d)) -support(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = round(Int, minimum(d)):round(Int, maximum(d)) +support(d::Union{D,Type{D}}) where {D<:ContinuousUnivariateDistribution} = + RealInterval(minimum(d), maximum(d)) +support(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = + round(Int, minimum(d)):round(Int, maximum(d)) # Type used for dispatch on finite support # T = true or false @@ -118,10 +127,10 @@ struct FiniteSupport{T} end ## macros to declare support macro distr_support(D, lb, ub) - D_has_constantbounds = (isa(ub, Number) || ub == :Inf) && - (isa(lb, Number) || lb == :(-Inf)) + D_has_constantbounds = + (isa(ub, Number) || ub == :Inf) && (isa(lb, Number) || lb == :(-Inf)) - paramdecl = D_has_constantbounds ? :(d::Union{$D, Type{<:$D}}) : :(d::$D) + paramdecl = D_has_constantbounds ? :(d::Union{$D,Type{<:$D}}) : :(d::$D) # overall esc(quote @@ -186,7 +195,7 @@ Return the median value of distribution `d`. The median is the smallest `x` in t of `d` for which `cdf(d, x) ≥ 1/2`. Corresponding to this definition as 1/2-quantile, a fallback is provided calling the `quantile` function. 
""" -median(d::UnivariateDistribution) = quantile(d, 1//2) +median(d::UnivariateDistribution) = quantile(d, 1 // 2) """ modes(d::UnivariateDistribution) @@ -420,10 +429,15 @@ invlogccdf(d::UnivariateDistribution, lp::Real) = quantile(d, -expm1(lp)) # gradlogpdf -gradlogpdf(d::ContinuousUnivariateDistribution, x::Real) = throw(MethodError(gradlogpdf, (d, x))) +gradlogpdf(d::ContinuousUnivariateDistribution, x::Real) = + throw(MethodError(gradlogpdf, (d, x))) -function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange) +function _pdf_fill_outside!( + r::AbstractArray, + d::DiscreteUnivariateDistribution, + X::UnitRange, +) vl = vfirst = first(X) vr = vlast = last(X) n = vlast - vfirst + 1 @@ -442,7 +456,7 @@ function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, # fill left part if vl > vfirst - for i = 1:(vl - vfirst) + for i = 1:(vl-vfirst) r[i] = 0.0 end end @@ -450,7 +464,7 @@ function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr - r[v - fm1] = pdf(d, v) + r[v-fm1] = pdf(d, v) end # fill right part @@ -463,12 +477,12 @@ function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, end function _pdf!(r::AbstractArray{<:Real}, d::DiscreteUnivariateDistribution, X::UnitRange) - vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) + vl, vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr - r[v - fm1] = pdf(d, v) + r[v-fm1] = pdf(d, v) end return r end @@ -476,15 +490,20 @@ end abstract type RecursiveProbabilityEvaluator end -function _pdf!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange, rpe::RecursiveProbabilityEvaluator) - vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) +function _pdf!( + r::AbstractArray, + d::DiscreteUnivariateDistribution, + X::UnitRange, + rpe::RecursiveProbabilityEvaluator, +) + vl, vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 - r[vl - fm1] = pv = pdf(d, vl) + r[vl-fm1] = pv = pdf(d, vl) for v = (vl+1):vr - r[v - fm1] = pv = nextpdf(rpe, pv, v) + r[v-fm1] = pv = nextpdf(rpe, pv, v) end end @@ -563,13 +582,15 @@ function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") - result = if isfinite(minimum_d) && !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) - x < minimum_d ? zero(c) : c - else - c = 1 - sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) - x >= maximum_d ? one(c) : c - end + result = + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) + x < minimum_d ? zero(c) : c + else + c = 1 - sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? one(c) : c + end return result end @@ -578,13 +599,15 @@ function integerunitrange_ccdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") - result = if isfinite(minimum_d) && !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) - x < minimum_d ? 
one(c) : c - else - c = sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) - x >= maximum_d ? zero(c) : c - end + result = + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) + x < minimum_d ? one(c) : c + else + c = sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? zero(c) : c + end return result end @@ -593,13 +616,15 @@ function integerunitrange_logcdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") - result = if isfinite(minimum_d) && !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = logsumexp(logpdf(d, y) for y in minimum_d:(max(x, minimum_d))) - x < minimum_d ? oftype(c, -Inf) : c - else - c = log1mexp(logsumexp(logpdf(d, y) for y in (min(x + 1, maximum_d)):maximum_d)) - x >= maximum_d ? zero(c) : c - end + result = + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d))) + x < minimum_d ? oftype(c, -Inf) : c + else + c = log1mexp(logsumexp(logpdf(d, y) for y = (min(x + 1, maximum_d)):maximum_d)) + x >= maximum_d ? zero(c) : c + end return result end @@ -608,13 +633,15 @@ function integerunitrange_logccdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") - result = if isfinite(minimum_d) && !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = log1mexp(logsumexp(logpdf(d, y) for y in minimum_d:(max(x, minimum_d)))) - x < minimum_d ? zero(c) : c - else - c = logsumexp(logpdf(d, y) for y in (min(x + 1, maximum_d)):maximum_d) - x >= maximum_d ? oftype(c, -Inf) : c - end + result = + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = log1mexp(logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d)))) + x < minimum_d ? zero(c) : c + else + c = logsumexp(logpdf(d, y) for y = (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? oftype(c, -Inf) : c + end return result end @@ -651,8 +678,10 @@ macro _delegate_statsfuns(D, fpre, psyms...) 
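The `integerunitrange_log*` helpers above use `log1mexp`/`logsumexp` to pass between a log-probability and its complement without leaving log space. A small sketch of the identity (calling LogExpFunctions directly, which is assumed to be the same implementation Distributions relies on):

    using Distributions, LogExpFunctions

    d = Poisson(3)
    log1mexp(logcdf(d, 5)) ≈ logccdf(d, 5)  # log(1 - cdf) evaluated stably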
$Distributions.quantile(d::$D, q::Real) = convert($T, $(finvcdf)($(pargs...), q)) $Distributions.cquantile(d::$D, q::Real) = convert($T, $(finvccdf)($(pargs...), q)) - $Distributions.invlogcdf(d::$D, lq::Real) = convert($T, $(finvlogcdf)($(pargs...), lq)) - $Distributions.invlogccdf(d::$D, lq::Real) = convert($T, $(finvlogccdf)($(pargs...), lq)) + $Distributions.invlogcdf(d::$D, lq::Real) = + convert($T, $(finvlogcdf)($(pargs...), lq)) + $Distributions.invlogccdf(d::$D, lq::Real) = + convert($T, $(finvlogccdf)($(pargs...), lq)) end end @@ -674,7 +703,7 @@ const discrete_distributions = [ "poisson", "skellam", "soliton", - "poissonbinomial" + "poissonbinomial", ] const continuous_distributions = [ @@ -691,7 +720,8 @@ const continuous_distributions = [ "exponential", "fdist", "frechet", - "gamma", "erlang", + "gamma", + "erlang", "pgeneralizedgaussian", # GeneralizedGaussian depends on Gamma "generalizedpareto", "generalizedextremevalue", @@ -730,7 +760,7 @@ const continuous_distributions = [ "loguniform", # depends on Uniform "vonmises", "weibull", - "skewedexponentialpower" + "skewedexponentialpower", ] include(joinpath("univariate", "locationscale.jl")) diff --git a/src/utils.jl b/src/utils.jl index cabe51dcd1..eaef6ec36e 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -114,8 +114,8 @@ end # because X == X' keeps failing due to floating point nonsense function isApproxSymmmetric(a::AbstractMatrix{Float64}) tmp = true - for j in 2:size(a, 1) - for i in 1:(j - 1) + for j = 2:size(a, 1) + for i = 1:(j-1) tmp &= abs(a[i, j] - a[j, i]) < 1e-8 end end @@ -138,17 +138,22 @@ julia> ispossemdef(A, 2) false ``` """ -function ispossemdef(X::AbstractMatrix, k::Int; - atol::Real=0.0, - rtol::Real=(minimum(size(X))*eps(real(float(one(eltype(X))))))*iszero(atol)) +function ispossemdef( + X::AbstractMatrix, + k::Int; + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), +) _check_rank_range(k, minimum(size(X))) ishermitian(X) || return false dp, dz, dn = eigsigns(Hermitian(X), atol, rtol) return dn == 0 && dp == k end -function ispossemdef(X::AbstractMatrix; - atol::Real=0.0, - rtol::Real=(minimum(size(X))*eps(real(float(one(eltype(X))))))*iszero(atol)) +function ispossemdef( + X::AbstractMatrix; + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), +) ishermitian(X) || return false dp, dz, dn = eigsigns(Hermitian(X), atol, rtol) return dn == 0 @@ -160,17 +165,19 @@ function _check_rank_range(k::Int, n::Int) end # return counts of the number of positive, zero, and negative eigenvalues -function eigsigns(X::AbstractMatrix, - atol::Real=0.0, - rtol::Real=(minimum(size(X))*eps(real(float(one(eltype(X))))))*iszero(atol)) +function eigsigns( + X::AbstractMatrix, + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), +) eigs = eigvals(X) eigsigns(eigs, atol, rtol) end -function eigsigns(eigs::Vector{<: Real}, atol::Real, rtol::Real) +function eigsigns(eigs::Vector{<:Real}, atol::Real, rtol::Real) tol = max(atol, rtol * eigs[end]) eigsigns(eigs, tol) end -function eigsigns(eigs::Vector{<: Real}, tol::Real) +function eigsigns(eigs::Vector{<:Real}, tol::Real) dp = count(x -> tol < x, eigs) # number of positive eigenvalues dz = count(x -> -tol < x < tol, eigs) # number of numerically zero eigenvalues dn = count(x -> x < -tol, eigs) # number of negative eigenvalues diff --git a/test/censored.jl b/test/censored.jl index 27f30cadfc..ef958d72d2 100644 --- 
a/test/censored.jl +++ b/test/censored.jl @@ -70,17 +70,17 @@ end @test d === d0 # bound keyword constructors - d = censored(d0; lower=-2, upper=1.5) + d = censored(d0; lower = -2, upper = 1.5) @test d isa Censored @test d.lower === -2.0 @test d.upper === 1.5 - d = censored(d0; upper=true) + d = censored(d0; upper = true) @test d isa Censored @test d.lower === nothing @test d.upper === true - d = censored(d0; lower=-3) + d = censored(d0; lower = -3) @test d isa Censored @test d.upper === nothing @test d.lower === -3 @@ -93,10 +93,10 @@ end @testset "basic" begin # check_args @test_throws ArgumentError Censored(Normal(0, 1), 2, 1) - @test_throws ArgumentError Censored(Normal(0, 1), 2, 1; check_args=true) - Censored(Normal(0, 1), 2, 1; check_args=false) - Censored(Normal(0, 1), nothing, 1; check_args=true) - Censored(Normal(0, 1), 2, nothing; check_args=true) + @test_throws ArgumentError Censored(Normal(0, 1), 2, 1; check_args = true) + Censored(Normal(0, 1), 2, 1; check_args = false) + Censored(Normal(0, 1), nothing, 1; check_args = true) + Censored(Normal(0, 1), 2, nothing; check_args = true) d = Censored(Normal(0.0, 1.0), -1, 2) @test d isa Censored @@ -111,7 +111,8 @@ end @test insupport(d, 2) @test !insupport(d, -1.1) @test !insupport(d, 2.1) - @test sprint(show, "text/plain", d) == "Censored($(Normal(0.0, 1.0)); lower=-1, upper=2)" + @test sprint(show, "text/plain", d) == + "Censored($(Normal(0.0, 1.0)); lower=-1, upper=2)" d = Censored(Cauchy(0, 1), nothing, 2) @test d isa Censored @@ -149,22 +150,32 @@ end @test insupport(d, 9.5) @test !insupport(d, 10) - @test censored(Censored(Normal(), 1, nothing), nothing, 2) == Censored(Normal(), 1, 2) - @test censored(Censored(Normal(), nothing, 1), -1, nothing) == Censored(Normal(), -1, 1) + @test censored(Censored(Normal(), 1, nothing), nothing, 2) == + Censored(Normal(), 1, 2) + @test censored(Censored(Normal(), nothing, 1), -1, nothing) == + Censored(Normal(), -1, 1) @test censored(Censored(Normal(), 1, 2), 1.5, 2.5) == Censored(Normal(), 1.5, 2.0) @test censored(Censored(Normal(), 1, 3), 1.5, 2.5) == Censored(Normal(), 1.5, 2.5) @test censored(Censored(Normal(), 1, 2), 0.5, 2.5) == Censored(Normal(), 1.0, 2.0) @test censored(Censored(Normal(), 1, 2), 0.5, 1.5) == Censored(Normal(), 1.0, 1.5) - @test censored(Censored(Normal(), nothing, 1), nothing, 1) == Censored(Normal(), nothing, 1) - @test censored(Censored(Normal(), nothing, 1), nothing, 2) == Censored(Normal(), nothing, 1) - @test censored(Censored(Normal(), nothing, 1), nothing, 1.5) == Censored(Normal(), nothing, 1) - @test censored(Censored(Normal(), nothing, 1.5), nothing, 1) == Censored(Normal(), nothing, 1) + @test censored(Censored(Normal(), nothing, 1), nothing, 1) == + Censored(Normal(), nothing, 1) + @test censored(Censored(Normal(), nothing, 1), nothing, 2) == + Censored(Normal(), nothing, 1) + @test censored(Censored(Normal(), nothing, 1), nothing, 1.5) == + Censored(Normal(), nothing, 1) + @test censored(Censored(Normal(), nothing, 1.5), nothing, 1) == + Censored(Normal(), nothing, 1) - @test censored(Censored(Normal(), 1, nothing), 1, nothing) == Censored(Normal(), 1, nothing) - @test censored(Censored(Normal(), 1, nothing), 2, nothing) == Censored(Normal(), 2, nothing) - @test censored(Censored(Normal(), 1, nothing), 1.5, nothing) == Censored(Normal(), 1.5, nothing) - @test censored(Censored(Normal(), 1.5, nothing), 1, nothing) == Censored(Normal(), 1.5, nothing) + @test censored(Censored(Normal(), 1, nothing), 1, nothing) == + Censored(Normal(), 1, nothing) + @test 
censored(Censored(Normal(), 1, nothing), 2, nothing) == + Censored(Normal(), 2, nothing) + @test censored(Censored(Normal(), 1, nothing), 1.5, nothing) == + Censored(Normal(), 1.5, nothing) + @test censored(Censored(Normal(), 1.5, nothing), 1, nothing) == + Censored(Normal(), 1.5, nothing) end @testset "Uniform" begin @@ -181,7 +192,10 @@ end (3.5, Inf), (-Inf, Inf), ] - @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for (lower, upper) in bounds + @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for ( + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) l, u = extrema(d) @@ -196,10 +210,10 @@ end @test u == upper end @testset for f in [cdf, logcdf, ccdf, logccdf] - @test @inferred(f(d, l)) ≈ f(dmix, l) atol=1e-8 - @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol=1e-8 - @test @inferred(f(d, u)) ≈ f(dmix, u) atol=1e-8 - @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol=1e-8 + @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1e-8 + @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1e-8 + @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1e-8 + @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1e-8 @test @inferred(f(d, 5)) ≈ f(dmix, 5) end @testset for f in [mean, var] @@ -207,7 +221,8 @@ end end @test @inferred(median(d)) ≈ clamp(median(d0), l, u) @inferred quantile(d, 0.5) - @test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + @test Base.Fix1(quantile, d).(0:0.01:1) ≈ + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with # Normal(μ, 0), they are infinite if lower === nothing || !isfinite(lower) @@ -238,22 +253,26 @@ end @testset "Normal" begin d0 = Normal() bounds = [(nothing, 0.2), (-0.1, nothing), (-0.1, 0.2)] - @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for (lower, upper) in bounds + @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for ( + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) l, u = extrema(d) @testset for f in [cdf, logcdf, ccdf, logccdf] - @test f(d, l) ≈ f(dmix, l) atol=1e-8 - @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol=1e-8 - @test f(d, u) ≈ f(dmix, u) atol=1e-8 - @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol=1e-8 + @test f(d, l) ≈ f(dmix, l) atol = 1e-8 + @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1e-8 + @test f(d, u) ≈ f(dmix, u) atol = 1e-8 + @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1e-8 @test f(d, 5) ≈ f(dmix, 5) end @testset for f in [mean, var] @test f(d) ≈ f(dmix) end @test median(d) ≈ clamp(median(d0), l, u) - @test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + @test Base.Fix1(quantile, d).(0:0.01:1) ≈ + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with # Normal(μ, 0), they are infinite if lower === nothing @@ -294,16 +313,19 @@ end (3.5, Inf), (-Inf, Inf), ] - @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for (lower, upper) in bounds + @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? 
"nothing" : upper)" for ( + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) l, u = extrema(d) @testset for f in [pdf, logpdf, cdf, logcdf, ccdf, logccdf] - @test @inferred(f(d, l)) ≈ f(dmix, l) atol=1e-8 - @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol=1e-8 - @test @inferred(f(d, u)) ≈ f(dmix, u) atol=1e-8 - @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol=1e-8 + @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1e-8 + @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1e-8 + @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1e-8 + @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1e-8 @test @inferred(f(d, 5)) ≈ f(dmix, 5) end @testset for f in [mean, var] @@ -311,7 +333,8 @@ end end @test @inferred(median(d)) ≈ clamp(median(d0), l, u) @inferred quantile(d, 0.5) - @test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + @test Base.Fix1(quantile, d).(0:0.01:1) ≈ + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # rand x = rand(d, 10_000) @test all(x -> insupport(d, x), x) @@ -333,20 +356,24 @@ end @testset "Poisson" begin d0 = Poisson(20) bounds = [(nothing, 12), (2, nothing), (2, 12), (8, nothing)] - @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for (lower, upper) in bounds + @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for ( + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) l, u = extrema(d) @testset for f in [pdf, logpdf, cdf, logcdf, ccdf, logccdf] - @test f(d, l) ≈ f(dmix, l) atol=1e-8 - @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol=1e-8 - @test f(d, u) ≈ f(dmix, u) atol=1e-8 - @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol=1e-8 + @test f(d, l) ≈ f(dmix, l) atol = 1e-8 + @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1e-8 + @test f(d, u) ≈ f(dmix, u) atol = 1e-8 + @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1e-8 @test f(d, 5) ≈ f(dmix, 5) end @test median(d) ≈ clamp(median(d0), l, u) - @test Base.Fix1(quantile, d).(0:0.01:0.99) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:0.99), l, u) + @test Base.Fix1(quantile, d).(0:0.01:0.99) ≈ + clamp.(Base.Fix1(quantile, d0).(0:0.01:0.99), l, u) x = rand(d, 100) @test loglikelihood(d, x) ≈ loglikelihood(dmix, x) # rand @@ -354,20 +381,29 @@ end @test all(x -> insupport(d, x), x) # mean, std @test mean(x) ≈ mean(x) atol = 1e-1 - @test std(x) ≈ std(x) atol = 1e-1 + @test std(x) ≈ std(x) atol = 1e-1 end end @testset "mixed types are still type-inferrible" begin bounds = [(nothing, 8), (2, nothing), (2, 8)] - @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper), uncensored partype=$T0, partype=$T" for (lower, upper) in bounds, - T in (Int, Float32, Float64), T0 in (Int, Float32, Float64) + @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper), uncensored partype=$T0, partype=$T" for ( + lower, + upper, + ) in bounds, + T in (Int, Float32, Float64), + T0 in (Int, Float32, Float64) + d0 = Uniform(T0(0), T0(10)) - d = censored(d0, lower === nothing ? nothing : T(lower), upper === nothing ? nothing : T(upper)) + d = censored( + d0, + lower === nothing ? nothing : T(lower), + upper === nothing ? 
nothing : T(upper), + ) l, u = extrema(d) @testset for f in [pdf, logpdf, cdf, logcdf, ccdf, logccdf] @inferred f(d, 3) - @inferred f(d, 4f0) + @inferred f(d, 4.0f0) @inferred f(d, 5.0) end @testset for f in [median, mean, var, entropy] @@ -383,4 +419,4 @@ end end end -end # module \ No newline at end of file +end # module diff --git a/test/cholesky/lkjcholesky.jl b/test/cholesky/lkjcholesky.jl index 82e22f606c..077b6d3821 100644 --- a/test/cholesky/lkjcholesky.jl +++ b/test/cholesky/lkjcholesky.jl @@ -5,37 +5,38 @@ using Test using FiniteDifferences @testset "LKJCholesky" begin - function test_draw(d::LKJCholesky, x; check_uplo=true) + function test_draw(d::LKJCholesky, x; check_uplo = true) @test insupport(d, x) check_uplo && @test x.uplo == d.uplo end - function test_draws(d::LKJCholesky, xs; check_uplo=true, nkstests=1) + function test_draws(d::LKJCholesky, xs; check_uplo = true, nkstests = 1) @test all(x -> insupport(d, x), xs) check_uplo && @test all(x -> x.uplo == d.uplo, xs) p = d.d dmat = LKJ(p, d.η) - marginal = Distributions._marginal(dmat) + marginal = Distributions._marginal(dmat) ndraws = length(xs) zs = Array{eltype(d)}(undef, p, p, ndraws) - for k in 1:ndraws + for k = 1:ndraws zs[:, :, k] = Matrix(xs[k]) end @testset "LKJCholesky marginal moments" begin - @test mean(zs; dims=3)[:, :, 1] ≈ I atol=0.1 - @test var(zs; dims=3)[:, :, 1] ≈ var(marginal) * (ones(p, p) - I) atol=0.1 - @testset for n in 2:5 - for i in 1:p, j in 1:(i-1) - @test moment(zs[i, j, :], n) ≈ moment(rand(marginal, ndraws), n) atol=0.1 + @test mean(zs; dims = 3)[:, :, 1] ≈ I atol = 0.1 + @test var(zs; dims = 3)[:, :, 1] ≈ var(marginal) * (ones(p, p) - I) atol = 0.1 + @testset for n = 2:5 + for i = 1:p, j = 1:(i-1) + @test moment(zs[i, j, :], n) ≈ moment(rand(marginal, ndraws), n) atol = + 0.1 end end end @testset "LKJCholesky marginal KS test" begin α = 0.01 - L = sum(1:(p - 1)) - for i in 1:p, j in 1:(i-1) + L = sum(1:(p-1)) + for i = 1:p, j = 1:(i-1) @test pvalue_kolmogorovsmirnoff(zs[i, j, :], marginal) >= α / L / nkstests end end @@ -47,14 +48,14 @@ using FiniteDifferences J = jacobian(central_fdm(5, 1), cholesky_vec_to_corr_vec, stricttril_to_vec(L))[1] return logabsdet(J)[1] end - stricttril_to_vec(L) = [L[i, j] for i in axes(L, 1) for j in 1:(i - 1)] + stricttril_to_vec(L) = [L[i, j] for i in axes(L, 1) for j = 1:(i-1)] function vec_to_stricttril(l) n = length(l) p = Int((1 + sqrt(8n + 1)) / 2) L = similar(l, p, p) fill!(L, 0) k = 1 - for i in 1:p, j in 1:(i - 1) + for i = 1:p, j = 1:(i-1) L[i, j] = l[k] k += 1 end @@ -136,7 +137,9 @@ using FiniteDifferences @test m isa Cholesky{eltype(d)} @test Matrix(m) ≈ I else - @test_throws DomainError(η, "LKJCholesky: mode is defined only when η > 1.") mode(d) + @test_throws DomainError(η, "LKJCholesky: mode is defined only when η > 1.") mode( + d, + ) end m = mode(d; check_args = false) @test m isa Cholesky{eltype(d)} @@ -150,8 +153,10 @@ using FiniteDifferences @test insupport(LKJCholesky(40, 2, 'U'), cholesky(rand(LKJ(40, 2)))) @test insupport(LKJCholesky(40, 2), cholesky(rand(LKJ(40, 2)))) @test !insupport(LKJCholesky(40, 2), cholesky(rand(LKJ(41, 2)))) - for (d, η) in ((2, 4), (2, 1), (3, 1)), T in (Float32, Float64) - @test @inferred(logpdf(LKJCholesky(40, T(2)), cholesky(T.(rand(LKJ(41, 2)))))) === T(-Inf) + for (d, η) in ((2, 4), (2, 1), (3, 1)), T in (Float32, Float64) + @test @inferred( + logpdf(LKJCholesky(40, T(2)), cholesky(T.(rand(LKJ(41, 2))))) + ) === T(-Inf) end z = rand(LKJ(40, 1)) z .+= exp(Symmetric(randn(size(z)))) .* 1e-8 @@ -192,7 +197,7 
+197,7 @@ using FiniteDifferences @testset for p in (2, 4, 10), η in (0.5, 1, 3), uplo in ('L', 'U') d = LKJCholesky(p, η, uplo) test_draw(d, rand(rng, d)) - test_draws(d, rand(rng, d, 10^4); nkstests=nkstests) + test_draws(d, rand(rng, d, 10^4); nkstests = nkstests) end @test_broken rand(rng, LKJCholesky(5, Inf)) ≈ I end @@ -215,19 +220,19 @@ using FiniteDifferences # allocating xs = Vector{typeof(x)}(undef, 10^4) rand!(rng, d, xs) - test_draws(d, xs; nkstests=nkstests) + test_draws(d, xs; nkstests = nkstests) F2 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs2 = [deepcopy(F2) for _ in 1:10^4] + xs2 = [deepcopy(F2) for _ = 1:10^4] xs2[1] = cholesky(exp(Symmetric(randn(rng, p + 1, p + 1)))) rand!(rng, d, xs2) - test_draws(d, xs2; nkstests=nkstests) + test_draws(d, xs2; nkstests = nkstests) # non-allocating F3 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs3 = [deepcopy(F3) for _ in 1:10^4] + xs3 = [deepcopy(F3) for _ = 1:10^4] rand!(rng, d, xs3) - test_draws(d, xs3; check_uplo = uplo == 'U', nkstests=nkstests) + test_draws(d, xs3; check_uplo = uplo == 'U', nkstests = nkstests) end end end diff --git a/test/convolution.jl b/test/convolution.jl index 38ea3943d4..dfa057a1be 100644 --- a/test/convolution.jl +++ b/test/convolution.jl @@ -70,24 +70,55 @@ using Test @testset "DiscreteNonParametric" begin d1 = DiscreteNonParametric([0, 1], [0.5, 0.5]) d2 = DiscreteNonParametric([1, 2], [0.5, 0.5]) - d_eps = DiscreteNonParametric([prevfloat(0.0), 0.0, nextfloat(0.0), 1.0], fill(1//4, 4)) - d10 = DiscreteNonParametric((1//10):(1//10):1, fill(1//10, 10)) - + d_eps = DiscreteNonParametric( + [prevfloat(0.0), 0.0, nextfloat(0.0), 1.0], + fill(1 // 4, 4), + ) + d10 = DiscreteNonParametric((1//10):(1//10):1, fill(1 // 10, 10)) + d_int_simple = @inferred(convolve(d1, d2)) @test d_int_simple isa DiscreteNonParametric @test support(d_int_simple) == [1, 2, 3] @test probs(d_int_simple) == [0.25, 0.5, 0.25] - + d_rat = convolve(d10, d10) @test support(d_rat) == (1//5):(1//10):2 - @test probs(d_rat) == [1//100, 1//50, 3//100, 1//25, 1//20, 3//50, 7//100, 2//25, 9//100, 1//10, - 9//100, 2//25, 7//100, 3//50, 1//20, 1//25, 3//100, 1//50, 1//100] + @test probs(d_rat) == [ + 1 // 100, + 1 // 50, + 3 // 100, + 1 // 25, + 1 // 20, + 3 // 50, + 7 // 100, + 2 // 25, + 9 // 100, + 1 // 10, + 9 // 100, + 2 // 25, + 7 // 100, + 3 // 50, + 1 // 20, + 1 // 25, + 3 // 100, + 1 // 50, + 1 // 100, + ] d_float_supp = convolve(d_eps, d_eps) - @test support(d_float_supp) == [2 * prevfloat(0.0), prevfloat(0.0), 0.0, nextfloat(0.0), 2 * nextfloat(0.0), 1.0, 2.0] - @test probs(d_float_supp) == [1//16, 1//8, 3//16, 1//8, 1//16, 3//8, 1//16] + @test support(d_float_supp) == [ + 2 * prevfloat(0.0), + prevfloat(0.0), + 0.0, + nextfloat(0.0), + 2 * nextfloat(0.0), + 1.0, + 2.0, + ] + @test probs(d_float_supp) == + [1 // 16, 1 // 8, 3 // 16, 1 // 8, 1 // 16, 3 // 8, 1 // 16] end - + end @testset "continuous univariate" begin @@ -160,22 +191,22 @@ end zmdn1 = MvNormal(Diagonal([1.2, 0.3])) zmdn2 = MvNormal(Diagonal([0.8, 1.0])) - m1 = Symmetric(rand(2,2)) + m1 = Symmetric(rand(2, 2)) fn1 = MvNormal(ones(2), m1^2) - m2 = Symmetric(rand(2,2)) + m2 = Symmetric(rand(2, 2)) fn2 = MvNormal([2.1, 0.4], m2^2) - m3 = Symmetric(rand(2,2)) + m3 = Symmetric(rand(2, 2)) zm1 = MvNormal(m3^2) - m4 = Symmetric(rand(2,2)) + m4 = Symmetric(rand(2, 2)) zm2 = MvNormal(m4^2) dist_list = (in1, in2, zmin1, zmin2, dn1, dn2, zmdn1, zmdn2, fn1, fn2, zm1, zm2) for (d1, d2) in Iterators.product(dist_list, dist_list) - d3 =
@inferred(convolve(d1, d2)) @test d3 isa MvNormal @test d3.μ == d1.μ .+ d2.μ @test Matrix(d3.Σ) == Matrix(d1.Σ + d2.Σ) # isequal not defined for PDMats diff --git a/test/density_interface.jl b/test/density_interface.jl index 96400ea3a2..d4791f1a44 100644 --- a/test/density_interface.jl +++ b/test/density_interface.jl @@ -14,9 +14,12 @@ end for di_func in (logdensityof, densityof) - @test_throws ArgumentError di_func(d_uv_continuous, [rand(d_uv_continuous) for i in 1:3]) - @test_throws ArgumentError di_func(d_mv, hcat([rand(d_mv) for i in 1:3]...)) - @test_throws ArgumentError di_func(d_av, [rand(d_av) for i in 1:3]) + @test_throws ArgumentError di_func( + d_uv_continuous, + [rand(d_uv_continuous) for i = 1:3], + ) + @test_throws ArgumentError di_func(d_mv, hcat([rand(d_mv) for i = 1:3]...)) + @test_throws ArgumentError di_func(d_av, [rand(d_av) for i = 1:3]) end end end diff --git a/test/eachvariate.jl b/test/eachvariate.jl index 60cd1f68d3..03c1489cdd 100644 --- a/test/eachvariate.jl +++ b/test/eachvariate.jl @@ -12,7 +12,7 @@ function FiniteDifferences.to_vec(xs::Distributions.EachVariate{V}) where {V} end # MWE in #1817 -struct FooEachvariate <: Sampleable{Multivariate, Continuous} end +struct FooEachvariate <: Sampleable{Multivariate,Continuous} end Base.length(::FooEachvariate) = 3 Base.eltype(::FooEachvariate) = Float64 function Distributions._rand!(rng::AbstractRNG, ::FooEachvariate, x::AbstractVector{<:Real}) diff --git a/test/edgecases.jl b/test/edgecases.jl index 3ed5ab3557..1ed465e832 100644 --- a/test/edgecases.jl +++ b/test/edgecases.jl @@ -21,5 +21,9 @@ using Test, Distributions, StatsBase fnecdf = ecdf(z) y = [-1.96, -1.644854, -1.281552, -0.6744898, 0, 0.6744898, 1.281552, 1.644854, 1.96] - @test isapprox(fnecdf(y), [0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975], atol=1e-3) + @test isapprox( + fnecdf(y), + [0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975], + atol = 1e-3, + ) end diff --git a/test/edgeworth.jl b/test/edgeworth.jl index 9ab7869fbf..5da5cbd6b9 100644 --- a/test/edgeworth.jl +++ b/test/edgeworth.jl @@ -4,40 +4,37 @@ using Distributions using Test -dg = Gamma(1,1) +dg = Gamma(1, 1) -d_s = EdgeworthSum(dg,10) -d_m = EdgeworthMean(dg,10) -d_z = EdgeworthZ(dg,10) +d_s = EdgeworthSum(dg, 10) +d_m = EdgeworthMean(dg, 10) +d_z = EdgeworthZ(dg, 10) -dg_s = Gamma(10,1) -dg_m = Gamma(10,0.1) -dg_za = Gamma(10,1/std(dg_s)) +dg_s = Gamma(10, 1) +dg_m = Gamma(10, 0.1) +dg_za = Gamma(10, 1 / std(dg_s)) for i = 0.01:0.01:0.99 - q = quantile(dg_s,i) - @test isapprox(quantile(d_s, i) , q , atol=0.02) - @test isapprox(cquantile(d_s, 1 - i), q , atol=0.02) - @test isapprox(cdf(d_s, q) , i , atol=0.002) - @test isapprox(ccdf(d_s, q) , 1 - i , atol=0.002) - @test isapprox(pdf(d_s, q) , pdf(dg_s,q), atol=0.005) - - q = quantile(dg_m,i) - @test isapprox(quantile(d_m, i) , q , atol=0.01) - @test isapprox(cquantile(d_m, 1 - i), q , atol=0.01) - @test isapprox(cdf(d_m, q) , i , atol=0.002) - @test isapprox(ccdf(d_m, q) , 1 - i , atol=0.002) - @test isapprox(pdf(d_m, q) , pdf(dg_m,q), atol=0.05) - - q = quantile(dg_za,i) - mean(dg_za) - @test isapprox(quantile(d_z, i) , q , atol=0.01) - @test isapprox(cquantile(d_z, 1 - i), q , atol=0.01) - @test isapprox(cdf(d_z, q) , i , atol=0.002) - @test isapprox(ccdf(d_z, q) , 1 - i , atol=0.002) - @test isapprox(pdf(d_z, q) , pdf(dg_za,q+mean(dg_za)), atol=0.02) + q = quantile(dg_s, i) + @test isapprox(quantile(d_s, i), q, atol = 0.02) + @test isapprox(cquantile(d_s, 1 - i), q, atol = 0.02) + @test isapprox(cdf(d_s, q), i, atol = 0.002) 
+ @test isapprox(ccdf(d_s, q), 1 - i, atol = 0.002) + @test isapprox(pdf(d_s, q), pdf(dg_s, q), atol = 0.005) + + q = quantile(dg_m, i) + @test isapprox(quantile(d_m, i), q, atol = 0.01) + @test isapprox(cquantile(d_m, 1 - i), q, atol = 0.01) + @test isapprox(cdf(d_m, q), i, atol = 0.002) + @test isapprox(ccdf(d_m, q), 1 - i, atol = 0.002) + @test isapprox(pdf(d_m, q), pdf(dg_m, q), atol = 0.05) + + q = quantile(dg_za, i) - mean(dg_za) + @test isapprox(quantile(d_z, i), q, atol = 0.01) + @test isapprox(cquantile(d_z, 1 - i), q, atol = 0.01) + @test isapprox(cdf(d_z, q), i, atol = 0.002) + @test isapprox(ccdf(d_z, q), 1 - i, atol = 0.002) + @test isapprox(pdf(d_z, q), pdf(dg_za, q + mean(dg_za)), atol = 0.02) end - - - diff --git a/test/fit.jl b/test/fit.jl index 143f0b6ee4..a866dcd90a 100644 --- a/test/fit.jl +++ b/test/fit.jl @@ -14,7 +14,7 @@ N = 10^5 rng = MersenneTwister(123) -const funcs = ([rand,rand], [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) +const funcs = ([rand, rand], [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) @testset "Testing fit for DiscreteUniform" begin for func in funcs @@ -40,28 +40,28 @@ end for x in (z, OffsetArray(z, -n0 ÷ 2)), w in (v, OffsetArray(v, -n0 ÷ 2)) ss = @inferred suffstats(D, x) @test ss isa Distributions.BernoulliStats - @test ss.cnt0 == n0 - count(t->t != 0, z) - @test ss.cnt1 == count(t->t != 0, z) + @test ss.cnt0 == n0 - count(t -> t != 0, z) + @test ss.cnt1 == count(t -> t != 0, z) ss = @inferred suffstats(D, x, w) @test ss isa Distributions.BernoulliStats - @test ss.cnt0 ≈ sum(v[z .== 0]) - @test ss.cnt1 ≈ sum(v[z .== 1]) + @test ss.cnt0 ≈ sum(v[z.==0]) + @test ss.cnt1 ≈ sum(v[z.==1]) d = @inferred fit(D, x) @test d isa D - @test mean(d) ≈ count(t->t != 0, z) / n0 + @test mean(d) ≈ count(t -> t != 0, z) / n0 d = @inferred fit(D, x, w) @test d isa D - @test mean(d) ≈ sum(v[z .== 1]) / sum(v) + @test mean(d) ≈ sum(v[z.==1]) / sum(v) end z = rand(rng..., Bernoulli(0.7), N) for x in (z, OffsetArray(z, -N ÷ 2)) d = @inferred fit(D, x) @test d isa D - @test mean(d) ≈ 0.7 atol=0.01 + @test mean(d) ≈ 0.7 atol = 0.01 end end end @@ -70,13 +70,13 @@ end for func in funcs, dist in (Beta, Beta{Float64}) d = fit(dist, func[2](dist(1.3, 3.7), N)) @test isa(d, dist) - @test isapprox(d.α, 1.3, atol=0.1) - @test isapprox(d.β, 3.7, atol=0.1) + @test isapprox(d.α, 1.3, atol = 0.1) + @test isapprox(d.β, 3.7, atol = 0.1) d = fit_mle(dist, func[2](dist(1.3, 3.7), N)) @test isa(d, dist) - @test isapprox(d.α, 1.3, atol=0.1) - @test isapprox(d.β, 3.7, atol=0.1) + @test isapprox(d.α, 1.3, atol = 0.1) + @test isapprox(d.β, 3.7, atol = 0.1) end end @@ -114,7 +114,7 @@ end d = @inferred fit(D, 100, x) @test d isa D @test ntrials(d) == 100 - @test succprob(d) ≈ 0.3 atol=0.01 + @test succprob(d) ≈ 0.3 atol = 0.01 end end end @@ -128,7 +128,7 @@ end w = func[1](n0) ss = suffstats(Categorical, (3, x)) - h = Float64[count(v->v == i, x) for i = 1 : 3] + h = Float64[count(v -> v == i, x) for i = 1:3] @test isa(ss, Distributions.CategoricalStats) @test ss.h ≈ h @@ -142,7 +142,7 @@ end @test probs(d2) == probs(d) ss = suffstats(Categorical, (3, x), w) - h = Float64[sum(w[x .== i]) for i = 1 : 3] + h = Float64[sum(w[x.==i]) for i = 1:3] @test isa(ss, Distributions.CategoricalStats) @test ss.h ≈ h @@ -156,7 +156,7 @@ end d = fit(Categorical, func[2](Categorical(p), N)) @test isa(d, Categorical) - @test isapprox(probs(d), p, atol=0.01) + @test isapprox(probs(d), p, atol = 0.01) end end @@ -190,7 +190,7 @@ end d = fit(dist, func[2](dist(0.5), N)) @test 
isa(d, dist) - @test isapprox(scale(d), 0.5, atol=0.01) + @test isapprox(scale(d), 0.5, atol = 0.01) end end @@ -204,32 +204,32 @@ end ss = suffstats(dist, x) @test isa(ss, Distributions.NormalStats) - @test ss.s ≈ sum(x) - @test ss.m ≈ mean(x) - @test ss.s2 ≈ sum((x .- ss.m).^2) + @test ss.s ≈ sum(x) + @test ss.m ≈ mean(x) + @test ss.s2 ≈ sum((x .- ss.m) .^ 2) @test ss.tw ≈ n0 ss = suffstats(dist, x, w) @test isa(ss, Distributions.NormalStats) - @test ss.s ≈ dot(x, w) - @test ss.m ≈ dot(x, w) / sum(w) - @test ss.s2 ≈ dot((x .- ss.m).^2, w) + @test ss.s ≈ dot(x, w) + @test ss.m ≈ dot(x, w) / sum(w) + @test ss.s2 ≈ dot((x .- ss.m) .^ 2, w) @test ss.tw ≈ sum(w) d = fit(dist, x) @test isa(d, dist) @test d.μ ≈ mean(x) - @test d.σ ≈ sqrt(mean((x .- d.μ).^2)) + @test d.σ ≈ sqrt(mean((x .- d.μ) .^ 2)) d = fit(dist, x, w) @test isa(d, dist) @test d.μ ≈ dot(x, w) / sum(w) - @test d.σ ≈ sqrt(dot((x .- d.μ).^2, w) / sum(w)) + @test d.σ ≈ sqrt(dot((x .- d.μ) .^ 2, w) / sum(w)) d = fit(dist, func[2](dist(μ, σ), N)) @test isa(d, dist) - @test isapprox(d.μ, μ, atol=0.1) - @test isapprox(d.σ, σ, atol=0.1) + @test isapprox(d.μ, μ, atol = 0.1) + @test isapprox(d.σ, σ, atol = 0.1) end end @@ -252,18 +252,18 @@ end ss = suffstats(NormalKnownMu(μ), x, w) @test isa(ss, Distributions.NormalKnownMuStats) @test ss.μ == μ - @test ss.s2 ≈ dot((x .- μ).^2, w) + @test ss.s2 ≈ dot((x .- μ) .^ 2, w) @test ss.tw ≈ sum(w) - d = fit_mle(Normal, x; mu=μ) + d = fit_mle(Normal, x; mu = μ) @test isa(d, Normal) @test d.μ == μ - @test d.σ ≈ sqrt(mean((x .- d.μ).^2)) + @test d.σ ≈ sqrt(mean((x .- d.μ) .^ 2)) - d = fit_mle(Normal, x, w; mu=μ) + d = fit_mle(Normal, x, w; mu = μ) @test isa(d, Normal) @test d.μ == μ - @test d.σ ≈ sqrt(dot((x .- d.μ).^2, w) / sum(w)) + @test d.σ ≈ sqrt(dot((x .- d.μ) .^ 2, w) / sum(w)) ss = suffstats(NormalKnownSigma(σ), x) @@ -278,12 +278,12 @@ end @test ss.sx ≈ dot(x, w) @test ss.tw ≈ sum(w) - d = fit_mle(Normal, x; sigma=σ) + d = fit_mle(Normal, x; sigma = σ) @test isa(d, Normal) @test d.σ == σ @test d.μ ≈ mean(x) - d = fit_mle(Normal, x, w; sigma=σ) + d = fit_mle(Normal, x, w; sigma = σ) @test isa(d, Normal) @test d.σ == σ @test d.μ ≈ dot(x, w) / sum(w) @@ -306,8 +306,8 @@ end d = fit(D, x) @test d isa D @test 1.2 <= minimum(d) <= maximum(d) <= 5.8 - @test minimum(d) ≈ 1.2 atol=0.02 - @test maximum(d) ≈ 5.8 atol=0.02 + @test minimum(d) ≈ 1.2 atol = 0.02 + @test maximum(d) ≈ 5.8 atol = 0.02 end end end @@ -319,20 +319,20 @@ end ss = suffstats(dist, x) @test isa(ss, Distributions.GammaStats) - @test ss.sx ≈ sum(x) + @test ss.sx ≈ sum(x) @test ss.slogx ≈ sum(log.(x)) - @test ss.tw ≈ n0 + @test ss.tw ≈ n0 ss = suffstats(dist, x, w) @test isa(ss, Distributions.GammaStats) - @test ss.sx ≈ dot(x, w) + @test ss.sx ≈ dot(x, w) @test ss.slogx ≈ dot(log.(x), w) - @test ss.tw ≈ sum(w) + @test ss.tw ≈ sum(w) d = fit(dist, func[2](dist(3.9, 2.1), N)) @test isa(d, dist) - @test isapprox(shape(d), 3.9, atol=0.1) - @test isapprox(scale(d), 2.1, atol=0.2) + @test isapprox(shape(d), 3.9, atol = 0.1) + @test isapprox(scale(d), 2.1, atol = 0.2) end end @@ -353,15 +353,15 @@ end d = fit(dist, x) @test isa(d, dist) - @test succprob(d) ≈ inv(1. + mean(x)) + @test succprob(d) ≈ inv(1.0 + mean(x)) d = fit(dist, x, w) @test isa(d, dist) - @test succprob(d) ≈ inv(1. 
+ dot(x, w) / sum(w)) + @test succprob(d) ≈ inv(1.0 + dot(x, w) / sum(w)) d = fit(dist, func[2](dist(0.3), N)) @test isa(d, dist) - @test isapprox(succprob(d), 0.3, atol=0.01) + @test isapprox(succprob(d), 0.3, atol = 0.01) end end @@ -369,19 +369,19 @@ end for func in funcs, dist in (Laplace, Laplace{Float64}) d = fit(dist, func[2](dist(5.0, 3.0), N + 1)) @test isa(d, dist) - @test isapprox(location(d), 5.0, atol=0.03) - @test isapprox(scale(d) , 3.0, atol=0.03) + @test isapprox(location(d), 5.0, atol = 0.03) + @test isapprox(scale(d), 3.0, atol = 0.03) end end @testset "Testing fit for Pareto" begin for func in funcs, dist in (Pareto, Pareto{Float64}) - x = func[2](dist(3., 7.), N) + x = func[2](dist(3.0, 7.0), N) d = fit(dist, x) @test isa(d, dist) - @test isapprox(shape(d), 3., atol=0.1) - @test isapprox(scale(d), 7., atol=0.1) + @test isapprox(shape(d), 3.0, atol = 0.1) + @test isapprox(scale(d), 7.0, atol = 0.1) end end @@ -410,7 +410,7 @@ end d = fit(dist, func[2](dist(8.2), N)) @test isa(d, dist) - @test isapprox(mean(d), 8.2, atol=0.2) + @test isapprox(mean(d), 8.2, atol = 0.2) end end @@ -421,24 +421,24 @@ end ss = suffstats(dist, x) @test isa(ss, Distributions.InverseGaussianStats) - @test ss.sx ≈ sum(x) + @test ss.sx ≈ sum(x) @test ss.sinvx ≈ sum(1 ./ x) - @test ss.sw ≈ n0 + @test ss.sw ≈ n0 ss = suffstats(dist, x, w) @test isa(ss, Distributions.InverseGaussianStats) - @test ss.sx ≈ dot(x, w) + @test ss.sx ≈ dot(x, w) @test ss.sinvx ≈ dot(1 ./ x, w) - @test ss.sw ≈ sum(w) + @test ss.sw ≈ sum(w) d = fit(dist, rand(dist(3.9, 2.1), N)) @test isa(d, dist) - @test isapprox(mean(d), 3.9, atol=0.1) - @test isapprox(shape(d), 2.1, atol=0.1) + @test isapprox(mean(d), 3.9, atol = 0.1) + @test isapprox(shape(d), 2.1, atol = 0.1) d = fit_mle(dist, rand(dist(3.9, 2.1), N)) - @test isapprox(mean(d), 3.9, atol=0.1) - @test isapprox(shape(d), 2.1, atol=0.1) + @test isapprox(mean(d), 3.9, atol = 0.1) + @test isapprox(shape(d), 2.1, atol = 0.1) end end @@ -448,7 +448,7 @@ end d = fit(dist, x) @test isa(d, dist) - @test isapprox(mode(d), 3.6, atol=0.1) + @test isapprox(mode(d), 3.6, atol = 0.1) # Test automatic differentiation f(x) = mean(fit(Rayleigh, x)) diff --git a/test/functionals.jl b/test/functionals.jl index 01357355a5..bbc04aeeaa 100644 --- a/test/functionals.jl +++ b/test/functionals.jl @@ -25,8 +25,8 @@ end # univariate distributions for d in (Normal(), Poisson(2.0), Binomial(10, 0.4)) m = Distributions.expectation(identity, d) - @test m ≈ mean(d) atol=1e-3 - @test Distributions.expectation(x -> (x - mean(d))^2, d) ≈ var(d) atol=1e-3 + @test m ≈ mean(d) atol = 1e-3 + @test Distributions.expectation(x -> (x - mean(d))^2, d) ≈ var(d) atol = 1e-3 @test @test_deprecated(Distributions.expectation(d, identity, 1e-10)) == m @test @test_deprecated(Distributions.expectation(d, identity)) == m @@ -34,19 +34,31 @@ end # multivariate distribution d = MvNormal([1.5, -0.5], I) - @test Distributions.expectation(identity, d; nsamples=10_000) ≈ mean(d) atol=5e-2 - @test @test_deprecated(Distributions.expectation(d, identity; nsamples=10_000)) ≈ mean(d) atol=5e-2 + @test Distributions.expectation(identity, d; nsamples = 10_000) ≈ mean(d) atol = 5e-2 + @test @test_deprecated(Distributions.expectation(d, identity; nsamples = 10_000)) ≈ + mean(d) atol = 5e-2 end @testset "KL divergences" begin function test_kl(p, q) @test kldivergence(p, q) >= 0 - @test kldivergence(p, p) ≈ 0 atol=1e-1 - @test kldivergence(q, q) ≈ 0 atol=1e-1 + @test kldivergence(p, p) ≈ 0 atol = 1e-1 + @test kldivergence(q, q) ≈ 0 atol = 
1e-1 if p isa UnivariateDistribution - @test kldivergence(p, q) ≈ invoke(kldivergence, Tuple{UnivariateDistribution,UnivariateDistribution}, p, q) atol=1e-1 + @test kldivergence(p, q) ≈ invoke( + kldivergence, + Tuple{UnivariateDistribution,UnivariateDistribution}, + p, + q, + ) atol = 1e-1 elseif p isa MultivariateDistribution - @test kldivergence(p, q) ≈ invoke(kldivergence, Tuple{MultivariateDistribution,MultivariateDistribution}, p, q; nsamples=10000) atol=1e-1 + @test kldivergence(p, q) ≈ invoke( + kldivergence, + Tuple{MultivariateDistribution,MultivariateDistribution}, + p, + q; + nsamples = 10000, + ) atol = 1e-1 end end @@ -68,22 +80,23 @@ end @test kldivergence(p, q) ≈ 3 * kldivergence(Bernoulli(0.3), Bernoulli(0.5)) end @testset "Categorical" begin - @test kldivergence(Categorical([0.0, 0.1, 0.9]), Categorical([0.1, 0.1, 0.8])) ≥ 0 + @test kldivergence(Categorical([0.0, 0.1, 0.9]), Categorical([0.1, 0.1, 0.8])) ≥ + 0 @test kldivergence(Categorical([0.0, 0.1, 0.9]), Categorical([0.1, 0.1, 0.8])) ≈ - kldivergence([0.0, 0.1, 0.9], [0.1, 0.1, 0.8]) + kldivergence([0.0, 0.1, 0.9], [0.1, 0.1, 0.8]) end @testset "Chi" begin p = Chi(4.0) q = Chi(3.0) test_kl(p, q) - @test kldivergence(p, q) ≈ kldivergence(Gamma(2., 0.5), Gamma(1.5, 0.5)) + @test kldivergence(p, q) ≈ kldivergence(Gamma(2.0, 0.5), Gamma(1.5, 0.5)) end @testset "Chisq" begin p = Chisq(4.0) q = Chisq(3.0) test_kl(p, q) @test kldivergence(p, q) ≈ kldivergence(Chi(4.0), Chi(3.0)) - @test kldivergence(p, q) ≈ kldivergence(Gamma(2., 0.5), Gamma(1.5, 0.5)) + @test kldivergence(p, q) ≈ kldivergence(Gamma(2.0, 0.5), Gamma(1.5, 0.5)) end @testset "Exponential" begin p = Exponential(2.0) @@ -99,7 +112,7 @@ end p = Geometric(0.3) q = Geometric(0.4) test_kl(p, q) - + x1 = nextfloat(0.0) x2 = prevfloat(1.0) p1 = Geometric(x1) @@ -110,8 +123,8 @@ end @test kldivergence(p2, p1) ≈ -log(x1) @test isinf(kldivergence(p1, Geometric(0.5))) @test kldivergence(p2, Geometric(0.5)) ≈ -log(0.5) - @test kldivergence(Geometric(0.5), p2) ≈ 2*log(0.5) - log(1-x2) - @test kldivergence(Geometric(0.5), p1) ≈ 2*log(0.5) - log(x1) + @test kldivergence(Geometric(0.5), p2) ≈ 2 * log(0.5) - log(1 - x2) + @test kldivergence(Geometric(0.5), p1) ≈ 2 * log(0.5) - log(x1) end @testset "InverseGamma" begin p = InverseGamma(2.0, 1.0) @@ -150,7 +163,8 @@ end p = NormalCanon(1, 2) q = NormalCanon(3, 4) test_kl(p, q) - @test kldivergence(p, q) ≈ kldivergence(Normal(1/2, 1/sqrt(2)), Normal(3/4, 1/2)) + @test kldivergence(p, q) ≈ + kldivergence(Normal(1 / 2, 1 / sqrt(2)), Normal(3 / 4, 1 / 2)) end @testset "Poisson" begin p = Poisson(4.0) diff --git a/test/gradlogpdf.jl b/test/gradlogpdf.jl index f4216a67e6..2192855d25 100644 --- a/test/gradlogpdf.jl +++ b/test/gradlogpdf.jl @@ -4,24 +4,30 @@ using Test # Test for gradlogpdf on univariate distributions -@test isapprox(gradlogpdf(Beta(1.5, 3.0), 0.7) , -5.9523809523809526 , atol=1.0e-8) -@test isapprox(gradlogpdf(Chi(5.0), 5.5) , -4.7727272727272725 , atol=1.0e-8) -@test isapprox(gradlogpdf(Chisq(7.0), 12.0) , -0.29166666666666663, atol=1.0e-8) -@test isapprox(gradlogpdf(Exponential(2.0), 7.0) , -0.5 , atol=1.0e-8) -@test isapprox(gradlogpdf(Gamma(9.0, 0.5), 11.0) , -1.2727272727272727 , atol=1.0e-8) -@test isapprox(gradlogpdf(Gumbel(3.5, 1.0), 4.0) , -0.3934693402873666 , atol=1.0e-8) -@test isapprox(gradlogpdf(Laplace(7.0), 34.0) , -1.0 , atol=1.0e-8) -@test isapprox(gradlogpdf(Logistic(-6.0), 1.0) , -0.9981778976111987 , atol=1.0e-8) -@test isapprox(gradlogpdf(LogNormal(5.5), 2.0) , 1.9034264097200273 , atol=1.0e-8) 
-@test isapprox(gradlogpdf(Normal(-4.5, 2.0), 1.6) , -1.525 , atol=1.0e-8) -@test isapprox(gradlogpdf(TDist(8.0), 9.1) , -0.9018830525272548 , atol=1.0e-8) -@test isapprox(gradlogpdf(Weibull(2.0), 3.5) , -6.714285714285714 , atol=1.0e-8) -@test isapprox(gradlogpdf(Uniform(-1.0, 1.0), 0.3), 0.0 , atol=1.0e-8) +@test isapprox(gradlogpdf(Beta(1.5, 3.0), 0.7), -5.9523809523809526, atol = 1.0e-8) +@test isapprox(gradlogpdf(Chi(5.0), 5.5), -4.7727272727272725, atol = 1.0e-8) +@test isapprox(gradlogpdf(Chisq(7.0), 12.0), -0.29166666666666663, atol = 1.0e-8) +@test isapprox(gradlogpdf(Exponential(2.0), 7.0), -0.5, atol = 1.0e-8) +@test isapprox(gradlogpdf(Gamma(9.0, 0.5), 11.0), -1.2727272727272727, atol = 1.0e-8) +@test isapprox(gradlogpdf(Gumbel(3.5, 1.0), 4.0), -0.3934693402873666, atol = 1.0e-8) +@test isapprox(gradlogpdf(Laplace(7.0), 34.0), -1.0, atol = 1.0e-8) +@test isapprox(gradlogpdf(Logistic(-6.0), 1.0), -0.9981778976111987, atol = 1.0e-8) +@test isapprox(gradlogpdf(LogNormal(5.5), 2.0), 1.9034264097200273, atol = 1.0e-8) +@test isapprox(gradlogpdf(Normal(-4.5, 2.0), 1.6), -1.525, atol = 1.0e-8) +@test isapprox(gradlogpdf(TDist(8.0), 9.1), -0.9018830525272548, atol = 1.0e-8) +@test isapprox(gradlogpdf(Weibull(2.0), 3.5), -6.714285714285714, atol = 1.0e-8) +@test isapprox(gradlogpdf(Uniform(-1.0, 1.0), 0.3), 0.0, atol = 1.0e-8) # Test for gradlogpdf on multivariate distributions -@test isapprox(gradlogpdf(MvNormal([1., 2.], [1. 0.1; 0.1 1.]), [0.7, 0.9]) , - [0.191919191919192, 1.080808080808081] ,atol=1.0e-8) -@test isapprox(gradlogpdf(MvTDist(5., [1., 2.], [1. 0.1; 0.1 1.]), [0.7, 0.9]), - [0.2150711513583442, 1.2111901681759383] ,atol=1.0e-8) +@test isapprox( + gradlogpdf(MvNormal([1.0, 2.0], [1.0 0.1; 0.1 1.0]), [0.7, 0.9]), + [0.191919191919192, 1.080808080808081], + atol = 1.0e-8, +) +@test isapprox( + gradlogpdf(MvTDist(5.0, [1.0, 2.0], [1.0 0.1; 0.1 1.0]), [0.7, 0.9]), + [0.2150711513583442, 1.2111901681759383], + atol = 1.0e-8, +) diff --git a/test/matrixvariates.jl b/test/matrixvariates.jl index 9235cdaf5d..aadde2fef5 100644 --- a/test/matrixvariates.jl +++ b/test/matrixvariates.jl @@ -8,539 +8,591 @@ import JSON import Distributions: _univariate, _multivariate, _rand_params @testset "matrixvariates" begin -#= - 1. baseline tests - 2. compare 1 x 1 matrix-variate with univariate - 3. compare row/column matrix-variate with multivariate - 4. compare density evaluation against archived Stan output - 5. special, distribution-specific tests - 6. main testing method - 7. run matrix-variate unit tests -=# - -# ============================================================================= -# 1. 
baseline test -# ============================================================================= - -# -------------------------------------------------- -# Check that a random draw from d has the right properties -# -------------------------------------------------- - -function test_draw(d::MatrixDistribution, X::AbstractMatrix) - @test size(d) == size(X) - @test size(d, 1) == size(X, 1) - @test size(d, 2) == size(X, 2) - @test length(d) == length(X) - @test rank(d) == rank(X) - @test insupport(d, X) - @test logpdf(d, X) ≈ log(pdf(d, X)) - @test logpdf(d, [X, X]) ≈ log.(pdf(d, [X, X])) - @test loglikelihood(d, X) ≈ logpdf(d, X) - @test loglikelihood(d, [X, X]) ≈ 2 * logpdf(d, X) - if d isa MatrixFDist - # Broken since `pdadd` is not defined for SubArray - @test_broken loglikelihood(d, cat(X, X; dims=3)) ≈ 2 * logpdf(d, X) - else - @test loglikelihood(d, cat(X, X; dims=3)) ≈ 2 * logpdf(d, X) + #= + 1. baseline tests + 2. compare 1 x 1 matrix-variate with univariate + 3. compare row/column matrix-variate with multivariate + 4. compare density evaluation against archived Stan output + 5. special, distribution-specific tests + 6. main testing method + 7. run matrix-variate unit tests + =# + + # ============================================================================= + # 1. baseline test + # ============================================================================= + + # -------------------------------------------------- + # Check that a random draw from d has the right properties + # -------------------------------------------------- + + function test_draw(d::MatrixDistribution, X::AbstractMatrix) + @test size(d) == size(X) + @test size(d, 1) == size(X, 1) + @test size(d, 2) == size(X, 2) + @test length(d) == length(X) + @test rank(d) == rank(X) + @test insupport(d, X) + @test logpdf(d, X) ≈ log(pdf(d, X)) + @test logpdf(d, [X, X]) ≈ log.(pdf(d, [X, X])) + @test loglikelihood(d, X) ≈ logpdf(d, X) + @test loglikelihood(d, [X, X]) ≈ 2 * logpdf(d, X) + if d isa MatrixFDist + # Broken since `pdadd` is not defined for SubArray + @test_broken loglikelihood(d, cat(X, X; dims = 3)) ≈ 2 * logpdf(d, X) + else + @test loglikelihood(d, cat(X, X; dims = 3)) ≈ 2 * logpdf(d, X) + end + nothing end - nothing -end - -test_draw(d::MatrixDistribution) = test_draw(d, rand(d)) -# -------------------------------------------------- -# Check that sample quantities are close to population quantities -# -------------------------------------------------- + test_draw(d::MatrixDistribution) = test_draw(d, rand(d)) -function test_draws(d::MatrixDistribution, draws::AbstractArray{<:AbstractMatrix}) - @test mean(draws) ≈ mean(d) rtol = 0.01 - draws_matrix = mapreduce(vec, hcat, draws) - @test cov(draws_matrix; dims=2) ≈ cov(d) rtol = 0.1 - nothing -end - -function test_draws(d::LKJ, draws::AbstractArray{<:AbstractMatrix}) - @test isapprox(mean(draws), mean(d), atol = 0.1) - @test isapprox(var(draws), var(d) , atol = 0.1) - nothing -end + # -------------------------------------------------- + # Check that sample quantities are close to population quantities + # -------------------------------------------------- -function test_draws(d::MatrixDistribution, M::Integer) - rng = MersenneTwister(123) - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) - test_draws(d, func[2](d, M)) + function test_draws(d::MatrixDistribution, draws::AbstractArray{<:AbstractMatrix}) + @test mean(draws) ≈ mean(d) 
rtol = 0.01 + draws_matrix = mapreduce(vec, hcat, draws) + @test cov(draws_matrix; dims = 2) ≈ cov(d) rtol = 0.1 + nothing end - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand!(..., false)" => [(dist, X) -> rand!(dist, X, false), rand!], - "rand!(rng, ..., false)" => [(dist, X) -> rand!(rng, dist, X, false), (dist, X) -> rand!(rng, dist, X)]) - m = [Matrix{partype(d)}(undef, size(d)) for _ in Base.OneTo(M)] - x = func[1](d, m) - @test x ≡ m - @test isapprox(mean(x) , mean(d) , atol = 0.1) - m3 = Array{partype(d), 3}(undef, size(d)..., M) - x = func[2](d, m3) - @test x ≡ m3 - @test isapprox(mean(x) , mean(mean(d)) , atol = 0.1) + + function test_draws(d::LKJ, draws::AbstractArray{<:AbstractMatrix}) + @test isapprox(mean(draws), mean(d), atol = 0.1) + @test isapprox(var(draws), var(d), atol = 0.1) + nothing end - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand!(..., true)" => (dist, X) -> rand!(dist, X, true), - "rand!(rng, true)" => (dist, X) -> rand!(rng, dist, X, true)) - m = Vector{Matrix{partype(d)}}(undef, M) - x = func(d, m) - @test x ≡ m - @test isapprox(mean(x), mean(d) , atol = 0.1) + + function test_draws(d::MatrixDistribution, M::Integer) + rng = MersenneTwister(123) + @testset "Testing matrix-variates with $key" for (key, func) in Dict( + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) + test_draws(d, func[2](d, M)) + end + @testset "Testing matrix-variates with $key" for (key, func) in Dict( + "rand!(..., false)" => [(dist, X) -> rand!(dist, X, false), rand!], + "rand!(rng, ..., false)" => + [(dist, X) -> rand!(rng, dist, X, false), (dist, X) -> rand!(rng, dist, X)], + ) + m = [Matrix{partype(d)}(undef, size(d)) for _ in Base.OneTo(M)] + x = func[1](d, m) + @test x ≡ m + @test isapprox(mean(x), mean(d), atol = 0.1) + m3 = Array{partype(d),3}(undef, size(d)..., M) + x = func[2](d, m3) + @test x ≡ m3 + @test isapprox(mean(x), mean(mean(d)), atol = 0.1) + end + @testset "Testing matrix-variates with $key" for (key, func) in Dict( + "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), + "rand!(rng, true)" => (dist, X) -> rand!(rng, dist, X, true), + ) + m = Vector{Matrix{partype(d)}}(undef, M) + x = func(d, m) + @test x ≡ m + @test isapprox(mean(x), mean(d), atol = 0.1) + end + repeats = 10 + m = Vector{Matrix{partype(d)}}(undef, repeats) + rand!(d, m) + @test isassigned(m, 1) + m1 = m[1] + rand!(d, m) + @test m1 ≡ m[1] + rand!(d, m, true) + @test m1 ≢ m[1] + m1 = m[1] + rand!(d, m, false) + @test m1 ≡ m[1] + nothing end - repeats = 10 - m = Vector{Matrix{partype(d)}}(undef, repeats) - rand!(d, m) - @test isassigned(m, 1) - m1=m[1] - rand!(d, m) - @test m1 ≡ m[1] - rand!(d, m, true) - @test m1 ≢ m[1] - m1 = m[1] - rand!(d, m, false) - @test m1 ≡ m[1] - nothing -end -# -------------------------------------------------- -# Check that the convert and partype methods work -# -------------------------------------------------- - -function test_convert(d::MatrixDistribution) - distname = getproperty(parentmodule(typeof(d)), nameof(typeof(d))) - @test distname(params(d)...) == d - @test d == deepcopy(d) - for elty in (Float32, Float64, BigFloat) - del1 = convert(distname{elty}, d) - del2 = convert(distname{elty}, (Base.Fix1(getfield, d)).(fieldnames(typeof(d)))...) 
- @test del1 isa distname{elty} - @test del2 isa distname{elty} - @test partype(del1) == elty - @test partype(del2) == elty - if elty === partype(d) - @test del1 === d + # -------------------------------------------------- + # Check that the convert and partype methods work + # -------------------------------------------------- + + function test_convert(d::MatrixDistribution) + distname = getproperty(parentmodule(typeof(d)), nameof(typeof(d))) + @test distname(params(d)...) == d + @test d == deepcopy(d) + for elty in (Float32, Float64, BigFloat) + del1 = convert(distname{elty}, d) + del2 = + convert(distname{elty}, (Base.Fix1(getfield, d)).(fieldnames(typeof(d)))...) + @test del1 isa distname{elty} + @test del2 isa distname{elty} + @test partype(del1) == elty + @test partype(del2) == elty + if elty === partype(d) + @test del1 === d + end end + nothing end - nothing -end -# -------------------------------------------------- -# Check that cov and var agree -# -------------------------------------------------- + # -------------------------------------------------- + # Check that cov and var agree + # -------------------------------------------------- -function test_cov(d::MatrixDistribution) - @test vec(var(d)) ≈ diag(cov(d)) - @test reshape(cov(d), size(d)..., size(d)...) ≈ cov(d, Val(false)) -end + function test_cov(d::MatrixDistribution) + @test vec(var(d)) ≈ diag(cov(d)) + @test reshape(cov(d), size(d)..., size(d)...) ≈ cov(d, Val(false)) + end -test_cov(d::LKJ) = nothing + test_cov(d::LKJ) = nothing -# -------------------------------------------------- -# Check dim -# -------------------------------------------------- + # -------------------------------------------------- + # Check dim + # -------------------------------------------------- -function test_dim(d::MatrixDistribution) - n = @test_deprecated(dim(d)) - @test n == size(d, 1) - @test n == size(d, 2) - @test n == size(mean(d), 1) - @test n == size(mean(d), 2) -end + function test_dim(d::MatrixDistribution) + n = @test_deprecated(dim(d)) + @test n == size(d, 1) + @test n == size(d, 2) + @test n == size(mean(d), 1) + @test n == size(mean(d), 2) + end -test_dim(d::Union{MatrixNormal, MatrixTDist}) = nothing + test_dim(d::Union{MatrixNormal,MatrixTDist}) = nothing -# -------------------------------------------------- -# RUN EVERYTHING -# -------------------------------------------------- + # -------------------------------------------------- + # RUN EVERYTHING + # -------------------------------------------------- -function test_distr(d::MatrixDistribution, M::Integer) - test_draw(d) - test_draws(d, M) - test_cov(d) - test_convert(d) - test_dim(d) - nothing -end + function test_distr(d::MatrixDistribution, M::Integer) + test_draw(d) + test_draws(d, M) + test_cov(d) + test_convert(d) + test_dim(d) + nothing + end -function test_distr(dist::Type{<:MatrixDistribution}, - n::Integer, - p::Integer, - M::Integer) - d = dist(_rand_params(dist, Float64, n, p)...) - test_distr(d, M) - nothing -end + function test_distr( + dist::Type{<:MatrixDistribution}, + n::Integer, + p::Integer, + M::Integer, + ) + d = dist(_rand_params(dist, Float64, n, p)...) + test_distr(d, M) + nothing + end -# ============================================================================= -# 2. 
test matrix-variate against the univariate it collapses to in the 1 x 1 case -# ============================================================================= - -function test_against_univariate(D::MatrixDistribution, d::UnivariateDistribution) - X = rand(D) - x = X[1] - @test logpdf(D, X) ≈ logpdf(d, x) - @test pdf(D, X) ≈ pdf(d, x) - @test mean(D)[1] ≈ mean(d) - @test var(D)[1] ≈ var(d) - nothing -end + # ============================================================================= + # 2. test matrix-variate against the univariate it collapses to in the 1 x 1 case + # ============================================================================= + + function test_against_univariate(D::MatrixDistribution, d::UnivariateDistribution) + X = rand(D) + x = X[1] + @test logpdf(D, X) ≈ logpdf(d, x) + @test pdf(D, X) ≈ pdf(d, x) + @test mean(D)[1] ≈ mean(d) + @test var(D)[1] ≈ var(d) + nothing + end -function test_draws_against_univariate_cdf(D::MatrixDistribution, d::UnivariateDistribution) - α = 0.025 - M = 100000 - matvardraws = [rand(D)[1] for m in 1:M] - @test pvalue_kolmogorovsmirnoff(matvardraws, d) >= α - nothing -end + function test_draws_against_univariate_cdf( + D::MatrixDistribution, + d::UnivariateDistribution, + ) + α = 0.025 + M = 100000 + matvardraws = [rand(D)[1] for m = 1:M] + @test pvalue_kolmogorovsmirnoff(matvardraws, d) >= α + nothing + end -test_draws_against_univariate_cdf(D::LKJ, d::UnivariateDistribution) = nothing + test_draws_against_univariate_cdf(D::LKJ, d::UnivariateDistribution) = nothing -function test_against_univariate(dist::Type{<:MatrixDistribution}) - D = dist(_rand_params(dist, Float64, 1, 1)...) - d = _univariate(D) - test_against_univariate(D, d) - test_draws_against_univariate_cdf(D, d) - nothing -end + function test_against_univariate(dist::Type{<:MatrixDistribution}) + D = dist(_rand_params(dist, Float64, 1, 1)...) + d = _univariate(D) + test_against_univariate(D, d) + test_draws_against_univariate_cdf(D, d) + nothing + end -# ============================================================================= -# 3. test matrix-variate against the multivariate it collapses to in the row/column case -# ============================================================================= - -function test_against_multivariate(D::MatrixDistribution, d::MultivariateDistribution) - X = rand(D) - x = vec(X) - @test logpdf(D, X) ≈ logpdf(d, x) - @test pdf(D, X) ≈ pdf(d, x) - @test vec(mean(D)) ≈ mean(d) - @test cov(D) ≈ cov(d) - nothing -end + # ============================================================================= + # 3. test matrix-variate against the multivariate it collapses to in the row/column case + # ============================================================================= + + function test_against_multivariate(D::MatrixDistribution, d::MultivariateDistribution) + X = rand(D) + x = vec(X) + @test logpdf(D, X) ≈ logpdf(d, x) + @test pdf(D, X) ≈ pdf(d, x) + @test vec(mean(D)) ≈ mean(d) + @test cov(D) ≈ cov(d) + nothing + end -function test_against_multivariate(dist::Union{Type{MatrixNormal}, Type{MatrixTDist}}, n::Integer, p::Integer) - D = dist(_rand_params(dist, Float64, n, 1)...) - d = _multivariate(D) - test_against_multivariate(D, d) - D = dist(_rand_params(dist, Float64, 1, p)...) - d = _multivariate(D) - test_against_multivariate(D, d) - nothing -end + function test_against_multivariate( + dist::Union{Type{MatrixNormal},Type{MatrixTDist}}, + n::Integer, + p::Integer, + ) + D = dist(_rand_params(dist, Float64, n, 1)...) 
+ d = _multivariate(D) + test_against_multivariate(D, d) + D = dist(_rand_params(dist, Float64, 1, p)...) + d = _multivariate(D) + test_against_multivariate(D, d) + nothing + end -test_against_multivariate(dist::Type{<:MatrixDistribution}, n::Integer, p::Integer) = nothing + test_against_multivariate(dist::Type{<:MatrixDistribution}, n::Integer, p::Integer) = + nothing -# ============================================================================= -# 4. test density evaluation against archived output from Stan -# ============================================================================= + # ============================================================================= + # 4. test density evaluation against archived output from Stan + # ============================================================================= -function jsonparams2dist(dist::Type{Wishart}, dict) - ν = dict["params"][1][1] - S = zeros(Float64, dict["dims"]...) - S[:] = Vector{Float64}( dict["params"][2] ) - return Wishart(ν, S) -end + function jsonparams2dist(dist::Type{Wishart}, dict) + ν = dict["params"][1][1] + S = zeros(Float64, dict["dims"]...) + S[:] = Vector{Float64}(dict["params"][2]) + return Wishart(ν, S) + end -function jsonparams2dist(dist::Type{InverseWishart}, dict) - ν = dict["params"][1][1] - S = zeros(Float64, dict["dims"]...) - S[:] = Vector{Float64}( dict["params"][2] ) - return InverseWishart(ν, S) -end + function jsonparams2dist(dist::Type{InverseWishart}, dict) + ν = dict["params"][1][1] + S = zeros(Float64, dict["dims"]...) + S[:] = Vector{Float64}(dict["params"][2]) + return InverseWishart(ν, S) + end -function jsonparams2dist(dist::Type{LKJ}, dict) - d = dict["params"][1][1] - η = dict["params"][2][1] - return LKJ(d, η) -end + function jsonparams2dist(dist::Type{LKJ}, dict) + d = dict["params"][1][1] + η = dict["params"][2][1] + return LKJ(d, η) + end -function unpack_matvar_json_dict(dist::Type{<:MatrixDistribution}, dict) - d = jsonparams2dist(dist, dict) - X = zeros(Float64, dict["dims"]...) - X[:] = Vector{Float64}(dict["X"]) - lpdf = dict["lpdf"][1] - return d, X, lpdf -end + function unpack_matvar_json_dict(dist::Type{<:MatrixDistribution}, dict) + d = jsonparams2dist(dist, dict) + X = zeros(Float64, dict["dims"]...) 
+ X[:] = Vector{Float64}(dict["X"]) + lpdf = dict["lpdf"][1] + return d, X, lpdf + end -function test_against_stan(dist::Type{<:MatrixDistribution}) - filename = joinpath(@__DIR__, "ref", "matrixvariates", "jsonfiles", "$(dist)_stan_output.json") - stan_output = JSON.parsefile(filename) - K = length(stan_output) - for k in 1:K - d, X, lpdf = unpack_matvar_json_dict(dist, stan_output[k]) - @test isapprox(logpdf(d, X), lpdf, atol = 1e-10) - @test isapprox(logpdf(d, [X, X]), [lpdf, lpdf], atol=1e-8) - @test isapprox(pdf(d, X), exp(lpdf), atol = 1e-6) - @test isapprox(pdf(d, [X, X]), exp.([lpdf, lpdf]), atol=1e-6) + function test_against_stan(dist::Type{<:MatrixDistribution}) + filename = joinpath( + @__DIR__, + "ref", + "matrixvariates", + "jsonfiles", + "$(dist)_stan_output.json", + ) + stan_output = JSON.parsefile(filename) + K = length(stan_output) + for k = 1:K + d, X, lpdf = unpack_matvar_json_dict(dist, stan_output[k]) + @test isapprox(logpdf(d, X), lpdf, atol = 1e-10) + @test isapprox(logpdf(d, [X, X]), [lpdf, lpdf], atol = 1e-8) + @test isapprox(pdf(d, X), exp(lpdf), atol = 1e-6) + @test isapprox(pdf(d, [X, X]), exp.([lpdf, lpdf]), atol = 1e-6) + end + nothing end - nothing -end -function test_against_stan(dist::Union{Type{MatrixNormal}, Type{MatrixTDist}, Type{MatrixBeta}, Type{MatrixFDist}}) - nothing -end + function test_against_stan( + dist::Union{ + Type{MatrixNormal}, + Type{MatrixTDist}, + Type{MatrixBeta}, + Type{MatrixFDist}, + }, + ) + nothing + end -# ============================================================================= -# 5. special, distribution-specific tests -# ============================================================================= + # ============================================================================= + # 5. special, distribution-specific tests + # ============================================================================= -test_special(dist::Type{<:MatrixDistribution}) = nothing + test_special(dist::Type{<:MatrixDistribution}) = nothing -function test_special(dist::Type{MatrixNormal}) - D = MatrixNormal(_rand_params(MatrixNormal, Float64, 2, 2)...) - @testset "X ~ MN(M, U, V) ⟺ vec(X) ~ N(vec(M), V ⊗ U)" begin - d = vec(D) - test_against_multivariate(D, d) - end - @testset "MatrixNormal mode" begin - @test mode(D) == D.M - end - @testset "PDMat mixing and matching" begin - n = 3 - p = 4 - M = randn(n, p) - u = rand() - U_scale = ScalMat(n, u) - U_dense = Matrix(U_scale) - U_pd = PDMat(U_dense) - U_pdiag = PDiagMat(u*ones(n)) - v = rand(p) - V_pdiag = PDiagMat(v) - V_dense = Matrix(V_pdiag) - V_pd = PDMat(V_dense) - UV = kron(V_dense, U_dense) - baseeval = logpdf(MatrixNormal(M, U_dense, V_dense), M) - for U in [U_scale, U_dense, U_pd, U_pdiag] - for V in [V_pdiag, V_dense, V_pd] - d = MatrixNormal(M, U, V) - @test cov(d) ≈ UV - @test logpdf(d, M) ≈ baseeval + function test_special(dist::Type{MatrixNormal}) + D = MatrixNormal(_rand_params(MatrixNormal, Float64, 2, 2)...) 
+ @testset "X ~ MN(M, U, V) ⟺ vec(X) ~ N(vec(M), V ⊗ U)" begin + d = vec(D) + test_against_multivariate(D, d) + end + @testset "MatrixNormal mode" begin + @test mode(D) == D.M + end + @testset "PDMat mixing and matching" begin + n = 3 + p = 4 + M = randn(n, p) + u = rand() + U_scale = ScalMat(n, u) + U_dense = Matrix(U_scale) + U_pd = PDMat(U_dense) + U_pdiag = PDiagMat(u * ones(n)) + v = rand(p) + V_pdiag = PDiagMat(v) + V_dense = Matrix(V_pdiag) + V_pd = PDMat(V_dense) + UV = kron(V_dense, U_dense) + baseeval = logpdf(MatrixNormal(M, U_dense, V_dense), M) + for U in [U_scale, U_dense, U_pd, U_pdiag] + for V in [V_pdiag, V_dense, V_pd] + d = MatrixNormal(M, U, V) + @test cov(d) ≈ UV + @test logpdf(d, M) ≈ baseeval + end end end + nothing end - nothing -end -function test_special(dist::Type{Wishart}) - n = 3 - M = 5000 - α = 0.05 - ν, Σ = _rand_params(Wishart, Float64, n, n) - d = Wishart(ν, Σ) - H = rand(d, M) - @testset "deprecations" begin - for warn in (true, false) - @test @test_deprecated(Wishart(n - 1, Σ, warn)) == Wishart(n - 1, Σ) + function test_special(dist::Type{Wishart}) + n = 3 + M = 5000 + α = 0.05 + ν, Σ = _rand_params(Wishart, Float64, n, n) + d = Wishart(ν, Σ) + H = rand(d, M) + @testset "deprecations" begin + for warn in (true, false) + @test @test_deprecated(Wishart(n - 1, Σ, warn)) == Wishart(n - 1, Σ) + end end - end - @testset "meanlogdet" begin - @test isapprox(Distributions.meanlogdet(d), mean(logdet.(H)), atol = 0.1) - end - @testset "H ~ W(ν, Σ), a ~ p(a) independent ⟹ a'Ha / a'Σa ~ χ²(ν)" begin - q = MvTDist(10, randn(n), rand(d)) - ρ = Chisq(ν) - A = rand(q, M) - z = [A[:, m]'*H[m]*A[:, m] / (A[:, m]'*Σ*A[:, m]) for m in 1:M] - @test pvalue_kolmogorovsmirnoff(z, ρ) >= α - end - @testset "H ~ W(ν, I) ⟹ H[i, i] ~ χ²(ν)" begin - κ = n + 1 - ρ = Chisq(κ) - g = Wishart(κ, ScalMat(n, 1)) - mymats = zeros(n, n, M) - for m in 1:M - mymats[:, :, m] = rand(g) + @testset "meanlogdet" begin + @test isapprox(Distributions.meanlogdet(d), mean(logdet.(H)), atol = 0.1) end - for i in 1:n - @test pvalue_kolmogorovsmirnoff(mymats[i, i, :], ρ) >= α / n + @testset "H ~ W(ν, Σ), a ~ p(a) independent ⟹ a'Ha / a'Σa ~ χ²(ν)" begin + q = MvTDist(10, randn(n), rand(d)) + ρ = Chisq(ν) + A = rand(q, M) + z = [A[:, m]' * H[m] * A[:, m] / (A[:, m]' * Σ * A[:, m]) for m = 1:M] + @test pvalue_kolmogorovsmirnoff(z, ρ) >= α end + @testset "H ~ W(ν, I) ⟹ H[i, i] ~ χ²(ν)" begin + κ = n + 1 + ρ = Chisq(κ) + g = Wishart(κ, ScalMat(n, 1)) + mymats = zeros(n, n, M) + for m = 1:M + mymats[:, :, m] = rand(g) + end + for i = 1:n + @test pvalue_kolmogorovsmirnoff(mymats[i, i, :], ρ) >= α / n + end + end + @testset "Check Singular Branch" begin + # Check that no warnings are shown: #1410 + rank1 = @test_logs Wishart(n - 2, Σ) + rank2 = @test_logs Wishart(n - 1, Σ) + test_draw(rank1) + test_draw(rank2) + test_draws(rank1, rand(rank1, 10^6)) + test_draws(rank2, rand(rank2, 10^6)) + test_cov(rank1) + test_cov(rank2) + + X = H[1] + @test Distributions.singular_wishart_logkernel(d, X) ≈ + Distributions.nonsingular_wishart_logkernel(d, X) + @test Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) ≈ + Distributions.nonsingular_wishart_logc0(n, ν, d.S) + @test logpdf(d, X) ≈ + Distributions.singular_wishart_logkernel(d, X) + + Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) + end + nothing end - @testset "Check Singular Branch" begin - # Check that no warnings are shown: #1410 - rank1 = @test_logs Wishart(n - 2, Σ) - rank2 = @test_logs Wishart(n - 1, Σ) - test_draw(rank1) - test_draw(rank2) - 
test_draws(rank1, rand(rank1, 10^6)) - test_draws(rank2, rand(rank2, 10^6)) - test_cov(rank1) - test_cov(rank2) - - X = H[1] - @test Distributions.singular_wishart_logkernel(d, X) ≈ Distributions.nonsingular_wishart_logkernel(d, X) - @test Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) ≈ Distributions.nonsingular_wishart_logc0(n, ν, d.S) - @test logpdf(d, X) ≈ Distributions.singular_wishart_logkernel(d, X) + Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) - end - nothing -end -function test_special(dist::Type{InverseWishart}) - @testset "InverseWishart constructor" begin - # Tests https://github.com/JuliaStats/Distributions.jl/issues/1948 - @test typeof(InverseWishart(5, ScalMat(5, 1))) == InverseWishart{Float64, ScalMat{Float64}} - @test typeof(InverseWishart(5, PDiagMat(ones(Int, 5)))) == InverseWishart{Float64, PDiagMat{Float64, Vector{Float64}}} + function test_special(dist::Type{InverseWishart}) + @testset "InverseWishart constructor" begin + # Tests https://github.com/JuliaStats/Distributions.jl/issues/1948 + @test typeof(InverseWishart(5, ScalMat(5, 1))) == + InverseWishart{Float64,ScalMat{Float64}} + @test typeof(InverseWishart(5, PDiagMat(ones(Int, 5)))) == + InverseWishart{Float64,PDiagMat{Float64,Vector{Float64}}} + end end -end -function test_special(dist::Type{MatrixTDist}) - @testset "MT(v, M, vΣ, Ω) → MN(M, Σ, Ω) as v → ∞" begin - n, p = (6, 3) - M, Σ, Ω = _rand_params(MatrixNormal, Float64, n, p) - MT = MatrixTDist(1000, M, 1000Σ, Ω) - MN = MatrixNormal(M, Σ, Ω) - A = rand(MN) - @test isapprox(logpdf(MT, A), logpdf(MN, A), atol = 0.1) - end - @testset "PDMat mixing and matching" begin - n = 3 - p = 4 - ν = max(n, p) + 1 - M = randn(n, p) - u = rand() - U_scale = ScalMat(n, u) - U_dense = Matrix(U_scale) - U_pd = PDMat(U_dense) - U_pdiag = PDiagMat(u*ones(n)) - v = rand(p) - V_pdiag = PDiagMat(v) - V_dense = Matrix(V_pdiag) - V_pd = PDMat(V_dense) - UV = kron(V_dense, U_dense) ./ (ν - 2) - baseeval = logpdf(MatrixTDist(ν, M, U_dense, V_dense), M) - for U in [U_scale, U_dense, U_pd, U_pdiag] - for V in [V_pdiag, V_dense, V_pd] - d = MatrixTDist(ν, M, U, V) - @test cov(d) ≈ UV - @test logpdf(d, M) ≈ baseeval + function test_special(dist::Type{MatrixTDist}) + @testset "MT(v, M, vΣ, Ω) → MN(M, Σ, Ω) as v → ∞" begin + n, p = (6, 3) + M, Σ, Ω = _rand_params(MatrixNormal, Float64, n, p) + MT = MatrixTDist(1000, M, 1000Σ, Ω) + MN = MatrixNormal(M, Σ, Ω) + A = rand(MN) + @test isapprox(logpdf(MT, A), logpdf(MN, A), atol = 0.1) + end + @testset "PDMat mixing and matching" begin + n = 3 + p = 4 + ν = max(n, p) + 1 + M = randn(n, p) + u = rand() + U_scale = ScalMat(n, u) + U_dense = Matrix(U_scale) + U_pd = PDMat(U_dense) + U_pdiag = PDiagMat(u * ones(n)) + v = rand(p) + V_pdiag = PDiagMat(v) + V_dense = Matrix(V_pdiag) + V_pd = PDMat(V_dense) + UV = kron(V_dense, U_dense) ./ (ν - 2) + baseeval = logpdf(MatrixTDist(ν, M, U_dense, V_dense), M) + for U in [U_scale, U_dense, U_pd, U_pdiag] + for V in [V_pdiag, V_dense, V_pd] + d = MatrixTDist(ν, M, U, V) + @test cov(d) ≈ UV + @test logpdf(d, M) ≈ baseeval + end end end + nothing end - nothing -end -function test_special(dist::Type{LKJ}) - @testset "LKJ mode" begin - @test mode(LKJ(5, 1.5)) == mean(LKJ(5, 1.5)) - @test_throws DomainError mode( LKJ(5, 0.5) ) - end - @testset "LKJ marginals" begin - d = 4 - η = abs(3randn()) - G = LKJ(d, η) - M = 10000 - α = 0.05 - L = sum(1:(d - 1)) - ρ = Distributions._marginal(G) - mymats = zeros(d, d, M) - for m in 1:M - mymats[:, :, m] = rand(G) + function test_special(dist::Type{LKJ}) 
+ @testset "LKJ mode" begin + @test mode(LKJ(5, 1.5)) == mean(LKJ(5, 1.5)) + @test_throws DomainError mode(LKJ(5, 0.5)) end - for i in 1:d - for j in 1:i-1 - @test pvalue_kolmogorovsmirnoff(mymats[i, j, :], ρ) >= α / L + @testset "LKJ marginals" begin + d = 4 + η = abs(3randn()) + G = LKJ(d, η) + M = 10000 + α = 0.05 + L = sum(1:(d-1)) + ρ = Distributions._marginal(G) + mymats = zeros(d, d, M) + for m = 1:M + mymats[:, :, m] = rand(G) + end + for i = 1:d + for j = 1:i-1 + @test pvalue_kolmogorovsmirnoff(mymats[i, j, :], ρ) >= α / L + end end end + @testset "LKJ integrating constant" begin + # ============= + # odd non-uniform + # ============= + d = 5 + η = 2.3 + lkj = LKJ(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_loginvconst_alt(d, η) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) + # ============= + # odd uniform + # ============= + d = 5 + η = 1.0 + lkj = LKJ(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_vine_loginvconst_uniform(d) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.corr_logvolume(d) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) + # ============= + # even non-uniform + # ============= + d = 6 + η = 2.3 + lkj = LKJ(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_loginvconst_alt(d, η) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) + # ============= + # even uniform + # ============= + d = 6 + η = 1.0 + lkj = LKJ(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ + Distributions.lkj_vine_loginvconst_uniform(d) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ + Distributions.corr_logvolume(d) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) + end + @testset "check integrating constant as a volume" begin + # d = 2: Lebesgue measure of the set of correlation matrices is 2. + volume2D = 2 + @test volume2D ≈ exp(Distributions.lkj_onion_loginvconst(2, 1)) + @test 1 / volume2D ≈ exp(LKJ(2, 1).logc0) + # d = 3: Lebesgue measure of the set of correlation matrices is π²/2. + # See here: https://www.jstor.org/stable/2684832 + volume3D = 0.5π^2 + @test volume3D ≈ exp(Distributions.lkj_onion_loginvconst(3, 1)) + @test 1 / volume3D ≈ exp(LKJ(3, 1).logc0) + # d = 4: Lebesgue measure of the set of correlation matrices is (32/27)π². 
+ # See here: https://doi.org/10.4169/amer.math.monthly.123.9.909 + volume4D = (32 / 27) * π^2 + @test volume4D ≈ exp(Distributions.lkj_onion_loginvconst(4, 1)) + @test 1 / volume4D ≈ exp(LKJ(4, 1).logc0) + end + nothing end - @testset "LKJ integrating constant" begin - # ============= - # odd non-uniform - # ============= - d = 5 - η = 2.3 - lkj = LKJ(d, η) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) - # ============= - # odd uniform - # ============= - d = 5 - η = 1.0 - lkj = LKJ(d, η) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_vine_loginvconst_uniform(d) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.corr_logvolume(d) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) - # ============= - # even non-uniform - # ============= - d = 6 - η = 2.3 - lkj = LKJ(d, η) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) - # ============= - # even uniform - # ============= - d = 6 - η = 1.0 - lkj = LKJ(d, η) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_vine_loginvconst_uniform(d) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.corr_logvolume(d) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) - end - @testset "check integrating constant as a volume" begin - # d = 2: Lebesgue measure of the set of correlation matrices is 2. - volume2D = 2 - @test volume2D ≈ exp( Distributions.lkj_onion_loginvconst(2, 1) ) - @test 1 / volume2D ≈ exp( LKJ(2, 1).logc0 ) - # d = 3: Lebesgue measure of the set of correlation matrices is π²/2. - # See here: https://www.jstor.org/stable/2684832 - volume3D = 0.5π^2 - @test volume3D ≈ exp( Distributions.lkj_onion_loginvconst(3, 1) ) - @test 1 / volume3D ≈ exp( LKJ(3, 1).logc0 ) - # d = 4: Lebesgue measure of the set of correlation matrices is (32/27)π². - # See here: https://doi.org/10.4169/amer.math.monthly.123.9.909 - volume4D = (32 / 27)*π^2 - @test volume4D ≈ exp( Distributions.lkj_onion_loginvconst(4, 1) ) - @test 1 / volume4D ≈ exp( LKJ(4, 1).logc0 ) - end - nothing -end -# ============================================================================= -# 6. 
main method -# ============================================================================= - -function test_matrixvariate(dist::Type{<:MatrixDistribution}, - n::Integer, - p::Integer, - M::Integer) - test_distr(dist, n, p, M) - test_against_univariate(dist) - test_against_multivariate(dist, n, p) - test_against_stan(dist) - test_special(dist) - nothing -end + # ============================================================================= + # 6. main method + # ============================================================================= + + function test_matrixvariate( + dist::Type{<:MatrixDistribution}, + n::Integer, + p::Integer, + M::Integer, + ) + test_distr(dist, n, p, M) + test_against_univariate(dist) + test_against_multivariate(dist, n, p) + test_against_stan(dist) + test_special(dist) + nothing + end -# ============================================================================= -# 7. run unit tests for matrix-variate distributions -# ============================================================================= - -matrixvariates = [(MatrixNormal, 2, 4, 10^5), - (Wishart, 2, 2, 10^6), - (InverseWishart, 2, 2, 10^6), - (MatrixTDist, 2, 4, 10^5), - (MatrixBeta, 3, 3, 10^5), - (MatrixFDist, 3, 3, 10^5), - (LKJ, 3, 3, 10^5)] - -for distribution in matrixvariates - dist, n, p, M = distribution - println(" testing $(dist)") - @testset "$(dist)" begin - test_matrixvariate(dist, n, p, M) + # ============================================================================= + # 7. run unit tests for matrix-variate distributions + # ============================================================================= + + matrixvariates = [ + (MatrixNormal, 2, 4, 10^5), + (Wishart, 2, 2, 10^6), + (InverseWishart, 2, 2, 10^6), + (MatrixTDist, 2, 4, 10^5), + (MatrixBeta, 3, 3, 10^5), + (MatrixFDist, 3, 3, 10^5), + (LKJ, 3, 3, 10^5), + ] + + for distribution in matrixvariates + dist, n, p, M = distribution + println(" testing $(dist)") + @testset "$(dist)" begin + test_matrixvariate(dist, n, p, M) + end end end -end diff --git a/test/mixture.jl b/test/mixture.jl index d2b2742ebc..1a64cbfedd 100644 --- a/test/mixture.jl +++ b/test/mixture.jl @@ -6,8 +6,12 @@ using ForwardDiff: Dual # Core testing procedure -function test_mixture(g::UnivariateMixture, n::Int, ns::Int, - rng::Union{AbstractRNG, Missing} = missing) +function test_mixture( + g::UnivariateMixture, + n::Int, + ns::Int, + rng::Union{AbstractRNG,Missing} = missing, +) if g isa UnivariateGMM T = eltype(g.means) else @@ -50,8 +54,8 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int, c_k = component(g, k) for i = 1:n x_i = X[i] - P0[i,k] = pdf(c_k, x_i) - LP0[i,k] = logpdf(c_k, x_i) + P0[i, k] = pdf(c_k, x_i) + LP0[i, k] = logpdf(c_k, x_i) end end @@ -61,8 +65,8 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int, for i = 1:n @test @inferred(pdf(g, X[i])) ≈ mix_p0[i] @test @inferred(logpdf(g, X[i])) ≈ mix_lp0[i] - @test @inferred(componentwise_pdf(g, X[i])) ≈ vec(P0[i,:]) - @test @inferred(componentwise_logpdf(g, X[i])) ≈ vec(LP0[i,:]) + @test @inferred(componentwise_pdf(g, X[i])) ≈ vec(P0[i, :]) + @test @inferred(componentwise_logpdf(g, X[i])) ≈ vec(LP0[i, :]) end @test @inferred(map(Base.Fix1(pdf, g), X)) ≈ mix_p0 @@ -75,7 +79,7 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int, for α in αs @test cdf(g, @inferred(quantile(g, α))) ≈ α end - @test @inferred(median(g)) ≈ quantile(g, 1//2) + @test @inferred(median(g)) ≈ quantile(g, 1 // 2) # sampling # sampling does not work with `Float32` since `AliasTable` does not 
support `Float32` @@ -88,12 +92,16 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int, end @test isa(Xs, Vector{T}) @test length(Xs) == ns - @test isapprox(mean(Xs), mean(g), atol=0.01) + @test isapprox(mean(Xs), mean(g), atol = 0.01) end end -function test_mixture(g::MultivariateMixture, n::Int, ns::Int, - rng::Union{AbstractRNG, Missing} = missing) +function test_mixture( + g::MultivariateMixture, + n::Int, + ns::Int, + rng::Union{AbstractRNG,Missing} = missing, +) X = zeros(length(g), n) for i = 1:n if ismissing(rng) @@ -120,9 +128,9 @@ function test_mixture(g::MultivariateMixture, n::Int, ns::Int, for k = 1:K c_k = component(g, k) for i = 1:n - x_i = X[:,i] - P0[i,k] = pdf(c_k, x_i) - LP0[i,k] = logpdf(c_k, x_i) + x_i = X[:, i] + P0[i, k] = pdf(c_k, x_i) + LP0[i, k] = logpdf(c_k, x_i) end end @@ -130,17 +138,17 @@ function test_mixture(g::MultivariateMixture, n::Int, ns::Int, mix_lp0 = log.(mix_p0) for i = 1:n - x_i = X[:,i] + x_i = X[:, i] @test @inferred(pdf(g, x_i)) ≈ mix_p0[i] @test @inferred(logpdf(g, x_i)) ≈ mix_lp0[i] - @test @inferred(componentwise_pdf(g, x_i)) ≈ vec(P0[i,:]) - @test @inferred(componentwise_logpdf(g, x_i)) ≈ vec(LP0[i,:]) + @test @inferred(componentwise_pdf(g, x_i)) ≈ vec(P0[i, :]) + @test @inferred(componentwise_logpdf(g, x_i)) ≈ vec(LP0[i, :]) end -#= - @show g - @show size(X) - @show size(mix_p0) -=# + #= + @show g + @show size(X) + @show size(mix_p0) + =# @test @inferred(pdf(g, X)) ≈ mix_p0 @test @inferred(logpdf(g, X)) ≈ mix_lp0 @test @inferred(componentwise_pdf(g, X)) ≈ P0 @@ -154,9 +162,9 @@ function test_mixture(g::MultivariateMixture, n::Int, ns::Int, end @test isa(Xs, Matrix{Float64}) @test size(Xs) == (length(g), ns) - @test isapprox(vec(mean(Xs, dims=2)), mean(g), atol=0.1) - @test isapprox(cov(Xs, dims=2) , cov(g) , atol=0.1) - @test isapprox(var(Xs, dims=2) , var(g) , atol=0.1) + @test isapprox(vec(mean(Xs, dims = 2)), mean(g), atol = 0.1) + @test isapprox(cov(Xs, dims = 2), cov(g), atol = 0.1) + @test isapprox(var(Xs, dims = 2), var(g), atol = 0.1) end function test_params(g::AbstractMixtureModel) @@ -177,13 +185,14 @@ end # Tests -@testset "Testing Mixtures with $key" for (key, rng) in - Dict("rand(...)" => missing, - "rand(rng, ...)" => MersenneTwister(123)) +@testset "Testing Mixtures with $key" for (key, rng) in Dict( + "rand(...)" => missing, + "rand(rng, ...)" => MersenneTwister(123), +) @testset "Testing UnivariateMixture" begin g_u = MixtureModel([Normal(), Normal()]) - @test isa(g_u, MixtureModel{Univariate, Continuous, <:Normal}) + @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) @test ncomponents(g_u) == 2 test_mixture(g_u, 1000, 10^6, rng) test_params(g_u) @@ -193,7 +202,11 @@ end @test @inferred(median(g_u)) === 0.0 @test @inferred(quantile(g_u, 0.5f0)) === 0.0 - g_u = MixtureModel(Normal{Float64}, [(0.0, 1.0), (2.0, 1.0), (-4.0, 1.5)], [0.2, 0.5, 0.3]) + g_u = MixtureModel( + Normal{Float64}, + [(0.0, 1.0), (2.0, 1.0), (-4.0, 1.5)], + [0.2, 0.5, 0.3], + ) @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) @test ncomponents(g_u) == 3 test_mixture(g_u, 1000, 10^6, rng) @@ -202,7 +215,8 @@ end @test maximum(g_u) == Inf @test extrema(g_u) == (-Inf, Inf) - g_u = MixtureModel(Normal{Float32}, [(0f0, 1f0), (0f0, 2f0)], [0.4f0, 0.6f0]) + g_u = + MixtureModel(Normal{Float32}, [(0.0f0, 1.0f0), (0.0f0, 2.0f0)], [0.4f0, 0.6f0]) @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) @test ncomponents(g_u) == 2 test_mixture(g_u, 1000, 10^6, rng) @@ -210,20 +224,29 @@ end @test minimum(g_u) == -Inf @test 
maximum(g_u) == Inf
 @test extrema(g_u) == (-Inf, Inf)
- @test @inferred(median(g_u)) === 0f0
- @test @inferred(quantile(g_u, 0.5f0)) === 0f0
-
- g_u = MixtureModel([TriangularDist(-1,2,0),TriangularDist(-.5,3,1),TriangularDist(-2,0,-1)])
+ @test @inferred(median(g_u)) === 0.0f0
+ @test @inferred(quantile(g_u, 0.5f0)) === 0.0f0
+
+ g_u = MixtureModel([
+ TriangularDist(-1, 2, 0),
+ TriangularDist(-0.5, 3, 1),
+ TriangularDist(-2, 0, -1),
+ ])
 @test minimum(g_u) ≈ -2.0
 @test maximum(g_u) ≈ 3.0
 @test extrema(g_u) == (minimum(g_u), maximum(g_u))
 @test insupport(g_u, 2.5) == true
 @test insupport(g_u, 3.5) == false
- μ = [0.0, 2.0, -4.0]; σ = [1.0, 1.2, 1.5]; p = [0.2, 0.5, 0.3]
- for T = [Float64, Dual]
- g_u = @inferred UnivariateGMM(map(Dual, μ), map(Dual, σ),
- Categorical(map(Dual, p)))
+ μ = [0.0, 2.0, -4.0]
+ σ = [1.0, 1.2, 1.5]
+ p = [0.2, 0.5, 0.3]
+ for T in [Float64, Dual]
+ g_u = @inferred UnivariateGMM(
+ map(Dual, μ),
+ map(Dual, σ),
+ Categorical(map(Dual, p)),
+ )
 @test isa(g_u, UnivariateGMM)
 @test ncomponents(g_u) == 3
 test_mixture(g_u, 1000, 10^6, rng)
@@ -237,12 +260,8 @@ end
 @test @inferred(logpdf(UnivariateGMM(μ, σ, Categorical(p)), 42)) isa Float64
 @testset "Product 0 NaN in mixtures" begin
- distributions = [
- Normal(-1.0, 0.3),
- Normal(0.0, 0.5),
- Normal(3.0, 1.0),
- Normal(NaN, 1.0),
- ]
+ distributions =
+ [Normal(-1.0, 0.3), Normal(0.0, 0.5), Normal(3.0, 1.0), Normal(NaN, 1.0)]
 priors = [0.25, 0.25, 0.5, 0.0]
 gmm_normal = MixtureModel(distributions, priors)
 for x in rand(10)
@@ -255,11 +274,14 @@ end
 @testset "Testing MultivariateMixture" begin
 for T in (Float32, Float64)
 g_m = MixtureModel(
- IsoNormal[ MvNormal([0.0, 0.0], I),
- MvNormal([0.2, 1.0], I),
- MvNormal([-0.5, -3.0], 1.6 * I) ],
- T[0.2, 0.5, 0.3])
- @test isa(g_m, MixtureModel{Multivariate, Continuous, IsoNormal})
+ IsoNormal[
+ MvNormal([0.0, 0.0], I),
+ MvNormal([0.2, 1.0], I),
+ MvNormal([-0.5, -3.0], 1.6 * I),
+ ],
+ T[0.2, 0.5, 0.3],
+ )
+ @test isa(g_m, MixtureModel{Multivariate,Continuous,IsoNormal})
 @test length(components(g_m)) == 3
 @test length(g_m) == 2
 @test insupport(g_m, [0.0, 0.0])
@@ -267,16 +289,16 @@ end
 test_params(g_m)
 end
- u1 = Uniform()
- u2 = Uniform(1.0, 2.0)
- utot =Uniform(0.0, 2.0)
+ u1 = Uniform()
+ u2 = Uniform(1.0, 2.0)
+ utot = Uniform(0.0, 2.0)
 # the mixture should be uniform on [0.0, 2.0]
- unif_mixt = MixtureModel([u1,u2])
- @test var(utot) ≈ var(unif_mixt)
+ unif_mixt = MixtureModel([u1, u2])
+ @test var(utot) ≈ var(unif_mixt)
 @test mean(utot) ≈ mean(unif_mixt)
- for x in -1.0:0.5:2.5
- @test cdf(utot,x) ≈ cdf(utot,x)
+ for x = -1.0:0.5:2.5
+ @test cdf(utot, x) ≈ cdf(unif_mixt, x)
 end
 end
@@ -284,7 +306,7 @@ end
 @testset "quantile of mixture with single component" begin
 for T in (Float32, Float64)
 d = MixtureModel([Normal{T}(T(1), T(0))])
- for p in (0.2, 0.2f0, 1//3)
+ for p in (0.2, 0.2f0, 1 // 3)
 @test @inferred(quantile(d, p)) == 1
 @test @inferred(median(d)) == 1
 end
diff --git a/test/multivariate/dirichlet.jl b/test/multivariate/dirichlet.jl
index 998dac8533..82ca5b22c8 100644
--- a/test/multivariate/dirichlet.jl
+++ b/test/multivariate/dirichlet.jl
@@ -1,6 +1,6 @@
 # Tests for Dirichlet distribution
-using Distributions 
+using Distributions
 using Test, Random, LinearAlgebra
 using ChainRulesCore
 using ChainRulesTestUtils
@@ -10,9 +10,10 @@ Random.seed!(34567)
 rng = MersenneTwister(123)
-@testset "Testing Dirichlet with $key" for (key, func) in
- Dict("rand(...)" => [rand, rand],
- "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist,
n)]) +@testset "Testing Dirichlet with $key" for (key, func) in Dict( + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], +) for T in (Int, Float64) d = Dirichlet(3, T(2)) @@ -22,19 +23,19 @@ rng = MersenneTwister(123) @test d.alpha == [2, 2, 2] @test d.alpha0 == 6 - @test mean(d) ≈ fill(1/3, 3) - @test mode(d) ≈ fill(1/3, 3) - @test cov(d) ≈ [8 -4 -4; -4 8 -4; -4 -4 8] / (36 * 7) - @test var(d) ≈ diag(cov(d)) - @test std(d) ≈ sqrt.(var(d)) + @test mean(d) ≈ fill(1 / 3, 3) + @test mode(d) ≈ fill(1 / 3, 3) + @test cov(d) ≈ [8 -4 -4; -4 8 -4; -4 -4 8] / (36 * 7) + @test var(d) ≈ diag(cov(d)) + @test std(d) ≈ sqrt.(var(d)) r = Vector{Float64}(undef, 3) Distributions.dirichlet_mode!(r, d.alpha, d.alpha0) - @test r ≈ fill(1/3, 3) + @test r ≈ fill(1 / 3, 3) @test pdf(Dirichlet([1, 1]), [0, 1]) ≈ 1 - @test pdf(Dirichlet([1f0, 1f0]), [0f0, 1f0]) ≈ 1 - @test typeof(pdf(Dirichlet([1f0, 1f0]), [0f0, 1f0])) === Float32 + @test pdf(Dirichlet([1.0f0, 1.0f0]), [0.0f0, 1.0f0]) ≈ 1 + @test typeof(pdf(Dirichlet([1.0f0, 1.0f0]), [0.0f0, 1.0f0])) === Float32 @test iszero(pdf(d, [-1, 1, 0])) @test iszero(pdf(d, [0, 0, 1])) @@ -46,9 +47,9 @@ rng = MersenneTwister(123) x = func[2](d, 100) p = pdf(d, x) lp = logpdf(d, x) - for i in 1 : size(x, 2) - @test lp[i] ≈ logpdf(d, x[:,i]) - @test p[i] ≈ pdf(d, x[:,i]) + for i = 1:size(x, 2) + @test lp[i] ≈ logpdf(d, x[:, i]) + @test p[i] ≈ pdf(d, x[:, i]) end v = [2, 1, 3] @@ -64,9 +65,9 @@ rng = MersenneTwister(123) @test d == deepcopy(d) @test mean(d) ≈ v / sum(v) - @test cov(d) ≈ [8 -2 -6; -2 5 -3; -6 -3 9] / (36 * 7) - @test var(d) ≈ diag(cov(d)) - @test std(d) ≈ sqrt.(var(d)) + @test cov(d) ≈ [8 -2 -6; -2 5 -3; -6 -3 9] / (36 * 7) + @test var(d) ≈ diag(cov(d)) + @test std(d) ≈ sqrt.(var(d)) @test pdf(d, [0.2, 0.3, 0.5]) ≈ 3 @test pdf(d, [0.4, 0.5, 0.1]) ≈ 0.24 @@ -76,9 +77,9 @@ rng = MersenneTwister(123) x = func[2](d, 100) p = pdf(d, x) lp = logpdf(d, x) - for i in 1 : size(x, 2) - @test p[i] ≈ pdf(d, x[:,i]) - @test lp[i] ≈ logpdf(d, x[:,i]) + for i = 1:size(x, 2) + @test p[i] ≈ pdf(d, x[:, i]) + @test lp[i] ≈ logpdf(d, x[:, i]) end # Sampling @@ -111,12 +112,12 @@ rng = MersenneTwister(123) n = 10000 x = func[2](d, n) - x = x ./ sum(x, dims=1) + x = x ./ sum(x, dims = 1) r = fit_mle(Dirichlet, x) - @test r.alpha ≈ d.alpha atol=0.25 + @test r.alpha ≈ d.alpha atol = 0.25 r = fit(Dirichlet{Float32}, x) - @test r.alpha ≈ d.alpha atol=0.25 + @test r.alpha ≈ d.alpha atol = 0.25 # r = fit_mle(Dirichlet, x, fill(2.0, n)) # @test r.alpha ≈ d.alpha atol=0.25 @@ -139,8 +140,8 @@ end @testset "constructor $T" for T in (Dirichlet, Dirichlet{Float64}) # Avoid issues with finite differencing if values in `alpha` become negative or zero # by using forward differencing - test_frule(T, alpha; fdm=forward_fdm(5, 1)) - test_rrule(T, alpha; fdm=forward_fdm(5, 1)) + test_frule(T, alpha; fdm = forward_fdm(5, 1)) + test_rrule(T, alpha; fdm = forward_fdm(5, 1)) end @testset "_logpdf" begin @@ -151,12 +152,12 @@ end # Use special finite differencing method that tries to avoid moving outside of the # support by limiting the range of the points around the input that are evaluated - fdm = central_fdm(5, 1; max_range=1e-9) + fdm = central_fdm(5, 1; max_range = 1e-9) for x in (x1, x2) # We have to adjust the tolerance since the finite differencing method is rough - test_frule(Distributions._logpdf, d, x; fdm=fdm, rtol=1e-5, nans=true) - test_rrule(Distributions._logpdf, d, x; fdm=fdm, rtol=1e-5, nans=true) + 
test_frule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1e-5, nans = true) + test_rrule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1e-5, nans = true) end end end diff --git a/test/multivariate/dirichletmultinomial.jl b/test/multivariate/dirichletmultinomial.jl index 995f243094..04d0a656f2 100644 --- a/test/multivariate/dirichletmultinomial.jl +++ b/test/multivariate/dirichletmultinomial.jl @@ -8,84 +8,87 @@ Random.seed!(123) rng = MersenneTwister(123) -@testset "Testing DirichletMultinomial with $key" for (key, func) in - Dict("rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) - -# Test Constructors -d = DirichletMultinomial(10, ones(5)) -d2 = DirichletMultinomial(10, ones(Int, 5)) -@test typeof(d) == typeof(d2) -@test d == deepcopy(d) - -α = func[1](5) -d = DirichletMultinomial(10, α) - -# test parameters -@test length(d) == 5 -@test ntrials(d) == 10 -@test params(d) == (10, α) -@test d.α == α -@test d.α0 == sum(α) -@test partype(d) == eltype(α) - -# test statistics -@test mean(d) ≈ α * (d.n / d.α0) -p = d.α / d.α0 -@test var(d) ≈ d.n * (d.n + d.α0) / (1 + d.α0) .* p .* (1.0 .- p) -@test std(d) ≈ sqrt.(var(d)) -x = func[2](d, 10_000) - -# test statistics with mle fit -d = fit(DirichletMultinomial, x) -@test isapprox(mean(d), vec(mean(x, dims=2)), atol=.5) -@test isapprox(std(d) , vec(std(x, dims=2)) , atol=.5) -@test isapprox(var(d) , vec(var(x, dims=2)) , atol=.5) -@test isapprox(cov(d) , cov(x, dims=2) , atol=.5) - -# test Evaluation -d = DirichletMultinomial(10, 5) -@test typeof(d) == DirichletMultinomial{Float64} -@test !insupport(d, func[1](5)) -@test insupport(d, [2, 2, 2, 2, 2]) -@test insupport(d, 2.0 * ones(5)) -@test !insupport(d, 3.0 * ones(5)) - -for x in (2 * ones(5), [1, 2, 3, 4, 0], [3.0, 0.0, 3.0, 0.0, 4.0], [0, 0, 0, 0, 10]) - @test pdf(d, x) ≈ - factorial(d.n) * gamma(d.α0) / gamma(d.n + d.α0) * prod(gamma.(d.α .+ x) ./ (gamma.(x .+ 1) .* gamma.(d.α))) - @test logpdf(d, x) ≈ - logfactorial(d.n) + loggamma(d.α0) - loggamma(d.n + d.α0) + sum(loggamma, d.α + x) - sum(loggamma, d.α) - sum(loggamma.(x .+ 1)) -end - -# test Sampling -x = func[1](d) -@test isa(x, Vector{Int}) -@test sum(x) == d.n -@test length(x) == length(d) -@test insupport(d, x) - -x = func[2](d, 50) -@test all(x -> (x >= 0), x) -@test size(x, 1) == length(d) -@test size(x, 2) == 50 -@test all(sum(x, dims=1) .== ntrials(d)) -@test all(insupport(d, x)) - -# test MLE -x = func[2](d, 10_000) -ss = suffstats(DirichletMultinomial, x) -@test size(ss.s, 1) == length(d) -@test size(ss.s, 2) == ntrials(d) -mle = fit(DirichletMultinomial, x) -@test isapprox(mle.α, d.α, atol=.2) -mle = fit(DirichletMultinomial{Float64}, x) -@test isapprox(mle.α, d.α, atol=.2) - -# test MLE with weights -for w in (.1 * ones(10_000), ones(10_000), 10 * ones(10_000)) - mle2 = fit(DirichletMultinomial, x, w) - @test mle.α ≈ mle2.α -end +@testset "Testing DirichletMultinomial with $key" for (key, func) in Dict( + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], +) + + # Test Constructors + d = DirichletMultinomial(10, ones(5)) + d2 = DirichletMultinomial(10, ones(Int, 5)) + @test typeof(d) == typeof(d2) + @test d == deepcopy(d) + + α = func[1](5) + d = DirichletMultinomial(10, α) + + # test parameters + @test length(d) == 5 + @test ntrials(d) == 10 + @test params(d) == (10, α) + @test d.α == α + @test d.α0 == sum(α) + @test partype(d) == eltype(α) + + # test statistics + @test mean(d) ≈ α * (d.n / d.α0) + p = d.α / 
d.α0 + @test var(d) ≈ d.n * (d.n + d.α0) / (1 + d.α0) .* p .* (1.0 .- p) + @test std(d) ≈ sqrt.(var(d)) + x = func[2](d, 10_000) + + # test statistics with mle fit + d = fit(DirichletMultinomial, x) + @test isapprox(mean(d), vec(mean(x, dims = 2)), atol = 0.5) + @test isapprox(std(d), vec(std(x, dims = 2)), atol = 0.5) + @test isapprox(var(d), vec(var(x, dims = 2)), atol = 0.5) + @test isapprox(cov(d), cov(x, dims = 2), atol = 0.5) + + # test Evaluation + d = DirichletMultinomial(10, 5) + @test typeof(d) == DirichletMultinomial{Float64} + @test !insupport(d, func[1](5)) + @test insupport(d, [2, 2, 2, 2, 2]) + @test insupport(d, 2.0 * ones(5)) + @test !insupport(d, 3.0 * ones(5)) + + for x in (2 * ones(5), [1, 2, 3, 4, 0], [3.0, 0.0, 3.0, 0.0, 4.0], [0, 0, 0, 0, 10]) + @test pdf(d, x) ≈ + factorial(d.n) * gamma(d.α0) / gamma(d.n + d.α0) * + prod(gamma.(d.α .+ x) ./ (gamma.(x .+ 1) .* gamma.(d.α))) + @test logpdf(d, x) ≈ + logfactorial(d.n) + loggamma(d.α0) - loggamma(d.n + d.α0) + + sum(loggamma, d.α + x) - sum(loggamma, d.α) - sum(loggamma.(x .+ 1)) + end + + # test Sampling + x = func[1](d) + @test isa(x, Vector{Int}) + @test sum(x) == d.n + @test length(x) == length(d) + @test insupport(d, x) + + x = func[2](d, 50) + @test all(x -> (x >= 0), x) + @test size(x, 1) == length(d) + @test size(x, 2) == 50 + @test all(sum(x, dims = 1) .== ntrials(d)) + @test all(insupport(d, x)) + + # test MLE + x = func[2](d, 10_000) + ss = suffstats(DirichletMultinomial, x) + @test size(ss.s, 1) == length(d) + @test size(ss.s, 2) == ntrials(d) + mle = fit(DirichletMultinomial, x) + @test isapprox(mle.α, d.α, atol = 0.2) + mle = fit(DirichletMultinomial{Float64}, x) + @test isapprox(mle.α, d.α, atol = 0.2) + + # test MLE with weights + for w in (0.1 * ones(10_000), ones(10_000), 10 * ones(10_000)) + mle2 = fit(DirichletMultinomial, x, w) + @test mle.α ≈ mle2.α + end end diff --git a/test/multivariate/jointorderstatistics.jl b/test/multivariate/jointorderstatistics.jl index d5d65a752e..9960192636 100644 --- a/test/multivariate/jointorderstatistics.jl +++ b/test/multivariate/jointorderstatistics.jl @@ -19,17 +19,17 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test @test_throws DomainError JointOrderStatistics(dist, 5, Int[]) @test_throws DomainError JointOrderStatistics(dist, 5, (3, 2)) @test_throws DomainError JointOrderStatistics(dist, 5, (3, 3)) - JointOrderStatistics(dist, 0, 1:2; check_args=false) - JointOrderStatistics(dist, 2, 2:3; check_args=false) - JointOrderStatistics(dist, 3, 0:3; check_args=false) - JointOrderStatistics(dist, 5, 3:-1:2; check_args=false) - JointOrderStatistics(dist, 5, 2:1:1; check_args=false) - JointOrderStatistics(dist, 0, [1, 2]; check_args=false) - JointOrderStatistics(dist, 2, [2, 3]; check_args=false) - JointOrderStatistics(dist, 3, [0, 1, 2, 3]; check_args=false) - JointOrderStatistics(dist, 5, Int[]; check_args=false) - JointOrderStatistics(dist, 5, (3, 2); check_args=false) - JointOrderStatistics(dist, 5, (3, 3); check_args=false) + JointOrderStatistics(dist, 0, 1:2; check_args = false) + JointOrderStatistics(dist, 2, 2:3; check_args = false) + JointOrderStatistics(dist, 3, 0:3; check_args = false) + JointOrderStatistics(dist, 5, 3:-1:2; check_args = false) + JointOrderStatistics(dist, 5, 2:1:1; check_args = false) + JointOrderStatistics(dist, 0, [1, 2]; check_args = false) + JointOrderStatistics(dist, 2, [2, 3]; check_args = false) + JointOrderStatistics(dist, 3, [0, 1, 2, 3]; check_args = false) + JointOrderStatistics(dist, 5, Int[]; 
check_args = false) + JointOrderStatistics(dist, 5, (3, 2); check_args = false) + JointOrderStatistics(dist, 5, (3, 3); check_args = false) end @testset for T in [Float32, Float64], @@ -37,8 +37,8 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test n in [16, 40], r in [ 1:n, - ([i, j] for j in 2:n for i in 1:min(10, j - 1))..., - vcat(2:4, (n - 10):(n - 5)), + ([i, j] for j = 2:n for i = 1:min(10, j - 1))..., + vcat(2:4, (n-10):(n-5)), (2, n ÷ 2, n - 5), ] @@ -65,7 +65,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test @test insupport(d, x) if length(x) > 1 @test !insupport(d, reverse(x)) - @test !insupport(d, x[1:(end - 1)]) + @test !insupport(d, x[1:(end-1)]) end @test !insupport(d, x2) @test !insupport(d, fill(NaN, length(x))) @@ -158,8 +158,8 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test x = rand(d, ndraws) @test all(xi -> insupport(d, xi), eachcol(x)) - m = mean(x; dims=2) - v = var(x; mean=m, dims=2) + m = mean(x; dims = 2) + v = var(x; mean = m, dims = 2) if dist isa Uniform # Arnold (2008). A first course in order statistics. eq 2.2.20-21 m_exact = r ./ (n + 1) @@ -171,7 +171,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test end # compute asymptotic sample standard deviation mean_std = @. sqrt(v_exact / ndraws) - m4 = dropdims(mapslices(xi -> moment(xi, 4), x; dims=2); dims=2) + m4 = dropdims(mapslices(xi -> moment(xi, 4), x; dims = 2); dims = 2) var_std = @. sqrt((m4 - v_exact^2) / ndraws) nchecks = length(r) @@ -188,8 +188,8 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test n = 100 rs = [ # good mixture of r values with gaps and no gaps 1:n, - vcat(1:10, (div(n, 2) - 5):(div(n, 2) + 5), (n - 9):n), - vcat(10:20, (n - 19):(n - 10)), + vcat(1:10, (div(n, 2)-5):(div(n, 2)+5), (n-9):n), + vcat(10:20, (n-19):(n-10)), (1, n), ] @@ -207,7 +207,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test m = length(r) - xcor = cor(x; dims=2) + xcor = cor(x; dims = 2) if dist isa Uniform # Arnold (2008). A first course in order statistics. Eq 2.3.16 s = @. n - r + 1 @@ -217,7 +217,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test v = [sum(k -> inv((n - k + 1)^2), 1:i) for i in r] xcor_exact = Symmetric(sqrt.(v ./ v')) end - for ii in 1:m, ji in (ii + 1):m + for ii = 1:m, ji = (ii+1):m i = r[ii] j = r[ji] ρ = xcor[ii, ji] diff --git a/test/multivariate/multinomial.jl b/test/multivariate/multinomial.jl index 79cddbe475..622c7e5619 100644 --- a/test/multivariate/multinomial.jl +++ b/test/multivariate/multinomial.jl @@ -10,166 +10,172 @@ using Test rng = MersenneTwister(123) d = Multinomial(nt, p) - @testset "Testing Multinomial with $key" for (key, func) in - Dict("rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) - - # Basics - - @test length(d) == 3 - @test d.n == nt - @test mean(d) ≈ T[2., 5., 3.] 
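The moment literals in the surrounding Multinomial block (both the removed and the reformatted copies) follow from the standard Multinomial(n, p) moments: mean n * p[i], variance n * p[i] * (1 - p[i]), and covariance -n * p[i] * p[j] for i != j. A small sketch reproducing them, assuming this fixture's nt = 10 trials and p = [0.2, 0.5, 0.3] (values implied by the asserted moments):

n, p = 10, [0.2, 0.5, 0.3]
m = n .* p                # mean: [2.0, 5.0, 3.0]
v = n .* p .* (1 .- p)    # variances: [1.6, 2.5, 2.1]
# Full covariance matrix; off-diagonal entries are -n * p[i] * p[j].
C = [i == j ? v[i] : -n * p[i] * p[j] for i in 1:3, j in 1:3]
# C == [1.6 -1.0 -0.6; -1.0 2.5 -1.5; -0.6 -1.5 2.1]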
- @test std(d) ≈ T[sqrt(1.6), sqrt(2.5), sqrt(2.1)] - @test var(d) ≈ T[1.6, 2.5, 2.1] - @test cov(d) ≈ T[1.6 -1.0 -0.6; -1.0 2.5 -1.5; -0.6 -1.5 2.1] - - @test insupport(d, [1, 6, 3]) - @test !insupport(d, [2, 6, 3]) - @test partype(d) === T - - # Conversion - @test typeof(d) === Multinomial{T, Vector{T}} - for S in (Float16, Float32, Float64) - S === T && continue - @test typeof(convert(Multinomial{S}, d)) === Multinomial{S, Vector{S}} - @test typeof(convert(Multinomial{S, Vector{S}}, d)) === Multinomial{S, Vector{S}} - @test typeof(convert(Multinomial{S}, params(d)...)) === Multinomial{S, Vector{S}} - @test typeof(convert(Multinomial{S, Vector{S}}, params(d)...)) == Multinomial{S, Vector{S}} - end - @test convert(Multinomial{T}, d) === d - @test convert(Multinomial{T, Vector{T}}, d) === d - - # random sampling - - x = @inferred(func[1](d)) - @test isa(x, Vector{Int}) - @test sum(x) == nt - @test insupport(d, x) - @test size(x) == size(d) - @test length(x) == length(d) - @test d == typeof(d)(params(d)...) - @test d == deepcopy(d) - - x = @inferred(func[2](d, 100)) - @test isa(x, Matrix{Int}) - @test all(sum(x, dims=1) .== nt) - @test all(insupport(d, x)) - - x = @inferred(func[1](sampler(d))) - @test isa(x, Vector{Int}) - @test sum(x) == nt - @test insupport(d, x) - - # logpdf - - x1 = [1, 6, 3] - - @test @inferred(pdf(d, x1)) ≈ T(0.070875) - @test @inferred(logpdf(d, x1)) ≈ log(pdf(d, x1)) - - x = func[2](d, 100) - pv = pdf(d, x) - lp = logpdf(d, x) - for i in 1 : size(x, 2) - @test pv[i] ≈ pdf(d, x[:,i]) - @test lp[i] ≈ logpdf(d, x[:,i]) - end - - # test result type of logpdf - @test typeof(logpdf(d, x1)) === T - - # test degenerate cases of logpdf - d1 = Multinomial(1, T[0.5, 0.5, 0.0]) - d2 = Multinomial(0, T[0.5, 0.5, 0.0]) - x2 = [1, 0, 0] - x3 = [0, 0, 1] - x4 = [1, 0, 1] - - @test logpdf(d1, x2) ≈ T(log(0.5)) - @test logpdf(d2, x2) == -Inf - @test logpdf(d1, x3) == -Inf - @test logpdf(d2, x3) == -Inf - - # suffstats - - d0 = d - n0 = 100 - x = func[2](d0, n0) - w = func[1](n0) - - ss = suffstats(Multinomial, x) - @test isa(ss, Distributions.MultinomialStats) - @test ss.n == nt - @test ss.scnts == vec(sum(T[x[i,j] for i = 1:size(x,1), j = 1:size(x,2)], dims=2)) - @test ss.tw == n0 - - ss = suffstats(Multinomial, x, w) - @test isa(ss, Distributions.MultinomialStats) - @test ss.n == nt - @test ss.scnts ≈ T[x[i,j] for i = 1:size(x,1), j = 1:size(x,2)] * w - @test ss.tw ≈ sum(w) - - # fit - - x = func[2](d0, 10^5) - @test size(x) == (length(d0), 10^5) - @test all(sum(x, dims=1) .== nt) - - r = fit(Multinomial, x) - @test r.n == nt - @test length(r) == length(p) - @test probs(r) ≈ p atol=0.02 - r = fit(Multinomial{Float64}, x) - @test r.n == nt - @test length(r) == length(p) - @test probs(r) ≈ p atol=0.02 - - r = fit_mle(Multinomial, x, fill(2.0, size(x,2))) - @test r.n == nt - @test length(r) == length(p) - @test probs(r) ≈ p atol=0.02 - - # behavior for n = 0 - d0 = Multinomial(0, p) - @test func[1](d0) == [0, 0, 0] - @test pdf(d0, [0, 0, 0]) == 1 - @test pdf(d0, [0, 1, 0]) == 0 - @test mean(d0) == [0, 0, 0] - @test var(d0) == [0, 0, 0] - @test cov(d0) == zeros(3, 3) - @test entropy(d0) == 0 - @test insupport(d0, [0, 0, 0]) == true - @test insupport(d0, [0, 0, 4]) == false - @test length(d0) == 3 - @test size(d0) == (3,) - - # Abstract vector p - - @test typeof(Multinomial(nt, SVector{length(p), T}(p))) == Multinomial{T, SVector{3, T}} - + @testset "Testing Multinomial with $key" for (key, func) in Dict( + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), 
(dist, n) -> rand(rng, dist, n)], + ) + + # Basics + + @test length(d) == 3 + @test d.n == nt + @test mean(d) ≈ T[2.0, 5.0, 3.0] + @test std(d) ≈ T[sqrt(1.6), sqrt(2.5), sqrt(2.1)] + @test var(d) ≈ T[1.6, 2.5, 2.1] + @test cov(d) ≈ T[1.6 -1.0 -0.6; -1.0 2.5 -1.5; -0.6 -1.5 2.1] + + @test insupport(d, [1, 6, 3]) + @test !insupport(d, [2, 6, 3]) + @test partype(d) === T + + # Conversion + @test typeof(d) === Multinomial{T,Vector{T}} + for S in (Float16, Float32, Float64) + S === T && continue + @test typeof(convert(Multinomial{S}, d)) === Multinomial{S,Vector{S}} + @test typeof(convert(Multinomial{S,Vector{S}}, d)) === Multinomial{S,Vector{S}} + @test typeof(convert(Multinomial{S}, params(d)...)) === Multinomial{S,Vector{S}} + @test typeof(convert(Multinomial{S,Vector{S}}, params(d)...)) == + Multinomial{S,Vector{S}} + end + @test convert(Multinomial{T}, d) === d + @test convert(Multinomial{T,Vector{T}}, d) === d + + # random sampling + + x = @inferred(func[1](d)) + @test isa(x, Vector{Int}) + @test sum(x) == nt + @test insupport(d, x) + @test size(x) == size(d) + @test length(x) == length(d) + @test d == typeof(d)(params(d)...) + @test d == deepcopy(d) + + x = @inferred(func[2](d, 100)) + @test isa(x, Matrix{Int}) + @test all(sum(x, dims = 1) .== nt) + @test all(insupport(d, x)) + + x = @inferred(func[1](sampler(d))) + @test isa(x, Vector{Int}) + @test sum(x) == nt + @test insupport(d, x) + + # logpdf + + x1 = [1, 6, 3] + + @test @inferred(pdf(d, x1)) ≈ T(0.070875) + @test @inferred(logpdf(d, x1)) ≈ log(pdf(d, x1)) + + x = func[2](d, 100) + pv = pdf(d, x) + lp = logpdf(d, x) + for i = 1:size(x, 2) + @test pv[i] ≈ pdf(d, x[:, i]) + @test lp[i] ≈ logpdf(d, x[:, i]) + end + + # test result type of logpdf + @test typeof(logpdf(d, x1)) === T + + # test degenerate cases of logpdf + d1 = Multinomial(1, T[0.5, 0.5, 0.0]) + d2 = Multinomial(0, T[0.5, 0.5, 0.0]) + x2 = [1, 0, 0] + x3 = [0, 0, 1] + x4 = [1, 0, 1] + + @test logpdf(d1, x2) ≈ T(log(0.5)) + @test logpdf(d2, x2) == -Inf + @test logpdf(d1, x3) == -Inf + @test logpdf(d2, x3) == -Inf + + # suffstats + + d0 = d + n0 = 100 + x = func[2](d0, n0) + w = func[1](n0) + + ss = suffstats(Multinomial, x) + @test isa(ss, Distributions.MultinomialStats) + @test ss.n == nt + @test ss.scnts == + vec(sum(T[x[i, j] for i = 1:size(x, 1), j = 1:size(x, 2)], dims = 2)) + @test ss.tw == n0 + + ss = suffstats(Multinomial, x, w) + @test isa(ss, Distributions.MultinomialStats) + @test ss.n == nt + @test ss.scnts ≈ T[x[i, j] for i = 1:size(x, 1), j = 1:size(x, 2)] * w + @test ss.tw ≈ sum(w) + + # fit + + x = func[2](d0, 10^5) + @test size(x) == (length(d0), 10^5) + @test all(sum(x, dims = 1) .== nt) + + r = fit(Multinomial, x) + @test r.n == nt + @test length(r) == length(p) + @test probs(r) ≈ p atol = 0.02 + r = fit(Multinomial{Float64}, x) + @test r.n == nt + @test length(r) == length(p) + @test probs(r) ≈ p atol = 0.02 + + r = fit_mle(Multinomial, x, fill(2.0, size(x, 2))) + @test r.n == nt + @test length(r) == length(p) + @test probs(r) ≈ p atol = 0.02 + + # behavior for n = 0 + d0 = Multinomial(0, p) + @test func[1](d0) == [0, 0, 0] + @test pdf(d0, [0, 0, 0]) == 1 + @test pdf(d0, [0, 1, 0]) == 0 + @test mean(d0) == [0, 0, 0] + @test var(d0) == [0, 0, 0] + @test cov(d0) == zeros(3, 3) + @test entropy(d0) == 0 + @test insupport(d0, [0, 0, 0]) == true + @test insupport(d0, [0, 0, 4]) == false + @test length(d0) == 3 + @test size(d0) == (3,) + + # Abstract vector p + + @test typeof(Multinomial(nt, SVector{length(p),T}(p))) == + Multinomial{T,SVector{3,T}} + end - - 
@testset "Testing Multinomial with $key" for (key, func) in - Dict("rand!(...)" => (dist, X) -> rand!(dist, X), - "rand!(rng, ...)" => (dist, X) -> rand!(rng, dist, X)) + + @testset "Testing Multinomial with $key" for (key, func) in Dict( + "rand!(...)" => (dist, X) -> rand!(dist, X), + "rand!(rng, ...)" => (dist, X) -> rand!(rng, dist, X), + ) # random sampling X = Matrix{Int}(undef, length(p), 100) x = @inferred(func(d, X)) @test x ≡ X @test isa(x, Matrix{Int}) - @test all(sum(x, dims=1) .== nt) + @test all(sum(x, dims = 1) .== nt) @test all(insupport(d, x)) pv = pdf(d, x) lp = logpdf(d, x) - for i in 1 : size(x, 2) - @test pv[i] ≈ pdf(d, x[:,i]) - @test lp[i] ≈ logpdf(d, x[:,i]) + for i = 1:size(x, 2) + @test pv[i] ≈ pdf(d, x[:, i]) + @test lp[i] ≈ logpdf(d, x[:, i]) end end - - @testset "Testing Multinomial with $key" for (key, func) in - Dict("rand!(..., true)" => (dist, X) -> rand!(dist, X, true), - "rand!(rng, ..., true)" => (dist, X) -> rand!(rng, dist, X, true)) + + @testset "Testing Multinomial with $key" for (key, func) in Dict( + "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), + "rand!(rng, ..., true)" => (dist, X) -> rand!(rng, dist, X, true), + ) # random sampling X = Vector{Vector{Int}}(undef, 100) x = @inferred(func(d, X)) @@ -177,10 +183,11 @@ using Test @test all(sum.(x) .== nt) @test all(insupport(d, a) for a in x) end - - @testset "Testing Multinomial with $key" for (key, func) in - Dict("rand!(..., false)" => (dist, X) -> rand!(dist, X, false), - "rand!(rng, ..., false)" => (dist, X) -> rand!(rng, dist, X, false)) + + @testset "Testing Multinomial with $key" for (key, func) in Dict( + "rand!(..., false)" => (dist, X) -> rand!(dist, X, false), + "rand!(rng, ..., false)" => (dist, X) -> rand!(rng, dist, X, false), + ) # random sampling X = [Vector{Int}(undef, length(p)) for _ in Base.OneTo(100)] x1 = X[1] @@ -189,12 +196,12 @@ using Test @test all(sum.(x) .== nt) @test all(insupport(d, a) for a in x) end - + repeats = 10 m = Vector{Vector{T}}(undef, repeats) @inferred(rand!(d, m)) @test isassigned(m, 1) - m1=m[1] + m1 = m[1] @inferred(rand!(d, m)) @test m1 ≡ m[1] @inferred(rand!(d, m, true)) @@ -202,15 +209,15 @@ using Test m1 = m[1] @inferred(rand!(d, m, false)) @test m1 ≡ m[1] - + p = T[0.2, 0.4, 0.3, 0.1] nt = 10 d = Multinomial(nt, p) @test_throws DimensionMismatch rand!(d, m, false) @test_nowarn rand!(d, m) - + p_v = T[0.1, 0.4, 0.3, 0.8] @test_throws DomainError Multinomial(10, p_v) - @test_throws DomainError Multinomial(10, p_v; check_args=true) - Multinomial(10, p_v; check_args=false) # should not warn + @test_throws DomainError Multinomial(10, p_v; check_args = true) + Multinomial(10, p_v; check_args = false) # should not warn end diff --git a/test/multivariate/mvlogitnormal.jl b/test/multivariate/mvlogitnormal.jl index 109ab4f32c..22d11a4fc3 100644 --- a/test/multivariate/mvlogitnormal.jl +++ b/test/multivariate/mvlogitnormal.jl @@ -7,7 +7,7 @@ using Test ####### Core testing procedure -function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6) +function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int = 10^6) @test d.normal isa AbstractMvNormal dnorm = d.normal @@ -33,13 +33,13 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6) T = partype(d) <: Float64 ? 
Float32 : Float64 if dnorm isa MvNormal @test convert(MvLogitNormal{MvNormal{T}}, d).normal == - convert(MvNormal{T}, dnorm) + convert(MvNormal{T}, dnorm) @test partype(convert(MvLogitNormal{MvNormal{T}}, d)) <: T @test canonform(d) isa MvLogitNormal{<:MvNormalCanon} @test canonform(d).normal == canonform(dnorm) elseif dnorm isa MvNormalCanon @test convert(MvLogitNormal{MvNormalCanon{T}}, d).normal == - convert(MvNormalCanon{T}, dnorm) + convert(MvNormalCanon{T}, dnorm) @test partype(convert(MvLogitNormal{MvNormalCanon{T}}, d)) <: T @test meanform(d) isa MvLogitNormal{<:MvNormal} @test meanform(d).normal == meanform(dnorm) @@ -48,19 +48,21 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6) @testset "sampling" begin X = rand(d, nsamples) - Y = @views log.(X[1:(end - 1), :]) .- log.(X[end, :]') - Ymean = vec(mean(Y; dims=2)) - Ycov = cov(Y; dims=2) - for i in 1:(length(d) - 1) + Y = @views log.(X[1:(end-1), :]) .- log.(X[end, :]') + Ymean = vec(mean(Y; dims = 2)) + Ycov = cov(Y; dims = 2) + for i = 1:(length(d)-1) @test isapprox( - Ymean[i], mean(dnorm)[i], atol=sqrt(var(dnorm)[i] / nsamples) * 8 + Ymean[i], + mean(dnorm)[i], + atol = sqrt(var(dnorm)[i] / nsamples) * 8, ) end - for i in 1:(length(d) - 1), j in 1:(length(d) - 1) + for i = 1:(length(d)-1), j = 1:(length(d)-1) @test isapprox( Ycov[i, j], cov(dnorm)[i, j], - atol=sqrt(prod(var(dnorm)[[i, j]]) / nsamples) * 20, + atol = sqrt(prod(var(dnorm)[[i, j]]) / nsamples) * 20, ) end end @@ -69,16 +71,18 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6) X = rand(d, nsamples) dfit = fit_mle(MvLogitNormal, X) dfit_norm = dfit.normal - for i in 1:(length(d) - 1) + for i = 1:(length(d)-1) @test isapprox( - mean(dfit_norm)[i], mean(dnorm)[i], atol=sqrt(var(dnorm)[i] / nsamples) * 8 + mean(dfit_norm)[i], + mean(dnorm)[i], + atol = sqrt(var(dnorm)[i] / nsamples) * 8, ) end - for i in 1:(length(d) - 1), j in 1:(length(d) - 1) + for i = 1:(length(d)-1), j = 1:(length(d)-1) @test isapprox( cov(dfit_norm)[i, j], cov(dnorm)[i, j], - atol=sqrt(prod(var(dnorm)[[i, j]]) / nsamples) * 20, + atol = sqrt(prod(var(dnorm)[[i, j]]) / nsamples) * 20, ) end @test fit_mle(MvLogitNormal{IsoNormal}, X) isa MvLogitNormal{<:IsoNormal} @@ -86,11 +90,11 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6) @testset "evaluation" begin X = rand(d, nsamples) - for i in 1:min(100, nsamples) + for i = 1:min(100, nsamples) @test @inferred(logpdf(d, X[:, i])) ≈ log(pdf(d, X[:, i])) if dnorm isa MvNormal @test @inferred(gradlogpdf(d, X[:, i])) ≈ - ForwardDiff.gradient(x -> logpdf(d, x), X[:, i]) + ForwardDiff.gradient(x -> logpdf(d, x), X[:, i]) end end @test logpdf(d, X) ≈ log.(pdf(d, X)) @@ -127,13 +131,13 @@ end @testset "$(typeof(prms))" for prms in mvnorm_params d = MvLogitNormal(prms...) 
@test d == MvLogitNormal(MvNormal(prms...)) - test_mvlogitnormal(d; nsamples=10^4) + test_mvlogitnormal(d; nsamples = 10^4) end end @testset "wraps MvNormalCanon" begin @testset "$(typeof(prms))" for prms in mvnorm_params d = MvLogitNormal(MvNormalCanon(prms...)) - test_mvlogitnormal(d; nsamples=10^4) + test_mvlogitnormal(d; nsamples = 10^4) end end diff --git a/test/multivariate/mvlognormal.jl b/test/multivariate/mvlognormal.jl index 6daea0dede..d6d33e5556 100644 --- a/test/multivariate/mvlognormal.jl +++ b/test/multivariate/mvlognormal.jl @@ -7,8 +7,11 @@ using LinearAlgebra, Random, Test ####### Core testing procedure -function test_mvlognormal(g::MvLogNormal, n_tsamples::Int=10^6, - rng::Union{AbstractRNG, Missing} = missing) +function test_mvlognormal( + g::MvLogNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG,Missing} = missing, +) d = length(g) mn = mean(g) md = median(g) @@ -31,18 +34,19 @@ function test_mvlognormal(g::MvLogNormal, n_tsamples::Int=10^6, @test length(mo) == d @test length(s) == d @test size(S) == (d, d) - @test s ≈ diag(S) - @test std(g) ≈ sqrt.(diag(S)) - @test md ≈ exp.(mean(g.normal)) - @test mn ≈ exp.(mean(g.normal) .+ var(g.normal)/2) - @test mo ≈ exp.(mean(g.normal) .- var(g.normal)) - @test entropy(g) ≈ d*(1 + Distributions.log2π)/2 + logdetcov(g.normal)/2 + sum(mean(g.normal)) + @test s ≈ diag(S) + @test std(g) ≈ sqrt.(diag(S)) + @test md ≈ exp.(mean(g.normal)) + @test mn ≈ exp.(mean(g.normal) .+ var(g.normal) / 2) + @test mo ≈ exp.(mean(g.normal) .- var(g.normal)) + @test entropy(g) ≈ + d * (1 + Distributions.log2π) / 2 + logdetcov(g.normal) / 2 + sum(mean(g.normal)) gg = typeof(g)(MvNormal(params(g)...)) @test Vector(g.normal.μ) == Vector(gg.normal.μ) @test Matrix(g.normal.Σ) == Matrix(gg.normal.Σ) - @test insupport(g,ones(d)) - @test !insupport(g,zeros(d)) - @test !insupport(g,-ones(d)) + @test insupport(g, ones(d)) + @test !insupport(g, zeros(d)) + @test !insupport(g, -ones(d)) # sampling if ismissing(rng) @@ -50,63 +54,103 @@ function test_mvlognormal(g::MvLogNormal, n_tsamples::Int=10^6, else X = rand(rng, g, n_tsamples) end - emp_mn = vec(mean(X, dims=2)) - emp_md = vec(median(X, dims=2)) + emp_mn = vec(mean(X, dims = 2)) + emp_md = vec(median(X, dims = 2)) Z = X .- emp_mn emp_cov = (Z * Z') ./ n_tsamples for i = 1:d - @test isapprox(emp_mn[i] , mn[i] , atol=(sqrt(s[i] / n_tsamples) * 8.0)) + @test isapprox(emp_mn[i], mn[i], atol = (sqrt(s[i] / n_tsamples) * 8.0)) end for i = 1:d - @test isapprox(emp_md[i] , md[i] , atol=(sqrt(s[i] / n_tsamples) * 8.0)) + @test isapprox(emp_md[i], md[i], atol = (sqrt(s[i] / n_tsamples) * 8.0)) end for i = 1:d, j = 1:d - @test isapprox(emp_cov[i,j], S[i,j], atol=(sqrt(s[i] * s[j]) * 20.0) / sqrt(n_tsamples)) + @test isapprox( + emp_cov[i, j], + S[i, j], + atol = (sqrt(s[i] * s[j]) * 20.0) / sqrt(n_tsamples), + ) end # evaluation of logpdf and pdf for i = 1:min(100, n_tsamples) - @test logpdf(g, X[:,i]) ≈ log(pdf(g, X[:,i])) + @test logpdf(g, X[:, i]) ≈ log(pdf(g, X[:, i])) end @test logpdf(g, X) ≈ log.(pdf(g, X)) - @test isequal(logpdf(g, zeros(d)),-Inf) - @test isequal(logpdf(g, -mn),-Inf) - @test isequal(pdf(g, zeros(d)),0.0) - @test isequal(pdf(g, -mn),0.0) + @test isequal(logpdf(g, zeros(d)), -Inf) + @test isequal(logpdf(g, -mn), -Inf) + @test isequal(pdf(g, zeros(d)), 0.0) + @test isequal(pdf(g, -mn), 0.0) # test the location and scale functions - @test isapprox(location(g), location(MvLogNormal,:meancov,mean(g),cov(g)) , atol=1e-8) - @test isapprox(location(g), 
location(MvLogNormal,:mean,mean(g),scale(g)) , atol=1e-8) - @test isapprox(location(g), location(MvLogNormal,:median,median(g),scale(g)), atol=1e-8) - @test isapprox(location(g), location(MvLogNormal,:mode,mode(g),scale(g)) , atol=1e-8) - @test isapprox(scale(g) , scale(MvLogNormal,:meancov,mean(g),cov(g)) , atol=1e-8) + @test isapprox( + location(g), + location(MvLogNormal, :meancov, mean(g), cov(g)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location(MvLogNormal, :mean, mean(g), scale(g)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location(MvLogNormal, :median, median(g), scale(g)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location(MvLogNormal, :mode, mode(g), scale(g)), + atol = 1e-8, + ) + @test isapprox(scale(g), scale(MvLogNormal, :meancov, mean(g), cov(g)), atol = 1e-8) - @test isapprox(location(g), location!(MvLogNormal,:meancov,mean(g),cov(g),zero(mn)) , atol=1e-8) - @test isapprox(location(g), location!(MvLogNormal,:mean,mean(g),scale(g),zero(mn)) , atol=1e-8) - @test isapprox(location(g), location!(MvLogNormal,:median,median(g),scale(g),zero(mn)), atol=1e-8) - @test isapprox(location(g), location!(MvLogNormal,:mode,mode(g),scale(g),zero(mn)) , atol=1e-8) - @test isapprox(scale(g) , Distributions.scale!(MvLogNormal,:meancov,mean(g),cov(g),zero(S)), atol=1e-8) + @test isapprox( + location(g), + location!(MvLogNormal, :meancov, mean(g), cov(g), zero(mn)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location!(MvLogNormal, :mean, mean(g), scale(g), zero(mn)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location!(MvLogNormal, :median, median(g), scale(g), zero(mn)), + atol = 1e-8, + ) + @test isapprox( + location(g), + location!(MvLogNormal, :mode, mode(g), scale(g), zero(mn)), + atol = 1e-8, + ) + @test isapprox( + scale(g), + Distributions.scale!(MvLogNormal, :meancov, mean(g), cov(g), zero(S)), + atol = 1e-8, + ) - lc1,sc1 = params(MvLogNormal,mean(g),cov(g)) - lc2,sc2 = params!(MvLogNormal,mean(g),cov(g),similar(mn),similar(S)) - @test isapprox(location(g), lc1, atol=1e-8) - @test isapprox(location(g), lc2, atol=1e-8) - @test isapprox(scale(g) , sc1, atol=1e-8) - @test isapprox(scale(g) , sc2, atol=1e-8) + lc1, sc1 = params(MvLogNormal, mean(g), cov(g)) + lc2, sc2 = params!(MvLogNormal, mean(g), cov(g), similar(mn), similar(S)) + @test isapprox(location(g), lc1, atol = 1e-8) + @test isapprox(location(g), lc2, atol = 1e-8) + @test isapprox(scale(g), sc1, atol = 1e-8) + @test isapprox(scale(g), sc2, atol = 1e-8) end ####### Validate results for a single-dimension MvLogNormal by comparing with univariate LogNormal @testset "Comparing results from MvLogNormal with univariate LogNormal" begin l1 = LogNormal(0.1, 0.4) l2 = MvLogNormal([0.1], 0.16 * I) - @test [mean(l1)] ≈ mean(l2) - @test [median(l1)] ≈ median(l2) - @test [mode(l1)] ≈ mode(l2) - @test [var(l1)] ≈ var(l2) - @test entropy(l1) ≈ entropy(l2) - @test logpdf(l1,5.0) ≈ logpdf(l2,[5.0]) - @test pdf(l1,5.0) ≈ pdf(l2,[5.0]) - @test (Random.seed!(78393) ; [rand(l1)]) ≈ (Random.seed!(78393) ; rand(l2)) + @test [mean(l1)] ≈ mean(l2) + @test [median(l1)] ≈ median(l2) + @test [mode(l1)] ≈ mode(l2) + @test [var(l1)] ≈ var(l2) + @test entropy(l1) ≈ entropy(l2) + @test logpdf(l1, 5.0) ≈ logpdf(l2, [5.0]) + @test pdf(l1, 5.0) ≈ pdf(l2, [5.0]) + @test (Random.seed!(78393); [rand(l1)]) ≈ (Random.seed!(78393); rand(l2)) @test [rand(MersenneTwister(78393), l1)] ≈ rand(MersenneTwister(78393), l2) end @@ -118,24 +162,35 @@ end va = [0.16, 0.25, 0.36] C = [0.4 -0.2 -0.1; -0.2 0.5 -0.1; 
-0.1 -0.1 0.6] for (g, μ, Σ) in [ - (MvLogNormal(mu,PDMats.PDMat(C)), mu, C), - (MvLogNormal(mu_r,PDMats.PDMat(C)), mu_r, C), + (MvLogNormal(mu, PDMats.PDMat(C)), mu, C), + (MvLogNormal(mu_r, PDMats.PDMat(C)), mu_r, C), (MvLogNormal(PDMats.PDiagMat(sqrt.(va))), zeros(3), Matrix(Diagonal(va))), (@test_deprecated(MvLogNormal(mu, sqrt(0.2))), mu, Matrix(0.2I, 3, 3)), (@test_deprecated(MvLogNormal(3, sqrt(0.2))), zeros(3), Matrix(0.2I, 3, 3)), - (@test_deprecated(MvLogNormal(mu, Vector{Float64}(sqrt.(va)))), mu, Matrix(Diagonal(va))), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 - (@test_deprecated(MvLogNormal(Vector{Float64}(sqrt.(va)))), zeros(3), Matrix(Diagonal(va))), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 + ( + @test_deprecated(MvLogNormal(mu, Vector{Float64}(sqrt.(va)))), + mu, + Matrix(Diagonal(va)), + ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 + ( + @test_deprecated(MvLogNormal(Vector{Float64}(sqrt.(va)))), + zeros(3), + Matrix(Diagonal(va)), + ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 (MvLogNormal(mu, C), mu, C), (MvLogNormal(mu, Diagonal(C)), mu, Diagonal(C)), (MvLogNormal(mu, Symmetric(Diagonal(C))), mu, Diagonal(C)), - (MvLogNormal(C), zeros(3), C) ] + (MvLogNormal(C), zeros(3), C), + ] m, s = params(g) @test Vector(m) ≈ μ test_mvlognormal(g, 10^4) end d = MvLogNormal(Array{Float32}(mu), PDMats.PDMat(Array{Float32}(C))) @test convert(MvLogNormal{Float32}, d) === d - @test typeof(convert(MvLogNormal{Float64}, d)) == typeof(MvLogNormal(mu, PDMats.PDMat(C))) - @test typeof(convert(MvLogNormal{Float64}, d.normal.μ, d.normal.Σ)) == typeof(MvLogNormal(mu, PDMats.PDMat(C))) + @test typeof(convert(MvLogNormal{Float64}, d)) == + typeof(MvLogNormal(mu, PDMats.PDMat(C))) + @test typeof(convert(MvLogNormal{Float64}, d.normal.μ, d.normal.Σ)) == + typeof(MvLogNormal(mu, PDMats.PDMat(C))) @test d == deepcopy(d) end diff --git a/test/multivariate/mvnormal.jl b/test/multivariate/mvnormal.jl index 4af9275b3f..841b9fb876 100644 --- a/test/multivariate/mvnormal.jl +++ b/test/multivariate/mvnormal.jl @@ -13,15 +13,15 @@ using FillArrays ###### General Testing @testset "MvNormal tests" begin - mu = [1., 2., 3.] - mu_r = 1.:3. + mu = [1.0, 2.0, 3.0] + mu_r = 1.0:3.0 va = [1.2, 3.4, 2.6] - C = [4. -2. -1.; -2. 5. -1.; -1. -1. 6.] + C = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] - h = [1., 2., 3.] + h = [1.0, 2.0, 3.0] dv = [1.2, 3.4, 2.6] - J = [4. -2. -1.; -2. 5. -1.; -1. -1. 6.] 
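The h/J pairs in this file feed MvNormalCanon, whose canonical (natural) parameters relate to the mean form by J = Σ⁻¹ and h = J * μ; the canonform/meanform round-trips exercised below rely on that correspondence. A minimal sketch of it, using only exported API (the `mu` and `C` values mirror this testset, and the field names `μ`, `h`, `J` are the ones accessed elsewhere in this diff):

using Distributions, LinearAlgebra

mu = [1.0, 2.0, 3.0]
C = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0]
g = MvNormal(mu, C)
gc = canonform(g)                  # MvNormalCanon with J = inv(C) and h = J * mu
@assert Matrix(gc.J) ≈ inv(C)
@assert gc.h ≈ C \ mu              # h = Σ⁻¹ * μ
@assert cov(gc) ≈ C
@assert mean(meanform(gc)) ≈ mu    # round-trip back to mean form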
- D = Diagonal(J); + J = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] + D = Diagonal(J) for (g, μ, Σ) in [ (@test_deprecated(MvNormal(mu, sqrt(2.0))), mu, Matrix(2.0I, 3, 3)), (@test_deprecated(MvNormal(mu_r, sqrt(2.0))), mu_r, Matrix(2.0I, 3, 3)), @@ -51,10 +51,11 @@ using FillArrays (MvNormal(mu, Diagonal(dv)), mu, Matrix(Diagonal(dv))), (MvNormal(mu, Symmetric(Diagonal(dv))), mu, Matrix(Diagonal(dv))), (MvNormal(mu, Hermitian(Diagonal(dv))), mu, Matrix(Diagonal(dv))), - (MvNormal(mu_r, Diagonal(dv)), mu_r, Matrix(Diagonal(dv))) ] + (MvNormal(mu_r, Diagonal(dv)), mu_r, Matrix(Diagonal(dv))), + ] - @test mean(g) ≈ μ - @test cov(g) ≈ Σ + @test mean(g) ≈ μ + @test cov(g) ≈ Σ @test invcov(g) ≈ inv(Σ) Distributions.TestUtils.test_mvnormal(g, 10^4) @@ -64,33 +65,37 @@ using FillArrays @test isa(gc, MvNormalCanon) @test length(gc) == length(g) @test mean(gc) ≈ mean(g) - @test cov(gc) ≈ cov(g) + @test cov(gc) ≈ cov(g) else @assert isa(g, MvNormalCanon) gc = meanform(g) @test isa(gc, MvNormal) @test length(gc) == length(g) @test mean(gc) ≈ mean(g) - @test cov(gc) ≈ cov(g) + @test cov(gc) ≈ cov(g) end end end @testset "MvNormal constructor" begin - mu = [1., 2., 3.] - C = [4. -2. -1.; -2. 5. -1.; -1. -1. 6.] + mu = [1.0, 2.0, 3.0] + C = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] J = inv(C) h = J \ mu @test typeof(MvNormal(mu, PDMat(Array{Float32}(C)))) == typeof(MvNormal(mu, PDMat(C))) @test typeof(MvNormal(mu, Array{Float32}(C))) == typeof(MvNormal(mu, PDMat(C))) - @test typeof(@test_deprecated(MvNormal(mu, 2.0f0))) == typeof(@test_deprecated(MvNormal(mu, 2.0))) + @test typeof(@test_deprecated(MvNormal(mu, 2.0f0))) == + typeof(@test_deprecated(MvNormal(mu, 2.0))) - @test typeof(MvNormalCanon(h, PDMat(Array{Float32}(J)))) == typeof(MvNormalCanon(h, PDMat(J))) + @test typeof(MvNormalCanon(h, PDMat(Array{Float32}(J)))) == + typeof(MvNormalCanon(h, PDMat(J))) @test typeof(MvNormalCanon(h, Array{Float32}(J))) == typeof(MvNormalCanon(h, PDMat(J))) - @test typeof(@test_deprecated(MvNormalCanon(h, 2.0f0))) == typeof(@test_deprecated(MvNormalCanon(h, 2.0))) + @test typeof(@test_deprecated(MvNormalCanon(h, 2.0f0))) == + typeof(@test_deprecated(MvNormalCanon(h, 2.0))) - @test typeof(MvNormalCanon(mu, Array{Float16}(h), PDMat(Array{Float32}(J)))) == typeof(MvNormalCanon(mu, h, PDMat(J))) + @test typeof(MvNormalCanon(mu, Array{Float16}(h), PDMat(Array{Float32}(J)))) == + typeof(MvNormalCanon(mu, h, PDMat(J))) d = MvNormal(Array{Float32}(mu), PDMat(Array{Float32}(C))) @test convert(MvNormal{Float32}, d) === d @@ -99,8 +104,10 @@ end d = MvNormalCanon(Array{Float32}(mu), Array{Float32}(h), PDMat(Array{Float32}(J))) @test convert(MvNormalCanon{Float32}, d) === d - @test typeof(convert(MvNormalCanon{Float64}, d)) == typeof(MvNormalCanon(mu, h, PDMat(J))) - @test typeof(convert(MvNormalCanon{Float64}, d.μ, d.h, d.J)) == typeof(MvNormalCanon(mu, h, PDMat(J))) + @test typeof(convert(MvNormalCanon{Float64}, d)) == + typeof(MvNormalCanon(mu, h, PDMat(J))) + @test typeof(convert(MvNormalCanon{Float64}, d.μ, d.h, d.J)) == + typeof(MvNormalCanon(mu, h, PDMat(J))) @test MvNormal(mu, I) === @test_deprecated(MvNormal(mu, 1)) @test MvNormal(mu, 9 * I) === @test_deprecated(MvNormal(mu, 3)) @@ -112,19 +119,23 @@ end @test MvNormalCanon(h, I) == MvNormalCanon(h, Diagonal(Ones(length(h)))) @test MvNormalCanon(h, 9 * I) == MvNormalCanon(h, Diagonal(Fill(9, length(h)))) - @test MvNormalCanon(h, 0.25f0 * I) == MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h)))) - - @test typeof(MvNormalCanon(h, I)) === typeof(MvNormalCanon(h, 
Diagonal(Ones(length(h))))) - @test typeof(MvNormalCanon(h, 9 * I)) === typeof(MvNormalCanon(h, Diagonal(Fill(9, length(h))))) - @test typeof(MvNormalCanon(h, 0.25f0 * I)) === typeof(MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h))))) + @test MvNormalCanon(h, 0.25f0 * I) == + MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h)))) + + @test typeof(MvNormalCanon(h, I)) === + typeof(MvNormalCanon(h, Diagonal(Ones(length(h))))) + @test typeof(MvNormalCanon(h, 9 * I)) === + typeof(MvNormalCanon(h, Diagonal(Fill(9, length(h))))) + @test typeof(MvNormalCanon(h, 0.25f0 * I)) === + typeof(MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h))))) end @testset "MvNormal 32-bit logpdf" begin # Test 32-bit logpdf - mu = [1., 2., 3.] - C = [4. -2. -1.; -2. 5. -1.; -1. -1. 6.] + mu = [1.0, 2.0, 3.0] + C = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] d = MvNormal(mu, PDMat(C)) - X = [1., 2., 3.] + X = [1.0, 2.0, 3.0] d32 = convert(MvNormal{Float32}, d) X32 = convert(AbstractArray{Float32}, X) @@ -146,8 +157,8 @@ if isdefined(PDMats, :PDSparseMat) J = PDSparseMat(J) μ = zeros(n) - d_prec_sparse = MvNormalCanon(μ, J*μ, J) - d_prec_dense = MvNormalCanon(μ, J*μ, PDMat(Matrix(J))) + d_prec_sparse = MvNormalCanon(μ, J * μ, J) + d_prec_dense = MvNormalCanon(μ, J * μ, PDMat(Matrix(J))) d_cov_dense = MvNormal(μ, PDMat(Symmetric(Σ))) x_prec_sparse = rand(d_prec_sparse, nsamp) @@ -168,12 +179,12 @@ if isdefined(PDMats, :PDSparseMat) identical. As a result, these tests only check for approximate statistical equality, rather than strict numerical equality of the samples. =# - for i in 1:3, j in 1:3 + for i = 1:3, j = 1:3 @test all(abs.(mean(samples[i]) .- μ) .< 2se) - loglik_ii = [logpdf(dists[i], samples[i][:, k]) for k in 1:100_000] - loglik_ji = [logpdf(dists[j], samples[i][:, k]) for k in 1:100_000] + loglik_ii = [logpdf(dists[i], samples[i][:, k]) for k = 1:100_000] + loglik_ji = [logpdf(dists[j], samples[i][:, k]) for k = 1:100_000] # test average likelihood ratio between distribution i and sample j are small - @test mean((loglik_ii .- loglik_ji).^2) < tol + @test mean((loglik_ii .- loglik_ji) .^ 2) < tol end end end @@ -184,23 +195,23 @@ end # a slow but safe way to implement MLE for verification function _gauss_mle(x::AbstractMatrix{<:Real}) - mu = vec(mean(x, dims=2)) + mu = vec(mean(x, dims = 2)) z = x .- mu - C = (z * z') * (1/size(x,2)) + C = (z * z') * (1 / size(x, 2)) return mu, C end function _gauss_mle(x::AbstractMatrix{<:Real}, w::AbstractVector{<:Real}) sw = sum(w) - mu = (x * w) * (1/sw) + mu = (x * w) * (1 / sw) z = x .- mu - C = (z * (Diagonal(w) * z')) * (1/sw) + C = (z * (Diagonal(w) * z')) * (1 / sw) LinearAlgebra.copytri!(C, 'U') return mu, C end @testset "MvNormal MLE" begin - x = randn(3, 200) .+ randn(3) * 2. 
+ x = randn(3, 200) .+ randn(3) * 2.0 w = rand(200) u, C = _gauss_mle(x) uw, Cw = _gauss_mle(x, w) @@ -208,47 +219,49 @@ end g = fit_mle(MvNormal, suffstats(MvNormal, x)) @test isa(g, FullNormal) @test mean(g) ≈ u - @test cov(g) ≈ C + @test cov(g) ≈ C g = fit_mle(MvNormal, x) @test isa(g, FullNormal) @test mean(g) ≈ u - @test cov(g) ≈ C + @test cov(g) ≈ C g = fit_mle(MvNormal, x, w) @test isa(g, FullNormal) @test mean(g) ≈ uw - @test cov(g) ≈ Cw + @test cov(g) ≈ Cw g = fit_mle(IsoNormal, x) @test isa(g, IsoNormal) - @test g.μ ≈ u + @test g.μ ≈ u @test g.Σ.value ≈ mean(diag(C)) g = fit_mle(IsoNormal, x, w) @test isa(g, IsoNormal) - @test g.μ ≈ uw + @test g.μ ≈ uw @test g.Σ.value ≈ mean(diag(Cw)) g = fit_mle(DiagNormal, x) @test isa(g, DiagNormal) - @test g.μ ≈ u + @test g.μ ≈ u @test g.Σ.diag ≈ diag(C) g = fit_mle(DiagNormal, x, w) @test isa(g, DiagNormal) - @test g.μ ≈ uw + @test g.μ ≈ uw @test g.Σ.diag ≈ diag(Cw) end @testset "MvNormal affine transformations" begin @testset "moment identities" begin - for n in 1:5 # dimension + for n = 1:5 # dimension # distribution μ = randn(n) - for Σ in (randn(n, n) |> A -> A*A', # dense - Diagonal(abs2.(randn(n))), # diagonal - abs2(randn()) * I) # scaled unit + for Σ in ( + randn(n, n) |> A -> A * A', # dense + Diagonal(abs2.(randn(n))), # diagonal + abs2(randn()) * I, + ) # scaled unit d = MvNormal(μ, Σ) # random arrays for transformations diff --git a/test/multivariate/mvtdist.jl b/test/multivariate/mvtdist.jl index e403b0a83d..540b62e838 100644 --- a/test/multivariate/mvtdist.jl +++ b/test/multivariate/mvtdist.jl @@ -5,85 +5,95 @@ import Distributions: GenericMvTDist import PDMats: PDMat @testset "mvtdist" begin -# Set location vector mu and scale matrix Sigma as in -# Hofert M. On Sampling from the Multivariate t Distribution. The R Journal -mu = [1., 2] -Sigma = [4. 2; 2 3] + # Set location vector mu and scale matrix Sigma as in + # Hofert M. On Sampling from the Multivariate t Distribution. The R Journal + mu = [1.0, 2] + Sigma = [4.0 2; 2 3] -# LogPDF evaluation for varying degrees of freedom df -# Julia's output is compared to R's corresponding values obtained via R's mvtnorm package -# R code exemplifying how the R values (rvalues) were obtained: -# options(digits=20) -# library("mvtnorm") -# mu <- 1:2 -# Sigma <- matrix(c(4, 2, 2, 3), ncol=2) -# dmvt(c(-2., 3.), delta=mu, sigma=Sigma, df=1) -rvalues = [-5.6561739738159975133, - -5.4874952805811396672, - -5.4441948098568158088, - -5.432461875138580254, - -5.4585441614404803801] -df = [1., 2, 3, 5, 10] -for i = 1:length(df) - d = MvTDist(df[i], mu, Sigma) - @test isapprox(logpdf(d, [-2., 3]), rvalues[i], atol=1.0e-8) - dd = typeof(d)(params(d)...) - @test d.df == dd.df - @test Vector(d.μ) == Vector(dd.μ) - @test Matrix(d.Σ) == Matrix(dd.Σ) -end + # LogPDF evaluation for varying degrees of freedom df + # Julia's output is compared to R's corresponding values obtained via R's mvtnorm package + # R code exemplifying how the R values (rvalues) were obtained: + # options(digits=20) + # library("mvtnorm") + # mu <- 1:2 + # Sigma <- matrix(c(4, 2, 2, 3), ncol=2) + # dmvt(c(-2., 3.), delta=mu, sigma=Sigma, df=1) + rvalues = [ + -5.6561739738159975133, + -5.4874952805811396672, + -5.4441948098568158088, + -5.432461875138580254, + -5.4585441614404803801, + ] + df = [1.0, 2, 3, 5, 10] + for i = 1:length(df) + d = MvTDist(df[i], mu, Sigma) + @test isapprox(logpdf(d, [-2.0, 3]), rvalues[i], atol = 1.0e-8) + dd = typeof(d)(params(d)...) 
+ @test d.df == dd.df + @test Vector(d.μ) == Vector(dd.μ) + @test Matrix(d.Σ) == Matrix(dd.Σ) + end -# test constructors for mixed inputs: -@test typeof(GenericMvTDist(1, Vector{Float32}(mu), PDMat(Sigma))) == typeof(GenericMvTDist(1., mu, PDMat(Sigma))) + # test constructors for mixed inputs: + @test typeof(GenericMvTDist(1, Vector{Float32}(mu), PDMat(Sigma))) == + typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) -@test typeof(GenericMvTDist(1, mu, PDMat(Array{Float32}(Sigma)))) == typeof(GenericMvTDist(1., mu, PDMat(Sigma))) + @test typeof(GenericMvTDist(1, mu, PDMat(Array{Float32}(Sigma)))) == + typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) -d = GenericMvTDist(1, Array{Float32}(mu), PDMat(Array{Float32}(Sigma))) -@test convert(GenericMvTDist{Float32}, d) === d -@test typeof(convert(GenericMvTDist{Float64}, d)) == typeof(GenericMvTDist(1, mu, PDMat(Sigma))) -@test typeof(convert(GenericMvTDist{Float64}, d.df, d.dim, d.μ, d.Σ)) == typeof(GenericMvTDist(1, mu, PDMat(Sigma))) -@test partype(d) == Float32 -@test d == deepcopy(d) + d = GenericMvTDist(1, Array{Float32}(mu), PDMat(Array{Float32}(Sigma))) + @test convert(GenericMvTDist{Float32}, d) === d + @test typeof(convert(GenericMvTDist{Float64}, d)) == + typeof(GenericMvTDist(1, mu, PDMat(Sigma))) + @test typeof(convert(GenericMvTDist{Float64}, d.df, d.dim, d.μ, d.Σ)) == + typeof(GenericMvTDist(1, mu, PDMat(Sigma))) + @test partype(d) == Float32 + @test d == deepcopy(d) -@test size(rand(MvTDist(1., mu, Sigma))) == (2,) -@test size(rand(MvTDist(1., mu, Sigma), 10)) == (2,10) -@test size(rand(MersenneTwister(123), MvTDist(1., mu, Sigma))) == (2,) -@test size(rand(MersenneTwister(123), MvTDist(1., mu, Sigma), 10)) == (2,10) + @test size(rand(MvTDist(1.0, mu, Sigma))) == (2,) + @test size(rand(MvTDist(1.0, mu, Sigma), 10)) == (2, 10) + @test size(rand(MersenneTwister(123), MvTDist(1.0, mu, Sigma))) == (2,) + @test size(rand(MersenneTwister(123), MvTDist(1.0, mu, Sigma), 10)) == (2, 10) -# static array for mean/variance -mu_static = @SVector [1., 2] -# depends on PDMats#101 (merged but not released) -# Sigma_static = @SMatrix [4. 2; 2 3] + # static array for mean/variance + mu_static = @SVector [1.0, 2] + # depends on PDMats#101 (merged but not released) + # Sigma_static = @SMatrix [4. 2; 2 3] -for i in 1:length(df) - d = GenericMvTDist(df[i], mu_static, PDMat(Sigma)) - d32 = convert(GenericMvTDist{Float32}, d) - @test d.μ isa SVector - @test isapprox(logpdf(d, [-2., 3]), rvalues[i], atol=1.0e-8) - @test isa(logpdf(d32, [-2f0, 3f0]), Float32) - @test isapprox(logpdf(d32, [-2f0, 3f0]), convert(Float32, rvalues[i]), atol=1.0e-4) - dd = typeof(d)(params(d)...) - @test d.df == dd.df - @test d.μ == dd.μ - @test Matrix(d.Σ) == Matrix(dd.Σ) -end + for i = 1:length(df) + d = GenericMvTDist(df[i], mu_static, PDMat(Sigma)) + d32 = convert(GenericMvTDist{Float32}, d) + @test d.μ isa SVector + @test isapprox(logpdf(d, [-2.0, 3]), rvalues[i], atol = 1.0e-8) + @test isa(logpdf(d32, [-2.0f0, 3.0f0]), Float32) + @test isapprox( + logpdf(d32, [-2.0f0, 3.0f0]), + convert(Float32, rvalues[i]), + atol = 1.0e-4, + ) + dd = typeof(d)(params(d)...) + @test d.df == dd.df + @test d.μ == dd.μ + @test Matrix(d.Σ) == Matrix(dd.Σ) + end -@testset "zero-mean" begin + @testset "zero-mean" begin - X_implicit = GenericMvTDist(2.0, PDMat(Sigma)) - X_expicit = GenericMvTDist(2.0, zeros(2), PDMat(Sigma)) + X_implicit = GenericMvTDist(2.0, PDMat(Sigma)) + X_explicit = GenericMvTDist(2.0, zeros(2), PDMat(Sigma)) - # Check that the means equal the same thing.
- @test mean(X_expicit) == mean(X_implicit) + # Check that the means are equal. + @test mean(X_explicit) == mean(X_implicit) - # Check that generated random numbers are the same. - @test isapprox( - rand(MersenneTwister(123456), X_expicit), - rand(MersenneTwister(123456), X_implicit), - ) + # Check that generated random numbers are the same. + @test isapprox( + rand(MersenneTwister(123456), X_explicit), + rand(MersenneTwister(123456), X_implicit), + ) - # Check that the logpdf computed is the same. - x = rand(X_implicit) - @test logpdf(X_implicit, x) ≈ logpdf(X_expicit, x) -end + # Check that the computed logpdf is the same. + x = rand(X_implicit) + @test logpdf(X_implicit, x) ≈ logpdf(X_explicit, x) + end end diff --git a/test/multivariate/product.jl b/test/multivariate/product.jl index ada4302c12..ba5a4ccc28 100644 --- a/test/multivariate/product.jl +++ b/test/multivariate/product.jl @@ -9,67 +9,36 @@ using Distributions: Product # TODO: remove when `Product` is removed @testset "Deprecated `Product` distribution" begin -@testset "Testing normal product distributions" begin - Random.seed!(123456) - N = 11 - # Construct independent distributions and `Product` distribution from these. - μ = randn(N) - ds = Normal.(μ, 1.0) - x = rand.(ds) - d_product = @test_deprecated(Product(ds)) - @test d_product isa Product - # Check that methods for `Product` are consistent. - @test length(d_product) == length(ds) - @test eltype(d_product) === eltype(ds[1]) - @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x)) - @test mean(d_product) == mean.(ds) - @test std(d_product) == std.(ds) - @test var(d_product) == var.(ds) - @test cov(d_product) == Diagonal(var.(ds)) - @test entropy(d_product) ≈ sum(entropy.(ds)) - - y = rand(d_product) - @test y isa typeof(x) - @test length(y) == N -end - -@testset "Testing generic product distributions" begin - Random.seed!(123456) - N = 11 - # Construct independent distributions and `Product` distribution from these. - ubound = rand(N) - ds = Uniform.(-ubound, ubound) - x = rand.(ds) - d_product = product_distribution(ds) - @test d_product isa Product - # Check that methods for `Product` are consistent. - @test length(d_product) == length(ds) - @test eltype(d_product) === eltype(ds[1]) - @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x)) - @test mean(d_product) == mean.(ds) - @test std(d_product) == std.(ds) - @test var(d_product) == var.(ds) - @test cov(d_product) == Diagonal(var.(ds)) - @test entropy(d_product) == sum(entropy.(ds)) - @test insupport(d_product, ubound) == true - @test insupport(d_product, ubound .+ 1) == false - @test minimum(d_product) == -ubound - @test maximum(d_product) == ubound - @test extrema(d_product) == (-ubound, ubound) - @test isless(extrema(d_product)...) - - y = rand(d_product) - @test y isa typeof(x) - @test length(y) == N -end + @testset "Testing normal product distributions" begin + Random.seed!(123456) + N = 11 + # Construct independent distributions and `Product` distribution from these. + μ = randn(N) + ds = Normal.(μ, 1.0) + x = rand.(ds) + d_product = @test_deprecated(Product(ds)) + @test d_product isa Product + # Check that methods for `Product` are consistent.
+ @test length(d_product) == length(ds) + @test eltype(d_product) === eltype(ds[1]) + @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x)) + @test mean(d_product) == mean.(ds) + @test std(d_product) == std.(ds) + @test var(d_product) == var.(ds) + @test cov(d_product) == Diagonal(var.(ds)) + @test entropy(d_product) ≈ sum(entropy.(ds)) -@testset "Testing discrete non-parametric product distribution" begin - Random.seed!(123456) - N = 11 + y = rand(d_product) + @test y isa typeof(x) + @test length(y) == N + end - for a in ([0, 1], [-0.5, 0.5]) + @testset "Testing generic product distributions" begin + Random.seed!(123456) + N = 11 # Construct independent distributions and `Product` distribution from these. - ds = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N] + ubound = rand(N) + ds = Uniform.(-ubound, ubound) x = rand.(ds) d_product = product_distribution(ds) @test d_product isa Product @@ -82,29 +51,60 @@ end @test var(d_product) == var.(ds) @test cov(d_product) == Diagonal(var.(ds)) @test entropy(d_product) == sum(entropy.(ds)) - @test insupport(d_product, fill(a[2], N)) == true - @test insupport(d_product, fill(a[2] + 1, N)) == false + @test insupport(d_product, ubound) == true + @test insupport(d_product, ubound .+ 1) == false + @test minimum(d_product) == -ubound + @test maximum(d_product) == ubound + @test extrema(d_product) == (-ubound, ubound) + @test isless(extrema(d_product)...) y = rand(d_product) @test y isa typeof(x) @test length(y) == N end -end -@testset "Testing iid product distributions" begin - Random.seed!(123456) - N = 11 - d = @test_deprecated(Product(Fill(Laplace(0.0, 2.3), N))) - @test N == length(unique(rand(d))); - @test mean(d) === Fill(0.0, N) - @test cov(d) === Diagonal(Fill(var(Laplace(0.0, 2.3)), N)) -end + @testset "Testing discrete non-parametric product distribution" begin + Random.seed!(123456) + N = 11 + + for a in ([0, 1], [-0.5, 0.5]) + # Construct independent distributions and `Product` distribution from these. + ds = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ = 1:N] + x = rand.(ds) + d_product = product_distribution(ds) + @test d_product isa Product + # Check that methods for `Product` are consistent. 
+ @test length(d_product) == length(ds) + @test eltype(d_product) === eltype(ds[1]) + @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x)) + @test mean(d_product) == mean.(ds) + @test std(d_product) == std.(ds) + @test var(d_product) == var.(ds) + @test cov(d_product) == Diagonal(var.(ds)) + @test entropy(d_product) == sum(entropy.(ds)) + @test insupport(d_product, fill(a[2], N)) == true + @test insupport(d_product, fill(a[2] + 1, N)) == false + + y = rand(d_product) + @test y isa typeof(x) + @test length(y) == N + end + end -@testset "Empty vector of distributions (#1619)" begin - d = @inferred(product_distribution(typeof(Beta(1, 1))[])) - @test d isa Product - @test iszero(@inferred(logpdf(d, Float64[]))) - @test_throws DimensionMismatch logpdf(d, rand(1)) - @test_throws DimensionMismatch logpdf(d, rand(3)) + @testset "Testing iid product distributions" begin + Random.seed!(123456) + N = 11 + d = @test_deprecated(Product(Fill(Laplace(0.0, 2.3), N))) + @test N == length(unique(rand(d))) + @test mean(d) === Fill(0.0, N) + @test cov(d) === Diagonal(Fill(var(Laplace(0.0, 2.3)), N)) + end + + @testset "Empty vector of distributions (#1619)" begin + d = @inferred(product_distribution(typeof(Beta(1, 1))[])) + @test d isa Product + @test iszero(@inferred(logpdf(d, Float64[]))) + @test_throws DimensionMismatch logpdf(d, rand(1)) + @test_throws DimensionMismatch logpdf(d, rand(3)) + end end -end \ No newline at end of file diff --git a/test/multivariate/vonmisesfisher.jl b/test/multivariate/vonmisesfisher.jl index cc45f41ed5..6e000be393 100644 --- a/test/multivariate/vonmisesfisher.jl +++ b/test/multivariate/vonmisesfisher.jl @@ -5,24 +5,24 @@ using LinearAlgebra, Test using SpecialFunctions -logvmfCp(p::Int, κ::Real) = (p / 2 - 1) * log(κ) - log(besselix(p / 2 - 1, κ)) - κ - p / 2 * log(2π) +logvmfCp(p::Int, κ::Real) = + (p / 2 - 1) * log(κ) - log(besselix(p / 2 - 1, κ)) - κ - p / 2 * log(2π) safe_logvmfpdf(μ::Vector, κ::Real, x::Vector) = logvmfCp(length(μ), κ) + κ * dot(μ, x) -function gen_vmf_tdata(n::Int, p::Int, - rng::Union{AbstractRNG, Missing} = missing) +function gen_vmf_tdata(n::Int, p::Int, rng::Union{AbstractRNG,Missing} = missing) if ismissing(rng) X = randn(p, n) else X = randn(rng, p, n) end for i = 1:n - X[:,i] = X[:,i] ./ norm(X[:,i]) + X[:, i] = X[:, i] ./ norm(X[:, i]) end return X end -function test_vmf_rot(p::Int, rng::Union{AbstractRNG, Missing} = missing) +function test_vmf_rot(p::Int, rng::Union{AbstractRNG,Missing} = missing) if ismissing(rng) μ = randn(p) x = randn(p) @@ -34,16 +34,16 @@ function test_vmf_rot(p::Int, rng::Union{AbstractRNG, Missing} = missing) μ = μ ./ κ s = Distributions.VonMisesFisherSampler(μ, κ) - v = μ - vcat(1, zeros(p-1)) - H = I - 2*v*v'/(v'*v) + v = μ - vcat(1, zeros(p - 1)) + H = I - 2 * v * v' / (v' * v) - @test Distributions._vmf_rot!(s.v, copy(x)) ≈ (H*x) + @test Distributions._vmf_rot!(s.v, copy(x)) ≈ (H * x) end -function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG, Missing} = missing) +function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG,Missing} = missing) p = 3 if ismissing(rng) @@ -55,24 +55,29 @@ function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG, Missing} = missin s = Distributions.VonMisesFisherSampler(μ, float(κ)) - genw3_res = [Distributions._vmf_genw3(rng, s.p, s.b, s.x0, s.c, s.κ) for _ in 1:ns] - genwp_res = [Distributions._vmf_genwp(rng, s.p, s.b, s.x0, s.c, s.κ) for _ in 1:ns] + genw3_res = [Distributions._vmf_genw3(rng, s.p, s.b, s.x0, s.c, s.κ) for _ = 1:ns] + genwp_res = 
[Distributions._vmf_genwp(rng, s.p, s.b, s.x0, s.c, s.κ) for _ = 1:ns] - @test isapprox(mean(genw3_res), mean(genwp_res), atol=0.01) - @test isapprox(std(genw3_res), std(genwp_res), atol=0.01/κ) + @test isapprox(mean(genw3_res), mean(genwp_res), atol = 0.01) + @test isapprox(std(genw3_res), std(genwp_res), atol = 0.01 / κ) # test mean and stdev against analytical formulas coth_κ = coth(κ) - mean_w = coth_κ - 1/κ - var_w = 1 - coth_κ^2 + 1/κ^2 + mean_w = coth_κ - 1 / κ + var_w = 1 - coth_κ^2 + 1 / κ^2 - @test isapprox(mean(genw3_res), mean_w, atol=0.01) - @test isapprox(std(genw3_res), sqrt(var_w), atol=0.01/κ) + @test isapprox(mean(genw3_res), mean_w, atol = 0.01) + @test isapprox(std(genw3_res), sqrt(var_w), atol = 0.01 / κ) end -function test_vonmisesfisher(p::Int, κ::Real, n::Int, ns::Int, - rng::Union{AbstractRNG, Missing} = missing) +function test_vonmisesfisher( + p::Int, + κ::Real, + n::Int, + ns::Int, + rng::Union{AbstractRNG,Missing} = missing, +) if ismissing(rng) μ = randn(p) else @@ -88,7 +93,10 @@ function test_vonmisesfisher(p::Int, κ::Real, n::Int, ns::Int, # conversions @test convert(VonMisesFisher{partype(d)}, d) === d - for d32 in (convert(VonMisesFisher{Float32}, d), convert(VonMisesFisher{Float32}, d.μ, d.κ, d.logCκ)) + for d32 in ( + convert(VonMisesFisher{Float32}, d), + convert(VonMisesFisher{Float32}, d.μ, d.κ, d.logCκ), + ) @test d32 isa VonMisesFisher{Float32} @test params(d32) == (map(Float32, μ), Float32(κ)) end @@ -99,12 +107,12 @@ function test_vonmisesfisher(p::Int, κ::Real, n::Int, ns::Int, @test meandir(d2) ≈ μ @test concentration(d2) ≈ κ - @test d.logCκ ≈ logvmfCp(p, κ) atol=1.0e-12 + @test d.logCκ ≈ logvmfCp(p, κ) atol = 1.0e-12 X = gen_vmf_tdata(n, p, rng) lp0 = zeros(n) for i = 1:n - xi = X[:,i] + xi = X[:, i] lp0[i] = safe_logvmfpdf(μ, κ, xi) @test logpdf(d, xi) ≈ lp0[i] end @@ -125,8 +133,8 @@ function test_vonmisesfisher(p::Int, κ::Real, n::Int, ns::Int, X = rand(rng, d, n) end for i = 1:n - @test norm(X[:,i]) ≈ 1.0 - @test insupport(d, X[:,i]) + @test norm(X[:, i]) ≈ 1.0 + @test insupport(d, X[:, i]) end # MLE @@ -137,12 +145,12 @@ function test_vonmisesfisher(p::Int, κ::Real, n::Int, ns::Int, end d_est = fit_mle(VonMisesFisher, X) @test isa(d_est, VonMisesFisher) - @test isapprox(d_est.μ, μ, atol=0.01) - @test isapprox(d_est.κ, κ, atol=κ * 0.01) + @test isapprox(d_est.μ, μ, atol = 0.01) + @test isapprox(d_est.κ, κ, atol = κ * 0.01) d_est = fit(VonMisesFisher{Float64}, X) @test isa(d_est, VonMisesFisher) - @test isapprox(d_est.μ, μ, atol=0.01) - @test isapprox(d_est.κ, κ, atol=κ * 0.01) + @test isapprox(d_est.μ, μ, atol = 0.01) + @test isapprox(d_est.κ, κ, atol = κ * 0.01) end @@ -161,17 +169,20 @@ end n = 1000 ns = 10^6 -@testset "Testing VonMisesFisher with $key" for (key, rng) in - Dict("rand(...)" => missing, - "rand(rng, ...)" => MersenneTwister(123)) - - @testset "Testing VonMisesFisher with $key at ($p, $κ)" for (p, κ) in [(2, 1.0), - (2, 5.0), - (3, 1.0), - (3, 5.0), - (5, 2.0), - (2, 2), - (2, 1000)] # test with large κ +@testset "Testing VonMisesFisher with $key" for (key, rng) in Dict( + "rand(...)" => missing, + "rand(rng, ...)" => MersenneTwister(123), +) + + @testset "Testing VonMisesFisher with $key at ($p, $κ)" for (p, κ) in [ + (2, 1.0), + (2, 5.0), + (3, 1.0), + (3, 5.0), + (5, 2.0), + (2, 2), + (2, 1000), + ] # test with large κ test_vonmisesfisher(p, κ, n, ns, rng) test_vmf_rot(p, rng) end diff --git a/test/multivariate_stats.jl b/test/multivariate_stats.jl index f5bd9d8eb6..92b264dd67 100644 --- a/test/multivariate_stats.jl +++ 
b/test/multivariate_stats.jl @@ -5,20 +5,21 @@ using Test const n_samples = 5_000_001 mu = [1.0, 2.0, 3.0] -C = [4. -2. -1.; -2. 5. -1.; -1. -1. 6.] +C = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] h = mu J = C for d in [ - Dirichlet(3, 2.0), - Dirichlet([2.0, 1.0, 3.0]), - IsoNormal(mu, 2.0), - DiagNormal(mu, [1.5, 2.0, 2.5]), - MvNormal(mu, C), - IsoNormalCanon(h, 2.0), - DiagNormalCanon(h, [1.5, 2.0, 1.2]), - MvNormalCanon(h, J)] + Dirichlet(3, 2.0), + Dirichlet([2.0, 1.0, 3.0]), + IsoNormal(mu, 2.0), + DiagNormal(mu, [1.5, 2.0, 2.5]), + MvNormal(mu, C), + IsoNormalCanon(h, 2.0), + DiagNormalCanon(h, [1.5, 2.0, 1.2]), + MvNormalCanon(h, J), +] println(d) dmean = mean(d) diff --git a/test/namedtuple/productnamedtuple.jl b/test/namedtuple/productnamedtuple.jl index 24ca59afad..3835c1bf27 100644 --- a/test/namedtuple/productnamedtuple.jl +++ b/test/namedtuple/productnamedtuple.jl @@ -6,7 +6,7 @@ using Test @testset "ProductNamedTupleDistribution" begin @testset "Constructor" begin - nt = (x=Normal(1.0, 2.0), y=Normal(3.0, 4.0)) + nt = (x = Normal(1.0, 2.0), y = Normal(3.0, 4.0)) d = @inferred ProductNamedTupleDistribution(nt) @test d isa ProductNamedTupleDistribution @test d.dists === nt @@ -14,10 +14,10 @@ using Test @test Distributions.value_support(typeof(d)) === Continuous nt = ( - x=Normal(), - y=Dirichlet(10, 1.0), - z=DiscreteUniform(1, 10), - w=LKJCholesky(3, 2.0), + x = Normal(), + y = Dirichlet(10, 1.0), + z = DiscreteUniform(1, 10), + w = LKJCholesky(3, 2.0), ) d = @inferred ProductNamedTupleDistribution(nt) @test d isa ProductNamedTupleDistribution @@ -27,22 +27,22 @@ using Test end @testset "product_distribution" begin - nt = (x=Normal(1.0, 2.0), y=Normal(3.0, 4.0)) + nt = (x = Normal(1.0, 2.0), y = Normal(3.0, 4.0)) d = @inferred product_distribution(nt) @test d === ProductNamedTupleDistribution(nt) nt = ( - x=Normal(), - y=Dirichlet(10, 1.0), - z=DiscreteUniform(1, 10), - w=LKJCholesky(3, 2.0), + x = Normal(), + y = Dirichlet(10, 1.0), + z = DiscreteUniform(1, 10), + w = LKJCholesky(3, 2.0), ) d = @inferred product_distribution(nt) @test d === ProductNamedTupleDistribution(nt) end @testset "show" begin - d = ProductNamedTupleDistribution((x=Gamma(1.0, 2.0), y=Normal())) + d = ProductNamedTupleDistribution((x = Gamma(1.0, 2.0), y = Normal())) @test repr(d) == """ ProductNamedTupleDistribution{(:x, :y)}( x: Gamma{Float64}(α=1.0, θ=2.0) @@ -54,18 +54,18 @@ using Test @testset "Properties" begin @testset "eltype" begin @testset for nt in [ - (x=Normal(1.0, 2.0), y=Normal(3.0, 4.0)), - (x=Normal(), y=Gamma()), - (x=Bernoulli(),), - (x=Normal(), y=Bernoulli()), - (w=LKJCholesky(3, 2.0),), + (x = Normal(1.0, 2.0), y = Normal(3.0, 4.0)), + (x = Normal(), y = Gamma()), + (x = Bernoulli(),), + (x = Normal(), y = Bernoulli()), + (w = LKJCholesky(3, 2.0),), ( - x=Normal(), - y=Dirichlet(10, 1.0), - z=DiscreteUniform(1, 10), - w=LKJCholesky(3, 2.0), + x = Normal(), + y = Dirichlet(10, 1.0), + z = DiscreteUniform(1, 10), + w = LKJCholesky(3, 2.0), ), - (x=product_distribution((x=Normal(), y=Gamma())),), + (x = product_distribution((x = Normal(), y = Gamma())),), ] d = ProductNamedTupleDistribution(nt) @test eltype(d) === eltype(rand(d)) @@ -73,75 +73,83 @@ using Test end @testset "minimum" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=MvNormal(Diagonal(ones(5)))) + nt = (x = Normal(1.0, 2.0), y = Gamma(), z = MvNormal(Diagonal(ones(5)))) d = ProductNamedTupleDistribution(nt) @test @inferred(minimum(d)) == - (x=minimum(nt.x), y=minimum(nt.y), z=minimum(nt.z)) + (x = minimum(nt.x), y = 
minimum(nt.y), z = minimum(nt.z)) end @testset "maximum" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=MvNormal(Diagonal(ones(5)))) + nt = (x = Normal(1.0, 2.0), y = Gamma(), z = MvNormal(Diagonal(ones(5)))) d = ProductNamedTupleDistribution(nt) @test @inferred(maximum(d)) == - (x=maximum(nt.x), y=maximum(nt.y), z=maximum(nt.z)) + (x = maximum(nt.x), y = maximum(nt.y), z = maximum(nt.z)) end @testset "insupport" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=Dirichlet(5, 1.0)) + nt = (x = Normal(1.0, 2.0), y = Gamma(), z = Dirichlet(5, 1.0)) d = ProductNamedTupleDistribution(nt) - x = (x=rand(nt.x), y=rand(nt.y), z=rand(nt.z)) + x = (x = rand(nt.x), y = rand(nt.y), z = rand(nt.z)) @test @inferred(insupport(d, x)) @test insupport(d, NamedTuple{(:z, :y, :x)}(x)) @test !insupport(d, NamedTuple{(:x, :y)}(x)) - @test !insupport(d, merge(x, (x=NaN,))) - @test !insupport(d, merge(x, (y=-1,))) - @test !insupport(d, merge(x, (z=fill(0.25, 4),))) + @test !insupport(d, merge(x, (x = NaN,))) + @test !insupport(d, merge(x, (y = -1,))) + @test !insupport(d, merge(x, (z = fill(0.25, 4),))) end end @testset "Evaluation" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=Dirichlet(5, 1.0), w=Bernoulli()) + nt = (x = Normal(1.0, 2.0), y = Gamma(), z = Dirichlet(5, 1.0), w = Bernoulli()) d = ProductNamedTupleDistribution(nt) - x = (x=rand(nt.x), y=rand(nt.y), z=rand(nt.z), w=rand(nt.w)) + x = (x = rand(nt.x), y = rand(nt.y), z = rand(nt.z), w = rand(nt.w)) xrev = NamedTuple{(:z, :y, :x, :w)}(x) @test @inferred(logpdf(d, x)) == - logpdf(nt.x, x.x) + logpdf(nt.y, x.y) + logpdf(nt.z, x.z) + logpdf(nt.w, x.w) + logpdf(nt.x, x.x) + logpdf(nt.y, x.y) + logpdf(nt.z, x.z) + logpdf(nt.w, x.w) @test @inferred(logpdf(d, xrev)) == logpdf(d, x) @test @inferred(pdf(d, x)) == exp(logpdf(d, x)) @test @inferred(pdf(d, xrev)) == pdf(d, x) @test @inferred(loglikelihood(d, x)) == logpdf(d, x) @test @inferred(loglikelihood(d, xrev)) == loglikelihood(d, x) - xs = [(x=rand(nt.x), y=rand(nt.y), z=rand(nt.z), w=rand(nt.w)) for _ in 1:10] + xs = [(x = rand(nt.x), y = rand(nt.y), z = rand(nt.z), w = rand(nt.w)) for _ = 1:10] xs_perm = [NamedTuple{(:z, :y, :x, :w)}(x) for x in xs] @test @inferred(loglikelihood(d, xs)) == sum(logpdf.(Ref(d), xs)) @test @inferred(loglikelihood(d, xs_perm)) == loglikelihood(d, xs) end @testset "Statistics" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=MvNormal(Diagonal(abs2.(1.0:5.0))), w=Poisson(100)) + nt = ( + x = Normal(1.0, 2.0), + y = Gamma(), + z = MvNormal(Diagonal(abs2.(1.0:5.0))), + w = Poisson(100), + ) d = ProductNamedTupleDistribution(nt) - @test @inferred(mode(d)) == (x=mode(nt.x), y=mode(nt.y), z=mode(nt.z), w=mode(nt.w)) - @test @inferred(mean(d)) == (x=mean(nt.x), y=mean(nt.y), z=mean(nt.z), w=mean(nt.w)) - @test @inferred(var(d)) == (x=var(nt.x), y=var(nt.y), z=var(nt.z), w=var(nt.w)) + @test @inferred(mode(d)) == + (x = mode(nt.x), y = mode(nt.y), z = mode(nt.z), w = mode(nt.w)) + @test @inferred(mean(d)) == + (x = mean(nt.x), y = mean(nt.y), z = mean(nt.z), w = mean(nt.w)) + @test @inferred(var(d)) == + (x = var(nt.x), y = var(nt.y), z = var(nt.z), w = var(nt.w)) @test @inferred(entropy(d)) == - entropy(nt.x) + entropy(nt.y) + entropy(nt.z) + entropy(nt.w) + entropy(nt.x) + entropy(nt.y) + entropy(nt.z) + entropy(nt.w) - d1 = ProductNamedTupleDistribution((x=Normal(1.0, 2.0), y=Gamma())) - d2 = ProductNamedTupleDistribution((x=Normal(), y=Gamma(2.0, 3.0))) - d2_perm = ProductNamedTupleDistribution((y=Gamma(2.0, 3.0), x=Normal())) - d2_sub = ProductNamedTupleDistribution((x=Normal(1.0, 
2.0),)) + d1 = ProductNamedTupleDistribution((x = Normal(1.0, 2.0), y = Gamma())) + d2 = ProductNamedTupleDistribution((x = Normal(), y = Gamma(2.0, 3.0))) + d2_perm = ProductNamedTupleDistribution((y = Gamma(2.0, 3.0), x = Normal())) + d2_sub = ProductNamedTupleDistribution((x = Normal(1.0, 2.0),)) @test kldivergence(d1, d2) == - kldivergence(d1.dists.x, d2.dists.x) + kldivergence(d1.dists.y, d2.dists.y) + kldivergence(d1.dists.x, d2.dists.x) + kldivergence(d1.dists.y, d2.dists.y) @test kldivergence(d1, d2_perm) == kldivergence(d1, d2) @test_throws ArgumentError kldivergence(d1, d2_sub) @test_throws ArgumentError kldivergence(d2_sub, d1) - d3 = ProductNamedTupleDistribution((x=Normal(1.0, 2.0), y=Gamma(6.0, 7.0))) - @test std(d3) == (x=std(d3.dists.x), y=std(d3.dists.y)) + d3 = ProductNamedTupleDistribution((x = Normal(1.0, 2.0), y = Gamma(6.0, 7.0))) + @test std(d3) == (x = std(d3.dists.x), y = std(d3.dists.y)) end @testset "Sampler" begin - nt1 = (x=Normal(1.0, 2.0), y=Gamma(), z=Dirichlet(5, 1.0), w=Bernoulli()) + nt1 = (x = Normal(1.0, 2.0), y = Gamma(), z = Dirichlet(5, 1.0), w = Bernoulli()) d1 = ProductNamedTupleDistribution(nt1) # sampler(::Gamma) is type-unstable spl = @inferred ProductNamedTupleSampler{(:x, :y, :z, :w)} sampler(d1) @@ -150,28 +158,34 @@ using Test x1 = @inferred rand(rng, d1) rng = MersenneTwister(973) x2 = ( - x=rand(rng, nt1.x), y=rand(rng, nt1.y), z=rand(rng, nt1.z), w=rand(rng, nt1.w) + x = rand(rng, nt1.x), + y = rand(rng, nt1.y), + z = rand(rng, nt1.z), + w = rand(rng, nt1.w), ) @test x1 == x2 x3 = rand(rng, d1) @test x3 != x1 # sampler should now be type-stable - nt2 = (x=Normal(1.0, 2.0), z=Dirichlet(5, 1.0), w=Bernoulli()) + nt2 = (x = Normal(1.0, 2.0), z = Dirichlet(5, 1.0), w = Bernoulli()) d2 = ProductNamedTupleDistribution(nt2) @inferred sampler(d2) end @testset "Sampling" begin @testset "rand" begin - nt = (x=Normal(1.0, 2.0), y=Gamma(), z=Dirichlet(5, 1.0), w=Bernoulli()) + nt = (x = Normal(1.0, 2.0), y = Gamma(), z = Dirichlet(5, 1.0), w = Bernoulli()) d = ProductNamedTupleDistribution(nt) rng = MersenneTwister(973) x1 = @inferred rand(rng, d) @test eltype(x1) === eltype(d) rng = MersenneTwister(973) x2 = ( - x=rand(rng, nt.x), y=rand(rng, nt.y), z=rand(rng, nt.z), w=rand(rng, nt.w) + x = rand(rng, nt.x), + y = rand(rng, nt.y), + z = rand(rng, nt.z), + w = rand(rng, nt.w), ) @test x1 == x2 x3 = rand(rng, d) @@ -186,7 +200,7 @@ using Test @test size(xs2) == (2, 3, 4) @test all(insupport.(Ref(d), xs2)) - nt2 = (x=Normal(1.0, 2.0), z=Dirichlet(5, 1.0), w=Bernoulli()) + nt2 = (x = Normal(1.0, 2.0), z = Dirichlet(5, 1.0), w = Bernoulli()) d2 = ProductNamedTupleDistribution(nt2) # now type-inferrable @inferred rand(rng, d2, 10) @@ -195,7 +209,10 @@ using Test @testset "rand!" begin d = ProductNamedTupleDistribution(( - x=Normal(1.0, 2.0), y=Gamma(), z=Dirichlet(5, 1.0), w=Bernoulli() + x = Normal(1.0, 2.0), + y = Gamma(), + z = Dirichlet(5, 1.0), + w = Bernoulli(), )) x = rand(d) xs = Array{typeof(x)}(undef, (2, 3, 4)) diff --git a/test/pdfnorm.jl b/test/pdfnorm.jl index 9b72ab1c5a..24db7449dc 100644 --- a/test/pdfnorm.jl +++ b/test/pdfnorm.jl @@ -5,13 +5,13 @@ using QuadGK # norms of the distributions. These methods aren't very robust because can't # deal with divergent norms, or discrete distributions with infinite support. 
numeric_norm(d::ContinuousUnivariateDistribution) = - quadgk(x -> pdf(d, x) ^ 2, support(d).lb, support(d).ub)[1] + quadgk(x -> pdf(d, x)^2, support(d).lb, support(d).ub)[1] function numeric_norm(d::DiscreteUnivariateDistribution) # When the distribution has infinite support, sum up to an arbitrary large # value. upper = isfinite(maximum(d)) ? round(Int, maximum(d)) : 100 - return sum(pdf(d, k) ^ 2 for k in round(Int, minimum(d)):upper) + return sum(pdf(d, k)^2 for k = round(Int, minimum(d)):upper) end @testset "pdf L2 norm" begin @@ -37,7 +37,7 @@ end @testset "Categorical" begin for n in (1, 2, 5, 10) - d = Categorical(collect(1 / n for _ in 1:n)) + d = Categorical(collect(1 / n for _ = 1:n)) @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end for d in (Categorical([0.25, 0.75]), Categorical([1 / 6, 1 / 3, 1 / 2])) @@ -50,7 +50,9 @@ end @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end # The norm doesn't depend on the mean - @test pdfsquaredL2norm(Cauchy(100, 1)) == pdfsquaredL2norm(Cauchy(-100, 1)) == pdfsquaredL2norm(Cauchy(0, 1)) + @test pdfsquaredL2norm(Cauchy(100, 1)) == + pdfsquaredL2norm(Cauchy(-100, 1)) == + pdfsquaredL2norm(Cauchy(0, 1)) end @testset "Chi" begin @@ -83,7 +85,13 @@ end end @testset "Geometric" begin - for d in (Geometric(0.20), Geometric(0.25), Geometric(0.50), Geometric(0.75), Geometric(0.80)) + for d in ( + Geometric(0.20), + Geometric(0.25), + Geometric(0.50), + Geometric(0.75), + Geometric(0.80), + ) @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end end @@ -93,7 +101,9 @@ end @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end # The norm doesn't depend on the mean - @test pdfsquaredL2norm(Logistic(100, 1)) == pdfsquaredL2norm(Logistic(-100, 1)) == pdfsquaredL2norm(Logistic(0, 1)) + @test pdfsquaredL2norm(Logistic(100, 1)) == + pdfsquaredL2norm(Logistic(-100, 1)) == + pdfsquaredL2norm(Logistic(0, 1)) end @testset "Normal" begin @@ -102,7 +112,9 @@ end end @test pdfsquaredL2norm(Normal(1, 0)) ≈ Inf # The norm doesn't depend on the mean - @test pdfsquaredL2norm(Normal(100, 1)) == pdfsquaredL2norm(Normal(-100, 1)) == pdfsquaredL2norm(Normal(0, 1)) + @test pdfsquaredL2norm(Normal(100, 1)) == + pdfsquaredL2norm(Normal(-100, 1)) == + pdfsquaredL2norm(Normal(0, 1)) end @testset "Poisson" begin diff --git a/test/product.jl b/test/product.jl index 54f8f9aefb..fb41510041 100644 --- a/test/product.jl +++ b/test/product.jl @@ -49,17 +49,21 @@ end # d_product1 = @inferred(product_distribution(ds1)) # when `Product` is removed d_product1 = @inferred(Distributions.ProductDistribution(ds1)) - @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector,Continuous,Float64} + @test d_product1 isa + Distributions.VectorOfUnivariateDistribution{<:Vector,Continuous,Float64} - d_product2 = @inferred(product_distribution(ntuple(i -> Uniform(0.0, ubound[i]), 11)...)) - @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} + d_product2 = + @inferred(product_distribution(ntuple(i -> Uniform(0.0, ubound[i]), 11)...)) + @test d_product2 isa + Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} ds3 = Fill(Uniform(0.0, first(ubound)), N) # Replace with # d_product3 = @inferred(product_distribution(ds3)) # when `Product` is removed d_product3 = @inferred(Distributions.ProductDistribution(ds3)) - @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill,Continuous,Float64} + @test d_product3 isa + Distributions.VectorOfUnivariateDistribution{<:Fill,Continuous,Float64} # Check that methods for 
`VectorOfUnivariateDistribution` are consistent. for (ds, d_product) in ((ds1, d_product1), (ds1, d_product2), (ds3, d_product3)) @@ -95,22 +99,36 @@ end for a in ([0, 1], [-0.5, 0.5]) # Construct independent distributions and `ProductDistribution` from these. - ds1 = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N] + ds1 = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ = 1:N] # Replace with # d_product1 = @inferred(product_distribution(ds1)) # when `Product` is removed d_product1 = @inferred(Distributions.ProductDistribution(ds1)) - @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector{<:DiscreteNonParametric},Discrete,eltype(a)} - - d_product2 = @inferred(product_distribution(ntuple(_ -> DiscreteNonParametric(a, [0.5, 0.5]), 11)...)) - @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:NTuple{N,<:DiscreteNonParametric},Discrete,eltype(a)} + @test d_product1 isa Distributions.VectorOfUnivariateDistribution{ + <:Vector{<:DiscreteNonParametric}, + Discrete, + eltype(a), + } + + d_product2 = @inferred( + product_distribution(ntuple(_ -> DiscreteNonParametric(a, [0.5, 0.5]), 11)...) + ) + @test d_product2 isa Distributions.VectorOfUnivariateDistribution{ + <:NTuple{N,<:DiscreteNonParametric}, + Discrete, + eltype(a), + } ds3 = Fill(DiscreteNonParametric(a, [0.5, 0.5]), N) # Replace with # d_product3 = @inferred(product_distribution(ds3)) # when `Product` is removed d_product3 = @inferred(Distributions.ProductDistribution(ds3)) - @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill{<:DiscreteNonParametric,1},Discrete,eltype(a)} + @test d_product3 isa Distributions.VectorOfUnivariateDistribution{ + <:Fill{<:DiscreteNonParametric,1}, + Discrete, + eltype(a), + } # Check that methods for `VectorOfUnivariateDistribution` are consistent. for (ds, d_product) in ((ds1, d_product1), (ds1, d_product3), (ds3, d_product2)) @@ -146,7 +164,8 @@ end ds = (Bernoulli(0.3), Uniform(0.0, 0.7), Categorical([0.4, 0.2, 0.4])) d_product = @inferred(product_distribution(ds...)) - @test d_product isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} + @test d_product isa + Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} ds_vec = vcat(ds...) @@ -182,11 +201,19 @@ end ds1 = Uniform.(0.0, ubound) d_product1 = @inferred(product_distribution(ds1)) - @test d_product1 isa Distributions.MatrixOfUnivariateDistribution{<:Matrix{<:Uniform},Continuous,Float64} + @test d_product1 isa Distributions.MatrixOfUnivariateDistribution{ + <:Matrix{<:Uniform}, + Continuous, + Float64, + } ds2 = Fill(Uniform(0.0, first(ubound)), M, N) d_product2 = @inferred(product_distribution(ds2)) - @test d_product2 isa Distributions.MatrixOfUnivariateDistribution{<:Fill{<:Uniform,2},Continuous,Float64} + @test d_product2 isa Distributions.MatrixOfUnivariateDistribution{ + <:Fill{<:Uniform,2}, + Continuous, + Float64, + } # Check that methods for `MatrixOfUnivariateDistribution` are consistent. 
for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2)) @@ -195,7 +222,8 @@ end @test @inferred(mean(d_product)) == mean.(ds) @test @inferred(var(d_product)) == var.(ds) @test @inferred(cov(d_product)) == Diagonal(vec(var.(ds))) - @test @inferred(cov(d_product, Val(false))) == reshape(Diagonal(vec(var.(ds))), M, N, M, N) + @test @inferred(cov(d_product, Val(false))) == + reshape(Diagonal(vec(var.(ds))), M, N, M, N) @test minimum(d_product) == map(minimum, ds) @test maximum(d_product) == map(maximum, ds) @@ -220,36 +248,61 @@ end ds1 = Dirichlet.(alphas) d_product1 = @inferred(product_distribution(ds1)) - @test d_product1 isa Distributions.ProductDistribution{length(N) + 1,1,<:Array{<:Dirichlet{Float64},length(N)},Continuous,Float64} + @test d_product1 isa Distributions.ProductDistribution{ + length(N) + 1, + 1, + <:Array{<:Dirichlet{Float64},length(N)}, + Continuous, + Float64, + } ds2 = Fill(Dirichlet(first(alphas)), N...) d_product2 = @inferred(product_distribution(ds2)) - @test d_product2 isa Distributions.ProductDistribution{length(N) + 1,1,<:Fill{<:Dirichlet{Float64},length(N)},Continuous,Float64} + @test d_product2 isa Distributions.ProductDistribution{ + length(N) + 1, + 1, + <:Fill{<:Dirichlet{Float64},length(N)}, + Continuous, + Float64, + } # Check that methods for `VectorOfMultivariateDistribution` are consistent. for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2)) @test size(d_product) == (length(ds[1]), size(ds)...) @test eltype(d_product) === eltype(ds[1]) - @test @inferred(mean(d_product)) == reshape(mapreduce(mean, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product)) - @test @inferred(var(d_product)) == reshape(mapreduce(var, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product)) + @test @inferred(mean(d_product)) == reshape( + mapreduce(mean, (x, y) -> cat(x, y; dims = ndims(ds) + 1), ds), + size(d_product), + ) + @test @inferred(var(d_product)) == reshape( + mapreduce(var, (x, y) -> cat(x, y; dims = ndims(ds) + 1), ds), + size(d_product), + ) @test @inferred(cov(d_product)) == Diagonal(mapreduce(var, vcat, ds)) if d_product isa MatrixDistribution @test @inferred(cov(d_product, Val(false))) == reshape( - Diagonal(mapreduce(var, vcat, ds)), M, length(ds), M, length(ds) + Diagonal(mapreduce(var, vcat, ds)), + M, + length(ds), + M, + length(ds), ) end x = @inferred(rand(d_product)) @test size(x) == size(d_product) - @test x isa typeof(mapreduce(rand, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds)) + @test x isa + typeof(mapreduce(rand, (x, y) -> cat(x, y; dims = ndims(ds) + 1), ds)) # inference broken for non-Fill arrays y = reshape(x, Val(2)) if ds isa Fill - @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) + @test @inferred(logpdf(d_product, x)) ≈ + sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) else - @test logpdf(d_product, x) ≈ sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) + @test logpdf(d_product, x) ≈ + sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) end # ensure that samples are different, in particular if `Fill` is used @test length(unique(x)) == length(d_product) diff --git a/test/qq.jl b/test/qq.jl index e0d8d922a2..22cffc6a8b 100644 --- a/test/qq.jl +++ b/test/qq.jl @@ -13,20 +13,20 @@ using Test @test minimum(pp) >= 0 @test maximum(pp) <= 1 - a = qqbuild(collect(1:10), Uniform(1,10)) - b = qqbuild(1:10, Uniform(1,10)) - c = qqbuild(view(collect(1:20), 1:10), Uniform(1,10)) + a = qqbuild(collect(1:10), Uniform(1, 10)) + b = qqbuild(1:10, Uniform(1, 10)) + c = 
qqbuild(view(collect(1:20), 1:10), Uniform(1, 10)) @test length(a.qy) == length(a.qx) == 10 @test a.qx ≈ b.qx ≈ c.qx ≈ a.qy ≈ b.qy ≈ c.qy - a = qqbuild(Uniform(1,10), collect(1:10)) - b = qqbuild(Uniform(1,10), 1:10) - c = qqbuild(Uniform(1,10), view(collect(1:20), 1:10)) + a = qqbuild(Uniform(1, 10), collect(1:10)) + b = qqbuild(Uniform(1, 10), 1:10) + c = qqbuild(Uniform(1, 10), view(collect(1:20), 1:10)) @test length(a.qy) == length(a.qx) == 10 @test a.qx ≈ b.qx ≈ c.qx ≈ a.qy ≈ b.qy ≈ c.qy - for n in 0:3 - a = qqbuild(rand(n), Uniform(0,1)) + for n = 0:3 + a = qqbuild(rand(n), Uniform(0, 1)) @test length(a.qy) == length(a.qx) == n end end diff --git a/test/reshaped.jl b/test/reshaped.jl index 54bf9be8d8..a75560118c 100644 --- a/test/reshaped.jl +++ b/test/reshaped.jl @@ -40,7 +40,8 @@ # support for d in d1s, s in sizes - @test (size(d) == s) ⊻ (length(s) != length(size(d)) || !insupport(d, reshape(x1, s))) + @test (size(d) == s) ⊻ + (length(s) != length(size(d)) || !insupport(d, reshape(x1, s))) end # mean @@ -120,7 +121,8 @@ σ = rand(rng, 16, 16) μ = rand(rng, 16) d1 = MvNormal(μ, σ * σ') - sizes = [(4, 4), (8, 2), (2, 8), (1, 16), (16, 1), (4, 2, 2), (2, 4, 2), (2, 2, 2, 2)] + sizes = + [(4, 4), (8, 2), (2, 8), (1, 16), (16, 1), (4, 2, 2), (2, 4, 2), (2, 2, 2, 2)] test_reshaped(rng, d1, sizes) end @@ -128,8 +130,16 @@ α = rand(rng, 36) .+ 1 # mode is only defined if all alpha > 1 d1 = Dirichlet(α) sizes = [ - (6, 6), (4, 9), (9, 4), (3, 12), (12, 3), (1, 36), (36, 1), (6, 3, 2), - (3, 2, 6), (2, 3, 3, 2), + (6, 6), + (4, 9), + (9, 4), + (3, 12), + (12, 3), + (1, 36), + (36, 1), + (6, 3, 2), + (3, 2, 6), + (2, 3, 3, 2), ] test_reshaped(rng, d1, sizes) end diff --git a/test/runtests.jl b/test/runtests.jl index f63a3d5e8e..4c7dcced20 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -164,7 +164,7 @@ const tests = [ # "univariate/continuous/vonmises", ] -printstyled("Running tests:\n", color=:blue) +printstyled("Running tests:\n", color = :blue) Random.seed!(345679) diff --git a/test/samplers.jl b/test/samplers.jl index 749b45d0d6..bd2ff9928a 100644 --- a/test/samplers.jl +++ b/test/samplers.jl @@ -30,49 +30,76 @@ import Distributions: @testset "Categorical: $S" for S in [CategoricalDirectSampler, AliasTable] @testset "p=$p" for p in Any[[1.0], [0.3, 0.7], [0.2, 0.3, 0.4, 0.1]] test_samples(S(p), Categorical(p), n_tsamples) - test_samples(S(p), Categorical(p), n_tsamples, rng=rng) + test_samples(S(p), Categorical(p), n_tsamples, rng = rng) @test ncategories(S(p)) == length(p) end end - @test string(AliasTable(Float16[1,2,3])) == "AliasTable with 3 entries" + @test string(AliasTable(Float16[1, 2, 3])) == "AliasTable with 3 entries" ## Binomial samplers - binomparams = [(0, 0.4), (0, 0.6), (5, 0.0), (5, 1.0), - (1, 0.2), (1, 0.8), (3, 0.4), (4, 0.6), - (40, 0.5), (100, 0.4), (300, 0.6)] + binomparams = [ + (0, 0.4), + (0, 0.6), + (5, 0.0), + (5, 1.0), + (1, 0.2), + (1, 0.8), + (3, 0.4), + (4, 0.6), + (40, 0.5), + (100, 0.4), + (300, 0.6), + ] @testset "Binomial: $S" for (S, paramlst) in [ - (BinomialGeomSampler, [(0, 0.4), (0, 0.6), (5, 0.0), (5, 1.0), (1, 0.2), (1, 0.8), (3, 0.4), (4, 0.6)]), - (BinomialTPESampler, [(40, 0.5), (100, 0.4), (300, 0.6)]), - (BinomialPolySampler, binomparams), - (BinomialAliasSampler, binomparams) ] + ( + BinomialGeomSampler, + [ + (0, 0.4), + (0, 0.6), + (5, 0.0), + (5, 1.0), + (1, 0.2), + (1, 0.8), + (3, 0.4), + (4, 0.6), + ], + ), + (BinomialTPESampler, [(40, 0.5), (100, 0.4), (300, 0.6)]), + (BinomialPolySampler, binomparams), + 
(BinomialAliasSampler, binomparams), + ] @testset "pa=$pa" for pa in paramlst n, p = pa test_samples(S(n, p), Binomial(n, p), n_tsamples) - test_samples(S(n, p), Binomial(n, p), n_tsamples, rng=rng) + test_samples(S(n, p), Binomial(n, p), n_tsamples, rng = rng) end end ## Poisson samplers - @testset "Poisson: $S" for - (S, paramlst) in [ - (PoissonCountSampler, [0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 20.0, 30.0]), - (PoissonADSampler, [10.0, 15.0, 20.0, 30.0])] + @testset "Poisson: $S" for (S, paramlst) in [ + (PoissonCountSampler, [0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 20.0, 30.0]), + (PoissonADSampler, [10.0, 15.0, 20.0, 30.0]), + ] @testset "μ=$μ" for μ in paramlst test_samples(S(μ), Poisson(μ), n_tsamples) - test_samples(S(μ), Poisson(μ), n_tsamples, rng=rng) + test_samples(S(μ), Poisson(μ), n_tsamples, rng = rng) end end ## Poisson Binomial sampler @testset "Poisson-Binomial: $S" for S in (PoissBinAliasSampler,) - @testset "p=$p" for p in (fill(0.2, 30), range(0.1, stop = .99, length = 30), [fill(0.1, 10); fill(0.9, 10)]) + @testset "p=$p" for p in ( + fill(0.2, 30), + range(0.1, stop = 0.99, length = 30), + [fill(0.1, 10); fill(0.9, 10)], + ) d = PoissonBinomial(p) test_samples(S(d), d, n_tsamples) - test_samples(S(d), d, n_tsamples, rng=rng) + test_samples(S(d), d, n_tsamples, rng = rng) end end @@ -82,24 +109,30 @@ import Distributions: @testset "Exponential: $S" for S in [ExponentialSampler] @testset "scale=$scale" for scale in [1.0, 2.0, 3.0] test_samples(S(scale), Exponential(scale), n_tsamples) - test_samples(S(scale), Exponential(scale), n_tsamples, rng=rng) + test_samples(S(scale), Exponential(scale), n_tsamples, rng = rng) end end ## Gamma samplers @testset "Gamma (shape >= 1): $S" for S in [GammaGDSampler, GammaMTSampler] - @testset "d=$d" for d in [Gamma(1.0, 1.0), Gamma(2.0, 1.0), Gamma(3.0, 1.0), - Gamma(1.0, 2.0), Gamma(3.0, 2.0), Gamma(100.0, 2.0)] + @testset "d=$d" for d in [ + Gamma(1.0, 1.0), + Gamma(2.0, 1.0), + Gamma(3.0, 1.0), + Gamma(1.0, 2.0), + Gamma(3.0, 2.0), + Gamma(100.0, 2.0), + ] test_samples(S(d), d, n_tsamples) - test_samples(S(d), d, n_tsamples, rng=rng) + test_samples(S(d), d, n_tsamples, rng = rng) end end @testset "Gamma (shape < 1): $S" for S in [GammaGSSampler, GammaIPSampler] - @testset "d=$d" for d in [Gamma(0.1,1.0),Gamma(0.9,1.0)] + @testset "d=$d" for d in [Gamma(0.1, 1.0), Gamma(0.9, 1.0)] test_samples(S(d), d, n_tsamples) - test_samples(S(d), d, n_tsamples, rng=rng) + test_samples(S(d), d, n_tsamples, rng = rng) end end @@ -109,19 +142,19 @@ import Distributions: @test s isa GammaIPSampler{<:GammaMTSampler} @test s.s isa GammaMTSampler test_samples(s, d, n_tsamples) - test_samples(s, d, n_tsamples, rng=rng) + test_samples(s, d, n_tsamples, rng = rng) s = @inferred(GammaIPSampler(d, GammaMTSampler)) @test s isa GammaIPSampler{<:GammaMTSampler} @test s.s isa GammaMTSampler test_samples(s, d, n_tsamples) - test_samples(s, d, n_tsamples, rng=rng) + test_samples(s, d, n_tsamples, rng = rng) s = @inferred(GammaIPSampler(d, GammaGDSampler)) @test s isa GammaIPSampler{<:GammaGDSampler} @test s.s isa GammaGDSampler test_samples(s, d, n_tsamples) - test_samples(s, d, n_tsamples, rng=rng) + test_samples(s, d, n_tsamples, rng = rng) end end diff --git a/test/statsapi.jl b/test/statsapi.jl index 37745c5223..4bc54204e1 100644 --- a/test/statsapi.jl +++ b/test/statsapi.jl @@ -13,14 +13,14 @@ using Test ys = rand(dist, 1_000_000) # Check that empirical frequencies match pvalues of left/right tail approximately - @test pvalue(dist, x; tail=:left) ≈ mean(≤(x), 
ys) rtol=5e-3 - @test pvalue(dist, x; tail=:right) ≈ mean(≥(x), ys) rtol=5e-3 + @test pvalue(dist, x; tail = :left) ≈ mean(≤(x), ys) rtol = 5e-3 + @test pvalue(dist, x; tail = :right) ≈ mean(≥(x), ys) rtol = 5e-3 # Check consistency of pvalues of both tails - @test pvalue(dist, x; tail=:both) == - min(1, 2 * min(pvalue(dist, x; tail=:left), pvalue(dist, x; tail=:right))) + @test pvalue(dist, x; tail = :both) == + min(1, 2 * min(pvalue(dist, x; tail = :left), pvalue(dist, x; tail = :right))) # Incorrect value for keyword argument - @test_throws ArgumentError("`tail=:l` is invalid") pvalue(dist, x; tail=:l) + @test_throws ArgumentError("`tail=:l` is invalid") pvalue(dist, x; tail = :l) end end diff --git a/test/testutils.jl b/test/testutils.jl index d1e215745a..400aad679b 100644 --- a/test/testutils.jl +++ b/test/testutils.jl @@ -12,7 +12,7 @@ function _linspace(a::Float64, b::Float64, n::Int) intv = (b - a) / (n - 1) r = Vector{Float64}(undef, n) @inbounds for i = 1:n - r[i] = a + (i-1) * intv + r[i] = a + (i - 1) * intv end r[n] = b return r @@ -27,8 +27,12 @@ end # testing the implementation of a discrete univariate distribution # -function test_distr(distr::DiscreteUnivariateDistribution, n::Int; - testquan::Bool=true, rng::AbstractRNG = Random.default_rng()) +function test_distr( + distr::DiscreteUnivariateDistribution, + n::Int; + testquan::Bool = true, + rng::AbstractRNG = Random.default_rng(), +) test_range(distr) vs = get_evalsamples(distr, 0.00001) @@ -40,13 +44,13 @@ function test_distr(distr::DiscreteUnivariateDistribution, n::Int; test_stats(distr, vs) test_samples(distr, n) - test_samples(distr, n; rng=rng) + test_samples(distr, n; rng = rng) test_params(distr) end function test_cgf(dist, ts) κ₀ = cgf(dist, 0) - @test κ₀ ≈ 0 atol=2*eps(one(float(κ₀))) + @test κ₀ ≈ 0 atol = 2 * eps(one(float(κ₀))) d(f) = Base.Fix1(ForwardDiff.derivative, f) κ₁ = d(Base.Fix1(cgf, dist))(0) @test κ₁ ≈ mean(dist) @@ -56,16 +60,20 @@ function test_cgf(dist, ts) val = @inferred cgf(dist, t) @test isfinite(val) if isfinite(mgf(dist, t)) - rtol = eps(float(one(t)))^(1/2) - @test (exp∘cgf)(dist, t) ≈ mgf(dist, t) rtol=rtol + rtol = eps(float(one(t)))^(1 / 2) + @test (exp ∘ cgf)(dist, t) ≈ mgf(dist, t) rtol = rtol end end end # testing the implementation of a continuous univariate distribution # -function test_distr(distr::ContinuousUnivariateDistribution, n::Int; - testquan::Bool=true, rng::AbstractRNG=MersenneTwister(123)) +function test_distr( + distr::ContinuousUnivariateDistribution, + n::Int; + testquan::Bool = true, + rng::AbstractRNG = MersenneTwister(123), +) test_range(distr) vs = get_evalsamples(distr, 0.01, 2000) @@ -78,7 +86,7 @@ function test_distr(distr::ContinuousUnivariateDistribution, n::Int; end xs = test_samples(distr, n) allow_test_stats(distr) && test_stats(distr, xs) - xs = test_samples(distr, n, rng=rng) + xs = test_samples(distr, n, rng = rng) allow_test_stats(distr) && test_stats(distr, xs) test_params(distr) end @@ -94,12 +102,14 @@ end # for discrete samplers # -function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable instance - distr::DiscreteUnivariateDistribution, # corresponding distribution - n::Int; # number of samples to generate - q::Float64=1.0e-7, # confidence interval, 1 - q as confidence - verbose::Bool=false, # show intermediate info (for debugging) - rng::Union{AbstractRNG, Missing}=missing) # add an rng? 
+function test_samples( + s::Sampleable{Univariate,Discrete}, # the sampleable instance + distr::DiscreteUnivariateDistribution, # corresponding distribution + n::Int; # number of samples to generate + q::Float64 = 1.0e-7, # confidence interval, 1 - q as confidence + verbose::Bool = false, # show intermediate info (for debugging) + rng::Union{AbstractRNG,Missing} = missing, +) # add an rng? # The basic idea # ------------------ @@ -123,8 +133,8 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable vmin = minimum(distr) vmax = maximum(distr) - rmin = floor(Int,quantile(distr, 0.00001))::Int - rmax = floor(Int,quantile(distr, 0.99999))::Int + rmin = floor(Int, quantile(distr, 0.00001))::Int + rmax = floor(Int, quantile(distr, 0.99999))::Int m = rmax - rmin + 1 # length of the range p0 = map(Base.Fix1(pdf, distr), rmin:rmax) # reference probability masses @assert length(p0) == m @@ -136,8 +146,8 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable cub = Vector{Int}(undef, m) for i = 1:m bp = Binomial(n, p0[i]) - clb[i] = floor(Int,quantile(bp, q/2)) - cub[i] = ceil(Int,cquantile(bp, q/2)) + clb[i] = floor(Int, quantile(bp, q / 2)) + cub[i] = ceil(Int, cquantile(bp, q / 2)) @assert cub[i] >= clb[i] end @@ -149,9 +159,9 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable Random.seed!(1234) samples2 = rand(s, n) Random.seed!(1234) - samples3 = [rand(s) for _ in 1:n] + samples3 = [rand(s) for _ = 1:n] Random.seed!(1234) - samples4 = [rand(s) for _ in 1:n] + samples4 = [rand(s) for _ = 1:n] else # RNGs have to be copied with `copy`, not `deepcopy` # Ref https://github.com/JuliaLang/julia/issues/42899 @@ -160,8 +170,8 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable rng4 = copy(rng) samples = rand(rng, s, n) samples2 = rand(rng2, s, n) - samples3 = [rand(rng3, s) for _ in 1:n] - samples4 = [rand(rng4, s) for _ in 1:n] + samples3 = [rand(rng3, s) for _ = 1:n] + samples4 = [rand(rng4, s) for _ = 1:n] end @test length(samples) == n @test samples2 == samples @@ -173,47 +183,63 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable for i = 1:n @inbounds si = samples[i] if rmin <= si <= rmax - cnts[si - rmin + 1] += 1 + cnts[si-rmin+1] += 1 else - vmin <= si <= vmax || - throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].")) + vmin <= si <= vmax || throw( + DomainError( + si, + "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].", + ), + ) end @inbounds si_sc = samples3[i] if rmin <= si_sc <= rmax - cnts_sc[si_sc - rmin + 1] += 1 + cnts_sc[si_sc-rmin+1] += 1 else - vmin <= si_sc <= vmax || - throw(DomainError(si, "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax].")) + vmin <= si_sc <= vmax || throw( + DomainError( + si, + "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax].", + ), + ) end end # check the counts for i = 1:m verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts[i])") - clb[i] <= cnts[i] <= cub[i] || - error("The counts of samples generated by `rand(s, n)` are out of the confidence interval.") + clb[i] <= cnts[i] <= cub[i] || error( + "The counts of samples generated by `rand(s, n)` are out of the confidence interval.", + ) verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts_sc[i])") - clb[i] <= cnts_sc[i] <= cub[i] || - error("The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the 
confidence interval.") + clb[i] <= cnts_sc[i] <= cub[i] || error( + "The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.", + ) end return samples end -test_samples(distr::DiscreteUnivariateDistribution, n::Int; - q::Float64=1.0e-6, verbose::Bool=false, rng=missing) = - test_samples(distr, distr, n; q=q, verbose=verbose, rng=rng) +test_samples( + distr::DiscreteUnivariateDistribution, + n::Int; + q::Float64 = 1.0e-6, + verbose::Bool = false, + rng = missing, +) = test_samples(distr, distr, n; q = q, verbose = verbose, rng = rng) # for continuous samplers # -function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable instance - distr::ContinuousUnivariateDistribution, # corresponding distribution - n::Int; # number of samples to generate - nbins::Int=50, # divide the main interval into nbins - q::Float64=1.0e-6, # confidence interval, 1 - q as confidence - verbose::Bool=false, # show intermediate info (for debugging) - rng::Union{AbstractRNG, Missing}=missing) # add an rng? +function test_samples( + s::Sampleable{Univariate,Continuous}, # the sampleable instance + distr::ContinuousUnivariateDistribution, # corresponding distribution + n::Int; # number of samples to generate + nbins::Int = 50, # divide the main interval into nbins + q::Float64 = 1.0e-6, # confidence interval, 1 - q as confidence + verbose::Bool = false, # show intermediate info (for debugging) + rng::Union{AbstractRNG,Missing} = missing, +) # add an rng? # The basic idea # ------------------ @@ -260,8 +286,8 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable for i = 1:nbins pi = cdfs[i+1] - cdfs[i] bp = Binomial(n, pi) - clb[i] = floor(Int,quantile(bp, q/2)) - cub[i] = ceil(Int,cquantile(bp, q/2)) + clb[i] = floor(Int, quantile(bp, q / 2)) + cub[i] = ceil(Int, cquantile(bp, q / 2)) @assert cub[i] >= clb[i] end @@ -273,9 +299,9 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable Random.seed!(1234) samples2 = rand(s, n) Random.seed!(1234) - samples3 = [rand(s) for _ in 1:n] + samples3 = [rand(s) for _ = 1:n] Random.seed!(1234) - samples4 = [rand(s) for _ in 1:n] + samples4 = [rand(s) for _ = 1:n] else # RNGs have to be copied with `copy`, not `deepcopy` # Ref https://github.com/JuliaLang/julia/issues/42899 @@ -284,8 +310,8 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable rng4 = copy(rng) samples = rand(rng, s, n) samples2 = rand(rng2, s, n) - samples3 = [rand(rng3, s) for _ in 1:n] - samples4 = [rand(rng4, s) for _ in 1:n] + samples3 = [rand(rng3, s) for _ = 1:n] + samples4 = [rand(rng4, s) for _ = 1:n] end @test length(samples) == n @test samples2 == samples @@ -298,36 +324,66 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable # check whether all samples are in the valid range for i = 1:n @inbounds si = samples[i] - vmin <= si <= vmax || - throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].")) + vmin <= si <= vmax || throw( + DomainError( + si, + "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].", + ), + ) @inbounds si_sc = samples3[i] - vmin <= si_sc <= vmax || - throw(DomainError(si, "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax].")) + vmin <= si_sc <= vmax || throw( + DomainError( + si, + "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax].", + ), + ) end # get counts - cnts = fit(Histogram, samples, edges; 
closed=:right).weights + cnts = fit(Histogram, samples, edges; closed = :right).weights @assert length(cnts) == nbins - cnts_sc = fit(Histogram, samples3, edges; closed=:right).weights + cnts_sc = fit(Histogram, samples3, edges; closed = :right).weights @assert length(cnts_sc) == nbins # check the counts for i = 1:nbins if verbose - @printf("[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], edges[i+1], clb[i], cub[i], cnts[i]) - @printf("[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], edges[i+1], clb[i], cub[i], cnts_sc[i]) + @printf( + "[%.4f, %.4f) ==> (%d, %d): %d\n", + edges[i], + edges[i+1], + clb[i], + cub[i], + cnts[i] + ) + @printf( + "[%.4f, %.4f) ==> (%d, %d): %d\n", + edges[i], + edges[i+1], + clb[i], + cub[i], + cnts_sc[i] + ) end - clb[i] <= cnts[i] <= cub[i] || - error("The counts of samples generated by `rand(s, n)` are out of the confidence interval.") - clb[i] <= cnts_sc[i] <= cub[i] || - error("The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.") + clb[i] <= cnts[i] <= cub[i] || error( + "The counts of samples generated by `rand(s, n)` are out of the confidence interval.", + ) + clb[i] <= cnts_sc[i] <= cub[i] || error( + "The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.", + ) end return samples end -test_samples(distr::ContinuousUnivariateDistribution, n::Int; nbins::Int=50, q::Float64=1.0e-6, verbose::Bool=false, rng=missing) = - test_samples(distr, distr, n; nbins=nbins, q=q, verbose=verbose, rng=rng) +test_samples( + distr::ContinuousUnivariateDistribution, + n::Int; + nbins::Int = 50, + q::Float64 = 1.0e-6, + verbose::Bool = false, + rng = missing, +) = test_samples(distr, distr, n; nbins = nbins, q = q, verbose = verbose, rng = rng) #### Testing range & support methods @@ -349,8 +405,8 @@ function get_evalsamples(d::DiscreteUnivariateDistribution, q::Float64) # samples for testing evaluation functions (even spacing) T = eltype(typeof(d)) - lv = (islowerbounded(d) ? minimum(d) : floor(T,quantile(d, q/2)))::T - hv = (isupperbounded(d) ? maximum(d) : ceil(T,cquantile(d, q/2)))::T + lv = (islowerbounded(d) ? minimum(d) : floor(T, quantile(d, q / 2)))::T + hv = (isupperbounded(d) ? 
maximum(d) : ceil(T, cquantile(d, q / 2)))::T @assert lv <= hv return lv:hv end @@ -358,8 +414,8 @@ end function get_evalsamples(d::ContinuousUnivariateDistribution, q::Float64, n::Int) # samples for testing evaluation functions (even spacing) - lv = quantile(d, q/2) - hv = cquantile(d, q/2) + lv = quantile(d, q / 2) + hv = cquantile(d, q / 2) @assert isfinite(lv) && isfinite(hv) && lv <= hv return _linspace(lv, hv, n) end @@ -373,12 +429,12 @@ function test_support(d::UnivariateDistribution, vs::AbstractVector) if islowerbounded(d) @test isfinite(minimum(d)) @test insupport(d, minimum(d)) - @test !insupport(d, minimum(d)-1) + @test !insupport(d, minimum(d) - 1) end if isupperbounded(d) @test isfinite(maximum(d)) @test insupport(d, maximum(d)) - @test !insupport(d, maximum(d)+1) + @test !insupport(d, maximum(d) + 1) end @test isbounded(d) == (isupperbounded(d) && islowerbounded(d)) @@ -444,15 +500,19 @@ function test_range_evaluation(d::DiscreteUnivariateDistribution) end -function test_evaluation(d::DiscreteUnivariateDistribution, vs::AbstractVector, testquan::Bool=true) - nv = length(vs) - p = Vector{Float64}(undef, nv) - c = Vector{Float64}(undef, nv) - cc = Vector{Float64}(undef, nv) - lp = Vector{Float64}(undef, nv) - lc = Vector{Float64}(undef, nv) +function test_evaluation( + d::DiscreteUnivariateDistribution, + vs::AbstractVector, + testquan::Bool = true, +) + nv = length(vs) + p = Vector{Float64}(undef, nv) + c = Vector{Float64}(undef, nv) + cc = Vector{Float64}(undef, nv) + lp = Vector{Float64}(undef, nv) + lc = Vector{Float64}(undef, nv) lcc = Vector{Float64}(undef, nv) - ci = 0. + ci = 0.0 for (i, v) in enumerate(vs) p[i] = pdf(d, v) @@ -467,10 +527,10 @@ function test_evaluation(d::DiscreteUnivariateDistribution, vs::AbstractVector, ci += p[i] @test ci ≈ c[i] - @test isapprox(c[i] + cc[i], 1.0 , atol=1.0e-12) - @test isapprox(lp[i] , log(p[i]) , atol=1.0e-12) - @test isapprox(lc[i] , log(c[i]) , atol=1.0e-12) - @test isapprox(lcc[i] , log(cc[i]), atol=1.0e-12) + @test isapprox(c[i] + cc[i], 1.0, atol = 1.0e-12) + @test isapprox(lp[i], log(p[i]), atol = 1.0e-12) + @test isapprox(lc[i], log(c[i]), atol = 1.0e-12) + @test isapprox(lcc[i], log(cc[i]), atol = 1.0e-12) if testquan ep = 1.0e-8 @@ -486,23 +546,27 @@ function test_evaluation(d::DiscreteUnivariateDistribution, vs::AbstractVector, end # consistency of scalar-based and vectorized evaluation - @test Base.Fix1(pdf, d).(vs) ≈ p - @test Base.Fix1(cdf, d).(vs) ≈ c + @test Base.Fix1(pdf, d).(vs) ≈ p + @test Base.Fix1(cdf, d).(vs) ≈ c @test Base.Fix1(ccdf, d).(vs) ≈ cc - @test Base.Fix1(logpdf, d).(vs) ≈ lp - @test Base.Fix1(logcdf, d).(vs) ≈ lc + @test Base.Fix1(logpdf, d).(vs) ≈ lp + @test Base.Fix1(logcdf, d).(vs) ≈ lc @test Base.Fix1(logccdf, d).(vs) ≈ lcc end -function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector, testquan::Bool=true) - nv = length(vs) - p = Vector{Float64}(undef, nv) - c = Vector{Float64}(undef, nv) - cc = Vector{Float64}(undef, nv) - lp = Vector{Float64}(undef, nv) - lc = Vector{Float64}(undef, nv) +function test_evaluation( + d::ContinuousUnivariateDistribution, + vs::AbstractVector, + testquan::Bool = true, +) + nv = length(vs) + p = Vector{Float64}(undef, nv) + c = Vector{Float64}(undef, nv) + cc = Vector{Float64}(undef, nv) + lp = Vector{Float64}(undef, nv) + lc = Vector{Float64}(undef, nv) lcc = Vector{Float64}(undef, nv) for (i, v) in enumerate(vs) @@ -519,12 +583,12 @@ function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector @assert (i == 1 || c[i] 
>= c[i-1]) - @test isapprox(c[i] + cc[i], 1.0 , atol=1.0e-12) + @test isapprox(c[i] + cc[i], 1.0, atol = 1.0e-12) if !isa(d, StudentizedRange) - @test isapprox(lp[i] , log(p[i]) , atol=1.0e-12) + @test isapprox(lp[i], log(p[i]), atol = 1.0e-12) end - @test isapprox(lc[i] , log(c[i]) , atol=1.0e-12) - @test isapprox(lcc[i] , log(cc[i]), atol=1.0e-12) + @test isapprox(lc[i], log(c[i]), atol = 1.0e-12) + @test isapprox(lcc[i], log(cc[i]), atol = 1.0e-12) if testquan # TODO: remove this line when we have more accurate implementation @@ -532,10 +596,10 @@ function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector qtol = isa(d, InverseGaussian) ? 1.0e-4 : 1.0e-10 qtol = isa(d, StudentizedRange) ? 1.0e-5 : qtol if p[i] > 1.0e-6 - @test isapprox(quantile(d, c[i]) , v, atol=qtol * (abs(v) + 1.0)) - @test isapprox(cquantile(d, cc[i]) , v, atol=qtol * (abs(v) + 1.0)) - @test isapprox(invlogcdf(d, lc[i]) , v, atol=qtol * (abs(v) + 1.0)) - @test isapprox(invlogccdf(d, lcc[i]), v, atol=qtol * (abs(v) + 1.0)) + @test isapprox(quantile(d, c[i]), v, atol = qtol * (abs(v) + 1.0)) + @test isapprox(cquantile(d, cc[i]), v, atol = qtol * (abs(v) + 1.0)) + @test isapprox(invlogcdf(d, lc[i]), v, atol = qtol * (abs(v) + 1.0)) + @test isapprox(invlogccdf(d, lcc[i]), v, atol = qtol * (abs(v) + 1.0)) end end end @@ -546,7 +610,7 @@ function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector if p[i] > 1.0e-6 v = vs[i] ap = (cdf(d, v + 1.0e-6) - cdf(d, v - 1.0e-6)) / (2.0e-6) - @test isapprox(p[i], ap, atol=p[i] * 1.0e-3) + @test isapprox(p[i], ap, atol = p[i] * 1.0e-3) end end end @@ -557,10 +621,10 @@ function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector @test Base.Fix1(logpdf, d).(vs) ≈ lp end - @test Base.Fix1(cdf, d).(vs) ≈ c + @test Base.Fix1(cdf, d).(vs) ≈ c @test Base.Fix1(ccdf, d).(vs) ≈ cc - @test Base.Fix1(logcdf, d).(vs) ≈ lc + @test Base.Fix1(logcdf, d).(vs) ≈ lc @test Base.Fix1(logccdf, d).(vs) ≈ lcc end @@ -597,27 +661,27 @@ function test_stats(d::DiscreteUnivariateDistribution, vs::AbstractVector) xvar = dot(p, abs2.(vf .- xmean)) xstd = sqrt(xvar) xentropy = entropy(p) - xskew = dot(p, (vf .- xmean).^3) / (xstd.^3) - xkurt = dot(p, (vf .- xmean).^4) / (xvar.^2) - 3.0 + xskew = dot(p, (vf .- xmean) .^ 3) / (xstd .^ 3) + xkurt = dot(p, (vf .- xmean) .^ 4) / (xvar .^ 2) - 3.0 if isbounded(d) - @test isapprox(mean(d), xmean, atol=1.0e-8) - @test isapprox(var(d) , xvar , atol=1.0e-8) - @test isapprox(std(d) , xstd , atol=1.0e-8) + @test isapprox(mean(d), xmean, atol = 1.0e-8) + @test isapprox(var(d), xvar, atol = 1.0e-8) + @test isapprox(std(d), xstd, atol = 1.0e-8) if applicable(skewness, d) && isfinite(skewness(d)) - @test isapprox(skewness(d), xskew , atol=1.0e-8) + @test isapprox(skewness(d), xskew, atol = 1.0e-8) end if applicable(kurtosis, d) && isfinite(kurtosis(d)) - @test isapprox(kurtosis(d), xkurt , atol=1.0e-8) + @test isapprox(kurtosis(d), xkurt, atol = 1.0e-8) end if applicable(entropy, d) - @test isapprox(entropy(d), xentropy, atol=1.0e-8) + @test isapprox(entropy(d), xentropy, atol = 1.0e-8) end else - @test isapprox(mean(d), xmean, atol=1.0e-3 * (abs(xmean) + 1.0)) - @test isapprox(var(d) , xvar , atol=0.01 * xvar) - @test isapprox(std(d) , xstd , atol=0.01 * xstd) + @test isapprox(mean(d), xmean, atol = 1.0e-3 * (abs(xmean) + 1.0)) + @test isapprox(var(d), xvar, atol = 0.01 * xvar) + @test isapprox(std(d), xstd, atol = 0.01 * xstd) end end @@ -646,7 +710,7 @@ function test_stats(d::ContinuousUnivariateDistribution, 
xs::AbstractVector{Floa # For a normal distribution, it is extremely rare for a sample to deviate more # than 5 * std.dev, (chance < 6.0e-7) mean_tol = 5.0 * (sqrt(vd / n)) - @test isapprox(mean(d), xmean, atol=mean_tol) + @test isapprox(mean(d), xmean, atol = mean_tol) # test variance if applicable(kurtosis, d) @@ -659,7 +723,7 @@ function test_stats(d::ContinuousUnivariateDistribution, xs::AbstractVector{Floa # where k is the excessive kurtosis # if isfinite(kd) && kd > -2.0 - @test isapprox(var(d), xvar, atol=5.0 * vd * (kd + 2) / sqrt(n)) + @test isapprox(var(d), xvar, atol = 5.0 * vd * (kd + 2) / sqrt(n)) end end end @@ -699,7 +763,7 @@ function pvalue_kolmogorovsmirnoff(x::AbstractVector, d::UnivariateDistribution) # compute maximum absolute deviation from the empirical cdf n = length(x) cdfs = sort!(map(Base.Fix1(cdf, d), x)) - dmax = maximum(zip(cdfs, (0:(n-1))/n, (1:n)/n)) do (cdf, lower, upper) + dmax = maximum(zip(cdfs, (0:(n-1)) / n, (1:n) / n)) do (cdf, lower, upper) return max(cdf - lower, upper - cdf) end diff --git a/test/truncate.jl b/test/truncate.jl index 9c2a286d09..1303870357 100644 --- a/test/truncate.jl +++ b/test/truncate.jl @@ -9,7 +9,7 @@ import JSON using Test using ..Main: fdm -function verify_and_test_drive(jsonfile, selected, n_tsamples::Int,lower::Int,upper::Int) +function verify_and_test_drive(jsonfile, selected, n_tsamples::Int, lower::Int, upper::Int) R = JSON.parsefile(jsonfile) for dct in R ex = dct["expr"] @@ -38,7 +38,7 @@ function verify_and_test_drive(jsonfile, selected, n_tsamples::Int,lower::Int,up end println(" testing truncated($(ex),$lower,$upper)") - d = truncated(eval(Meta.parse(ex)),lower,upper) + d = truncated(eval(Meta.parse(ex)), lower, upper) if dtype != Uniform && dtype != DiscreteUniform # Uniform is truncated to Uniform @assert isa(dtype, Type) && dtype <: UnivariateDistribution @test isa(d, dtypet) @@ -56,14 +56,12 @@ _json_value(x::Number) = x _json_value(x::AbstractString) = x == "inf" ? Inf : - x == "-inf" ? -Inf : - x == "nan" ? NaN : - error("Invalid numerical value: $x") + x == "-inf" ? -Inf : x == "nan" ? NaN : error("Invalid numerical value: $x") function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int) # verify stats - @test minimum(d) ≈ max(_json_value(dct["minimum"]),d.lower) - @test maximum(d) ≈ min(_json_value(dct["maximum"]),d.upper) + @test minimum(d) ≈ max(_json_value(dct["minimum"]), d.lower) + @test maximum(d) ≈ min(_json_value(dct["maximum"]), d.upper) @test extrema(d) == (minimum(d), maximum(d)) @test d == deepcopy(d) @@ -72,18 +70,49 @@ function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int) for pt in pts x = _parse_x(d, pt["x"]) lp = d.lower <= x <= d.upper ? Float64(pt["logpdf"]) - d.logtp : -Inf - cf = x < d.lower ? 0.0 : x >= d.upper ? 1.0 : (Float64(pt["cdf"]) - d.lcdf)/d.tp - if !isa(d, Distributions.Truncated{Distributions.StudentizedRange{Float64},Distributions.Continuous}) - @test logpdf(d, x) ≈ lp atol=sqrt(eps()) + cf = x < d.lower ? 0.0 : x >= d.upper ? 
1.0 : (Float64(pt["cdf"]) - d.lcdf) / d.tp + if !isa( + d, + Distributions.Truncated{ + Distributions.StudentizedRange{Float64}, + Distributions.Continuous, + }, + ) + @test logpdf(d, x) ≈ lp atol = sqrt(eps()) end - @test cdf(d, x) ≈ cf atol=sqrt(eps()) + @test cdf(d, x) ≈ cf atol = sqrt(eps()) # NOTE: some distributions use pdf() in StatsFuns.jl which have no generic support yet - if !any(T -> d isa T, [Distributions.Truncated{Distributions.NoncentralChisq{Float64},Distributions.Continuous, Float64}, - Distributions.Truncated{Distributions.NoncentralF{Float64},Distributions.Continuous, Float64}, - Distributions.Truncated{Distributions.NoncentralT{Float64},Distributions.Continuous, Float64}, - Distributions.Truncated{Distributions.StudentizedRange{Float64},Distributions.Continuous, Float64}, - Distributions.Truncated{Distributions.Rician{Float64},Distributions.Continuous, Float64}]) - @test isapprox(logpdf(d, Dual(float(x))), lp, atol=sqrt(eps())) + if !any( + T -> d isa T, + [ + Distributions.Truncated{ + Distributions.NoncentralChisq{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.NoncentralF{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.NoncentralT{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.StudentizedRange{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.Rician{Float64}, + Distributions.Continuous, + Float64, + }, + ], + ) + @test isapprox(logpdf(d, Dual(float(x))), lp, atol = sqrt(eps())) end # NOTE: this test is disabled as StatsFuns.jl doesn't have generic support for cdf() # @test isapprox(cdf(d, Dual(x)) , cf, atol=sqrt(eps())) @@ -96,22 +125,22 @@ function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int) @test isapprox(quantile(d, Float64(0.7)), quantile(d, Float32(0.7))) try - m = mgf(d,0.0) + m = mgf(d, 0.0) @test m == 1.0 - m = mgf(d,Dual(0.0)) + m = mgf(d, Dual(0.0)) @test m == 1.0 catch e isa(e, MethodError) || throw(e) end try - c = cf(d,0.0) + c = cf(d, 0.0) @test c == 1.0 - c = cf(d,Dual(0.0)) + c = cf(d, Dual(0.0)) @test c == 1.0 # test some extra values: should all be well-defined - for t in (0.1,-0.1,1.0,-1.0) - @test !isnan(cf(d,t)) - @test !isnan(cf(d,Dual(t))) + for t in (0.1, -0.1, 1.0, -1.0) + @test !isnan(cf(d, t)) + @test !isnan(cf(d, Dual(t))) end catch e isa(e, MethodError) || throw(e) @@ -129,35 +158,34 @@ for (μ, lower, upper) in [(0, -1, 1), (1, 2, 4)] @test d.untruncated === Normal(μ, 1) @test d.lower == lower @test d.upper == upper - @test truncated(Normal(μ, 1); lower=lower, upper=upper) === d + @test truncated(Normal(μ, 1); lower = lower, upper = upper) === d end for bound in (-2, 1) d = @test_deprecated Distributions.Truncated(Normal(), Float64(bound), Inf) - @test truncated(Normal(); lower=bound, upper=Inf) == d + @test truncated(Normal(); lower = bound, upper = Inf) == d - d_nothing = truncated(Normal(); lower=bound) - @test truncated(Normal(); lower=bound, upper=nothing) == d_nothing + d_nothing = truncated(Normal(); lower = bound) + @test truncated(Normal(); lower = bound, upper = nothing) == d_nothing @test extrema(d_nothing) == promote(bound, Inf) d = @test_deprecated Distributions.Truncated(Normal(), -Inf, Float64(bound)) - @test truncated(Normal(); lower=-Inf, upper=bound) == d + @test truncated(Normal(); lower = -Inf, upper = bound) == d - d_nothing = truncated(Normal(); upper=bound) - @test truncated(Normal(); 
lower=nothing, upper=bound) == d_nothing + d_nothing = truncated(Normal(); upper = bound) + @test truncated(Normal(); lower = nothing, upper = bound) == d_nothing @test extrema(d_nothing) == promote(-Inf, bound) end @test truncated(Normal()) === Normal() ## main -for c in ["discrete", - "continuous"] +for c in ["discrete", "continuous"] title = string(uppercase(c[1]), c[2:end]) println(" [$title]") println(" ------------") jsonfile = joinpath(@__DIR__, "ref", "$(c)_test.ref.json") - verify_and_test_drive(jsonfile, ARGS, 10^6,3,5) + verify_and_test_drive(jsonfile, ARGS, 10^6, 3, 5) println() end @@ -165,58 +193,61 @@ end f = x -> logpdf(truncated(Normal(x[1], x[2]), x[3], x[4]), mean(x)) at = [0.0, 1.0, 0.0, 1.0] -@test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol=1e-6) +@test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1e-6) - @testset "errors" begin - @test_throws ErrorException truncated(Normal(), 1, 0) - @test_throws ArgumentError truncated(Uniform(), 1, 2) - @test_throws ErrorException truncated(Exponential(), 3, 1) - end +@testset "errors" begin + @test_throws ErrorException truncated(Normal(), 1, 0) + @test_throws ArgumentError truncated(Uniform(), 1, 2) + @test_throws ErrorException truncated(Exponential(), 3, 1) +end - @testset "#1328" begin - dist = Poisson(2.0) - dist_zeroinflated = MixtureModel([Dirac(0.0), dist], [0.4, 0.6]) - dist_zerotruncated = truncated(dist; lower=1) - dist_zeromodified = MixtureModel([Dirac(0.0), dist_zerotruncated], [0.4, 0.6]) +@testset "#1328" begin + dist = Poisson(2.0) + dist_zeroinflated = MixtureModel([Dirac(0.0), dist], [0.4, 0.6]) + dist_zerotruncated = truncated(dist; lower = 1) + dist_zeromodified = MixtureModel([Dirac(0.0), dist_zerotruncated], [0.4, 0.6]) - @test logsumexp(logpdf(dist, x) for x in 0:1000) ≈ 0 atol=1e-15 - @test logsumexp(logpdf(dist_zeroinflated, x) for x in 0:1000) ≈ 0 atol=1e-15 - @test logsumexp(logpdf(dist_zerotruncated, x) for x in 0:1000) ≈ 0 atol=1e-15 - @test logsumexp(logpdf(dist_zeromodified, x) for x in 0:1000) ≈ 0 atol=1e-15 - end + @test logsumexp(logpdf(dist, x) for x = 0:1000) ≈ 0 atol = 1e-15 + @test logsumexp(logpdf(dist_zeroinflated, x) for x = 0:1000) ≈ 0 atol = 1e-15 + @test logsumexp(logpdf(dist_zerotruncated, x) for x = 0:1000) ≈ 0 atol = 1e-15 + @test logsumexp(logpdf(dist_zeromodified, x) for x = 0:1000) ≈ 0 atol = 1e-15 +end end @testset "show" begin - @test sprint(show, "text/plain", truncated(Normal(); lower=2.0)) == "Truncated($(Normal()); lower=2.0)" - @test sprint(show, "text/plain", truncated(Normal(); upper=3.0)) == "Truncated($(Normal()); upper=3.0)" - @test sprint(show, "text/plain", truncated(Normal(), 2.0, 3.0)) == "Truncated($(Normal()); lower=2.0, upper=3.0)" + @test sprint(show, "text/plain", truncated(Normal(); lower = 2.0)) == + "Truncated($(Normal()); lower=2.0)" + @test sprint(show, "text/plain", truncated(Normal(); upper = 3.0)) == + "Truncated($(Normal()); upper=3.0)" + @test sprint(show, "text/plain", truncated(Normal(), 2.0, 3.0)) == + "Truncated($(Normal()); lower=2.0, upper=3.0)" end @testset "sampling with small mass (#1548)" begin - d = truncated(Beta(10, 100); lower=0.5) + d = truncated(Beta(10, 100); lower = 0.5) x = rand(d, 10_000) - @test mean(x) ≈ 0.5 atol=0.05 + @test mean(x) ≈ 0.5 atol = 0.05 end @testset "quantile examples (#1726)" begin - d_narrow = truncated(Normal(0, 0.01), -1, 1.2) + d_narrow = truncated(Normal(0, 0.01), -1, 1.2) - # prior to patch #1727 this was offering either ±Inf - @test quantile(d_narrow, 1) ≤ d_narrow.upper && 
quantile(d_narrow, 1) ≈ d_narrow.upper - @test quantile(d_narrow, 0) ≥ d_narrow.lower && quantile(d_narrow, 0) ≈ d_narrow.lower + # prior to patch #1727 this was offering either ±Inf + @test quantile(d_narrow, 1) ≤ d_narrow.upper && quantile(d_narrow, 1) ≈ d_narrow.upper + @test quantile(d_narrow, 0) ≥ d_narrow.lower && quantile(d_narrow, 0) ≈ d_narrow.lower - @test isa(quantile(d_narrow, ForwardDiff.Dual(0., 0.)), ForwardDiff.Dual) + @test isa(quantile(d_narrow, ForwardDiff.Dual(0.0, 0.0)), ForwardDiff.Dual) - d = truncated(Normal(0, 2), 0, 1) + d = truncated(Normal(0, 2), 0, 1) - # prior to patch #1727 this was offering 1.0000000000000002 > 1. - @test quantile(d, 1) ≤ d.upper && quantile(d, 1) ≈ d.upper + # prior to patch #1727 this was offering 1.0000000000000002 > 1. + @test quantile(d, 1) ≤ d.upper && quantile(d, 1) ≈ d.upper - @test isa(quantile(d, ForwardDiff.Dual(1.,0.)), ForwardDiff.Dual) + @test isa(quantile(d, ForwardDiff.Dual(1.0, 0.0)), ForwardDiff.Dual) end @testset "cdf outside of [0, 1] (#1854)" begin - dist = truncated(Normal(2.5, 0.2); lower=0.0) + dist = truncated(Normal(2.5, 0.2); lower = 0.0) @test @inferred(cdf(dist, 3.741058503233821e-17)) === 0.0 @test @inferred(ccdf(dist, 3.741058503233821e-17)) === 1.0 @test @inferred(cdf(dist, 1.4354474178676617e-18)) === 0.0 diff --git a/test/truncated/exponential.jl b/test/truncated/exponential.jl index 0fb8e438e8..f74573e3dc 100644 --- a/test/truncated/exponential.jl +++ b/test/truncated/exponential.jl @@ -4,19 +4,20 @@ using Distributions, Random, Test d = Exponential(1.5) l = 1.2 r = 2.7 - @test mean(d) ≈ mean(truncated(d; lower=-3.0)) # non-binding truncation - @test mean(d) ≈ mean(truncated(d; lower=-3.0, upper=Inf)) - @test mean(truncated(d; lower=l)) ≈ mean(d) + l - @test mean(truncated(d; lower=l, upper=Inf)) ≈ mean(d) + l + @test mean(d) ≈ mean(truncated(d; lower = -3.0)) # non-binding truncation + @test mean(d) ≈ mean(truncated(d; lower = -3.0, upper = Inf)) + @test mean(truncated(d; lower = l)) ≈ mean(d) + l + @test mean(truncated(d; lower = l, upper = Inf)) ≈ mean(d) + l # test values below calculated using symbolic integration in Maxima @test mean(truncated(d, 0, r)) ≈ 0.9653092084094841 @test mean(truncated(d, l, r)) ≈ 1.82703493969601 # all the fun corner cases and numerical quirks - @test mean(truncated(Exponential(1.0); upper=0)) == 0 # degenerate - @test mean(truncated(Exponential(1.0); lower=-Inf, upper=0)) == 0 - @test mean(truncated(Exponential(1.0); upper=0+eps())) ≈ 0 atol = eps() # near-degenerate - @test mean(truncated(Exponential(1.0); lower=-Inf, upper=0+eps())) ≈ 0 atol=eps() - @test mean(truncated(Exponential(1.0), 1.0, 1.0+eps())) ≈ 1.0 # near-degenerate - @test mean(truncated(Exponential(1e308), 1.0, 1.0+eps())) ≈ 1.0 # near-degenerate + @test mean(truncated(Exponential(1.0); upper = 0)) == 0 # degenerate + @test mean(truncated(Exponential(1.0); lower = -Inf, upper = 0)) == 0 + @test mean(truncated(Exponential(1.0); upper = 0 + eps())) ≈ 0 atol = eps() # near-degenerate + @test mean(truncated(Exponential(1.0); lower = -Inf, upper = 0 + eps())) ≈ 0 atol = + eps() + @test mean(truncated(Exponential(1.0), 1.0, 1.0 + eps())) ≈ 1.0 # near-degenerate + @test mean(truncated(Exponential(1e308), 1.0, 1.0 + eps())) ≈ 1.0 # near-degenerate end diff --git a/test/truncated/normal.jl b/test/truncated/normal.jl index 9d287c0262..c8e249c594 100644 --- a/test/truncated/normal.jl +++ b/test/truncated/normal.jl @@ -4,27 +4,33 @@ using Distributions, Random rng = MersenneTwister(123) @testset "Truncated normal, mean 
and variance" begin - @test mean(truncated(Normal(0,1),100,115)) ≈ 100.00999800099926070518490239457545847490332879043 - @test mean(truncated(Normal(-2,3),50,70)) ≈ 50.171943499898757645751683644632860837133138152489 - @test mean(truncated(Normal(0,2),-100,0)) ≈ -1.59576912160573071175978423973752747390343452465973 - @test mean(truncated(Normal(0,1))) == 0 - @test mean(truncated(Normal(0,1); lower=-Inf, upper=Inf)) == 0 - @test mean(truncated(Normal(0,1); lower=0)) ≈ +√(2/π) - @test mean(truncated(Normal(0,1); lower=0, upper=Inf)) ≈ +√(2/π) - @test mean(truncated(Normal(0,1); upper=0)) ≈ -√(2/π) - @test mean(truncated(Normal(0,1); lower=-Inf, upper=0)) ≈ -√(2/π) - @test var(truncated(Normal(0,1),50,70)) ≈ 0.00039904318680389954790992722653605933053648912703600 - @test var(truncated(Normal(-2,3); lower=50, upper=70)) ≈ 0.029373438107168350377591231295634273607812172191712 - @test var(truncated(Normal(0,1))) == 1 - @test var(truncated(Normal(0,1); lower=-Inf, upper=Inf)) == 1 - @test var(truncated(Normal(0,1); lower=0)) ≈ 1 - 2/π - @test var(truncated(Normal(0,1); lower=0, upper=Inf)) ≈ 1 - 2/π - @test var(truncated(Normal(0,1); upper=0)) ≈ 1 - 2/π - @test var(truncated(Normal(0,1); lower=-Inf, upper=0)) ≈ 1 - 2/π + @test mean(truncated(Normal(0, 1), 100, 115)) ≈ + 100.00999800099926070518490239457545847490332879043 + @test mean(truncated(Normal(-2, 3), 50, 70)) ≈ + 50.171943499898757645751683644632860837133138152489 + @test mean(truncated(Normal(0, 2), -100, 0)) ≈ + -1.59576912160573071175978423973752747390343452465973 + @test mean(truncated(Normal(0, 1))) == 0 + @test mean(truncated(Normal(0, 1); lower = -Inf, upper = Inf)) == 0 + @test mean(truncated(Normal(0, 1); lower = 0)) ≈ +√(2 / π) + @test mean(truncated(Normal(0, 1); lower = 0, upper = Inf)) ≈ +√(2 / π) + @test mean(truncated(Normal(0, 1); upper = 0)) ≈ -√(2 / π) + @test mean(truncated(Normal(0, 1); lower = -Inf, upper = 0)) ≈ -√(2 / π) + @test var(truncated(Normal(0, 1), 50, 70)) ≈ + 0.00039904318680389954790992722653605933053648912703600 + @test var(truncated(Normal(-2, 3); lower = 50, upper = 70)) ≈ + 0.029373438107168350377591231295634273607812172191712 + @test var(truncated(Normal(0, 1))) == 1 + @test var(truncated(Normal(0, 1); lower = -Inf, upper = Inf)) == 1 + @test var(truncated(Normal(0, 1); lower = 0)) ≈ 1 - 2 / π + @test var(truncated(Normal(0, 1); lower = 0, upper = Inf)) ≈ 1 - 2 / π + @test var(truncated(Normal(0, 1); upper = 0)) ≈ 1 - 2 / π + @test var(truncated(Normal(0, 1); lower = -Inf, upper = 0)) ≈ 1 - 2 / π # https://github.com/JuliaStats/Distributions.jl/issues/827 - @test mean(truncated(Normal(1000000,1),0,1000)) ≈ 999.999998998998999001005011019018990904720462367106 - @test var(truncated(Normal(),999000,1e6)) ≥ 0 - @test var(truncated(Normal(1000000,1),0,1000)) ≥ 0 + @test mean(truncated(Normal(1000000, 1), 0, 1000)) ≈ + 999.999998998998999001005011019018990904720462367106 + @test var(truncated(Normal(), 999000, 1e6)) ≥ 0 + @test var(truncated(Normal(1000000, 1), 0, 1000)) ≥ 0 # https://github.com/JuliaStats/Distributions.jl/issues/624 @test rand(truncated(Normal(+Inf, 1), 0, 1)) ≈ 1 @test rand(truncated(Normal(-Inf, 1), 0, 1)) ≈ 0 @@ -43,11 +49,12 @@ rng = MersenneTwister(123) end @testset "Truncated normal $trunc" begin trunc = truncated(Normal(0, 1), -2, 2) - @testset "Truncated normal $trunc with $func" for func in - [(r = rand, r! = rand!), - (r = ((d, n) -> rand(rng, d, n)), r! = ((d, X) -> rand!(rng, d, X)))] + @testset "Truncated normal $trunc with $func" for func in [ + (r = rand, r! 
= rand!), + (r = ((d, n) -> rand(rng, d, n)), r! = ((d, X) -> rand!(rng, d, X))), + ] repeats = 1000000 - + @test abs(mean(func.r(trunc, repeats))) < 0.01 @test abs(median(func.r(trunc, repeats))) < 0.01 @test abs(var(func.r(trunc, repeats)) - var(trunc)) < 0.01 diff --git a/test/truncated/uniform.jl b/test/truncated/uniform.jl index 17056c5194..389bf7eb46 100644 --- a/test/truncated/uniform.jl +++ b/test/truncated/uniform.jl @@ -4,12 +4,12 @@ using Distributions, Test # just test equivalence of truncation results u = Uniform(1, 2) @test truncated(u) === u - @test truncated(u; lower=0) == u - @test truncated(u; lower=0, upper=Inf) == u - @test truncated(u; upper=2.1) == u - @test truncated(u; lower=-Inf, upper=2.1) == u - @test truncated(u; lower=1.1) == Uniform(1.1, 2) - @test truncated(u; lower=1.1, upper=Inf) == Uniform(1.1, 2) + @test truncated(u; lower = 0) == u + @test truncated(u; lower = 0, upper = Inf) == u + @test truncated(u; upper = 2.1) == u + @test truncated(u; lower = -Inf, upper = 2.1) == u + @test truncated(u; lower = 1.1) == Uniform(1.1, 2) + @test truncated(u; lower = 1.1, upper = Inf) == Uniform(1.1, 2) @test truncated(u, 1.1, 2.1) == Uniform(1.1, 2) @test truncated(u, 1.1, 1.9) == Uniform(1.1, 1.9) end diff --git a/test/types.jl b/test/types.jl index 4eefd5d96d..c8f4d7a18d 100644 --- a/test/types.jl +++ b/test/types.jl @@ -24,7 +24,7 @@ using ForwardDiff: Dual @assert ContinuousMatrixDistribution <: MatrixDistribution @testset "Test Sample Type" begin - for T in (Float64,Float32,Dual{Nothing,Float64,0}) + for T in (Float64, Float32, Dual{Nothing,Float64,0}) @testset "Type $T" begin dists = ( MvNormal(Diagonal(ones(T, 2))), @@ -60,7 +60,7 @@ end @test !isequal(dist1, dist3) @test !isapprox(dist1, dist3) @test hash(dist1) != hash(dist3) - @test isapprox(dist1, dist3, atol=0.3) + @test isapprox(dist1, dist3, atol = 0.3) dist4 = LogNormal(1, 1) @test dist1 != dist4 diff --git a/test/univariate/continuous.jl b/test/univariate/continuous.jl index 3a1da94b92..73c2fc1809 100644 --- a/test/univariate/continuous.jl +++ b/test/univariate/continuous.jl @@ -8,28 +8,31 @@ using Calculus: derivative n_tsamples = 100 # additional distributions that have no direct counterparts in R references -@testset "Testing $(distr)" for distr in [Biweight(), - Biweight(1,3), - Epanechnikov(), - Epanechnikov(1,3), - Triweight(), - Triweight(2), - Triweight(1, 3), - Triweight(1)] - - test_distr(distr, n_tsamples; testquan=false) +@testset "Testing $(distr)" for distr in [ + Biweight(), + Biweight(1, 3), + Epanechnikov(), + Epanechnikov(1, 3), + Triweight(), + Triweight(2), + Triweight(1, 3), + Triweight(1), +] + + test_distr(distr, n_tsamples; testquan = false) end # Test for non-Float64 input using ForwardDiff -@test string(logpdf(Normal(0,1),big(1))) == "-1.418938533204672741780329736405617639861397473637783412817151540482765695927251" +@test string(logpdf(Normal(0, 1), big(1))) == + "-1.418938533204672741780329736405617639861397473637783412817151540482765695927251" @test derivative(t -> logpdf(Normal(1.0, 0.15), t), 2.5) ≈ -66.66666666666667 @test derivative(t -> pdf(Normal(t, 1.0), 0.0), 0.0) == 0.0 @testset "Normal distribution with non-standard (ie not Float64) parameter types" begin - n32 = Normal(1f0, 0.1f0) - n64 = Normal(1., 0.1) + n32 = Normal(1.0f0, 0.1f0) + n64 = Normal(1.0, 0.1) nbig = Normal(big(pi), big(ℯ)) @test eltype(typeof(n32)) === Float32 @@ -42,10 +45,10 @@ using ForwardDiff end # Test for numerical problems -@test pdf(Logistic(6,0.01),-2) == 0 +@test pdf(Logistic(6, 0.01), 
-2) == 0 @testset "Normal with std=0" begin - d = Normal(0.5,0.0) + d = Normal(0.5, 0.0) @test pdf(d, 0.49) == 0.0 @test pdf(d, 0.5) == Inf @test pdf(d, 0.51) == 0.0 @@ -73,10 +76,10 @@ end d = VonMises(1.1, 1000) @test var(d) ≈ 0.0005001251251957198 @test entropy(d) ≈ -2.034688918525470 - @test cf(d, 2.5) ≈ -0.921417 + 0.38047im atol=1e-6 + @test cf(d, 2.5) ≈ -0.921417 + 0.38047im atol = 1e-6 @test pdf(d, 0.5) ≈ 1.758235814051e-75 @test logpdf(d, 0.5) ≈ -172.1295710466005 - @test cdf(d, 1.0) ≈ 0.000787319 atol=1e-9 + @test cdf(d, 1.0) ≈ 0.000787319 atol = 1e-9 end @testset "NormalInverseGaussian random repeatable and basic metrics" begin @@ -96,7 +99,7 @@ end @test mean(d) ≈ µ + β * δ / g @test var(d) ≈ δ * α^2 / g^3 - @test skewness(d) ≈ 3β/(α*sqrt(δ*g)) + @test skewness(d) ≈ 3β / (α * sqrt(δ * g)) end @testset "edge cases" begin diff --git a/test/univariate/continuous/arcsine.jl b/test/univariate/continuous/arcsine.jl index f974bd8e0b..829ea7f6a6 100644 --- a/test/univariate/continuous/arcsine.jl +++ b/test/univariate/continuous/arcsine.jl @@ -19,7 +19,7 @@ using ForwardDiff @test isinf(logpdf(d, 5.0)) # derivative - for v in 3.01:0.1:4.99 + for v = 3.01:0.1:4.99 fgrad = ForwardDiff.derivative(x -> logpdf(d, x), v) glog = gradlogpdf(d, v) @test fgrad ≈ glog diff --git a/test/univariate/continuous/chernoff.jl b/test/univariate/continuous/chernoff.jl index a5f28d02bc..32af585a1f 100644 --- a/test/univariate/continuous/chernoff.jl +++ b/test/univariate/continuous/chernoff.jl @@ -2,157 +2,157 @@ d = Chernoff() cdftest = [ - 0.005 0.5037916689930134; - 0.015 0.5113737159744436; - 0.025 0.5189518911337264; - 0.035 0.5265236172270374; - 0.045 0.5340863229612878; - 0.055 0.541637445363313; - 0.065 0.5491744321382676; - 0.075 0.556694744013348; - 0.085 0.5641958570651835; - 0.095 0.5716752650256268; - 0.105 0.5791304815656103; - 0.115 0.5865590425511593; - 0.125 0.5939585082711147; - 0.135 0.6013264656321359; - 0.145 0.6086605303182655; - 0.155 0.6159583489126876; - 0.165 0.6232176009794599; - 0.175 0.6304360010992436; - 0.185 0.6376113008624671; - 0.195 0.6447412908097765; - 0.205 0.6518238023240649; - 0.215 0.6588567094682105; - 0.225 0.6658379307650819; - 0.235 0.6727654309193446; - 0.245 0.6796372224828434; - 0.255 0.6864513674498119; - 0.265 0.6932059787930698; - 0.275 0.6998992219329853; - 0.285 0.7065293161345655; - 0.295 0.7130945358373242; - 0.305 0.7195932119132353; - 0.315 0.7260237328492151; - 0.325 0.7323845458573087; - 0.335 0.7386741579064688; - 0.345 0.7448911366812273; - 0.355 0.7510341114556635; - 0.365 0.7571017738953651; - 0.375 0.7630928787745712; - 0.385 0.7690062446148507; - 0.395 0.7748407542420853; - 0.405 0.7805953552614361; - 0.415 0.786269060454087; - 0.425 0.7918609480850309; - 0.435 0.7973701621351792; - 0.445 0.8027959124488315; - 0.455 0.8081374748004513; - 0.465 0.8133941908816786; - 0.475 0.8185654682017057; - 0.485 0.823650779913423; - 0.495 0.8286496645651691; - 0.505 0.8335617257530529; - 0.515 0.8383866317232886; - 0.525 0.8431241148805353; - 0.535 0.8477739712206347; - 0.545 0.8523360597092733; - 0.555 0.8568103015557271; - 0.565 0.8611966794490666; - 0.575 0.8654952367057263; - 0.585 0.8697060763547471; - 0.595 0.8738293601627617; - 0.605 0.8778653075886909; - 0.615 0.8818141946885997; - 0.625 0.88567635294178; - 0.635 0.8894521680505922; - 0.645 0.8931420786562767; - 0.655 0.8967465750281972; - 0.665 0.9002661976867428; - 0.675 0.9037015359952119; - 0.685 0.9070532266935508; - 0.695 0.9103219523993228; - 0.705 0.9135084400870754; - 0.715 
0.9166134594908737; - 0.725 0.9196378215191889; - 0.735 0.9225823766240466; - 0.745 0.9254480131350427; - 0.755 0.928235655591972; - 0.765 0.9309462630294286; - 0.775 0.9335808272718444; - 0.785 0.9361403712094327; - 0.795 0.9386259470397855; - 0.805 0.9410386345416801; - 0.815 0.9433795393000659; - 0.825 0.9456497909787722; - 0.835 0.9478505415455782; - 0.845 0.9499829635303033; - 0.855 0.9520482483056357; - 0.865 0.9540476043275662; - 0.875 0.9559822554235589; - 0.885 0.9578534391151732; - 0.895 0.9596624048960603; - 0.905 0.9614104126081986; - 0.915 0.9630987307693821; - 0.925 0.9647286349803182; - 0.935 0.9663014063237074; - 0.945 0.9678183298266878; - 0.955 0.9692806929171217; - 0.965 0.9706897839442293; - 0.975 0.9720468907261572; - 0.985 0.9733532991278337; + 0.005 0.5037916689930134 + 0.015 0.5113737159744436 + 0.025 0.5189518911337264 + 0.035 0.5265236172270374 + 0.045 0.5340863229612878 + 0.055 0.541637445363313 + 0.065 0.5491744321382676 + 0.075 0.556694744013348 + 0.085 0.5641958570651835 + 0.095 0.5716752650256268 + 0.105 0.5791304815656103 + 0.115 0.5865590425511593 + 0.125 0.5939585082711147 + 0.135 0.6013264656321359 + 0.145 0.6086605303182655 + 0.155 0.6159583489126876 + 0.165 0.6232176009794599 + 0.175 0.6304360010992436 + 0.185 0.6376113008624671 + 0.195 0.6447412908097765 + 0.205 0.6518238023240649 + 0.215 0.6588567094682105 + 0.225 0.6658379307650819 + 0.235 0.6727654309193446 + 0.245 0.6796372224828434 + 0.255 0.6864513674498119 + 0.265 0.6932059787930698 + 0.275 0.6998992219329853 + 0.285 0.7065293161345655 + 0.295 0.7130945358373242 + 0.305 0.7195932119132353 + 0.315 0.7260237328492151 + 0.325 0.7323845458573087 + 0.335 0.7386741579064688 + 0.345 0.7448911366812273 + 0.355 0.7510341114556635 + 0.365 0.7571017738953651 + 0.375 0.7630928787745712 + 0.385 0.7690062446148507 + 0.395 0.7748407542420853 + 0.405 0.7805953552614361 + 0.415 0.786269060454087 + 0.425 0.7918609480850309 + 0.435 0.7973701621351792 + 0.445 0.8027959124488315 + 0.455 0.8081374748004513 + 0.465 0.8133941908816786 + 0.475 0.8185654682017057 + 0.485 0.823650779913423 + 0.495 0.8286496645651691 + 0.505 0.8335617257530529 + 0.515 0.8383866317232886 + 0.525 0.8431241148805353 + 0.535 0.8477739712206347 + 0.545 0.8523360597092733 + 0.555 0.8568103015557271 + 0.565 0.8611966794490666 + 0.575 0.8654952367057263 + 0.585 0.8697060763547471 + 0.595 0.8738293601627617 + 0.605 0.8778653075886909 + 0.615 0.8818141946885997 + 0.625 0.88567635294178 + 0.635 0.8894521680505922 + 0.645 0.8931420786562767 + 0.655 0.8967465750281972 + 0.665 0.9002661976867428 + 0.675 0.9037015359952119 + 0.685 0.9070532266935508 + 0.695 0.9103219523993228 + 0.705 0.9135084400870754 + 0.715 0.9166134594908737 + 0.725 0.9196378215191889 + 0.735 0.9225823766240466 + 0.745 0.9254480131350427 + 0.755 0.928235655591972 + 0.765 0.9309462630294286 + 0.775 0.9335808272718444 + 0.785 0.9361403712094327 + 0.795 0.9386259470397855 + 0.805 0.9410386345416801 + 0.815 0.9433795393000659 + 0.825 0.9456497909787722 + 0.835 0.9478505415455782 + 0.845 0.9499829635303033 + 0.855 0.9520482483056357 + 0.865 0.9540476043275662 + 0.875 0.9559822554235589 + 0.885 0.9578534391151732 + 0.895 0.9596624048960603 + 0.905 0.9614104126081986 + 0.915 0.9630987307693821 + 0.925 0.9647286349803182 + 0.935 0.9663014063237074 + 0.945 0.9678183298266878 + 0.955 0.9692806929171217 + 0.965 0.9706897839442293 + 0.975 0.9720468907261572 + 0.985 0.9733532991278337 0.995 0.9746102916986603 ] - + pdftest = [ - 0.000 0.758345; - 0.050 0.755123; - 0.100 0.745532; - 0.150 
0.729792; - 0.200 0.708260; - 0.250 0.681422; - 0.300 0.649874; - 0.350 0.614303; - 0.400 0.575464; - 0.450 0.534159; - 0.500 0.491208; - 0.550 0.447424; - 0.600 0.403594; - 0.650 0.360447; - 0.700 0.318646; - 0.750 0.278760; - 0.800 0.241264; - 0.850 0.206523; - 0.900 0.174795; - 0.950 0.146231; - 1.000 0.120880; - 1.050 0.0987031; - 1.100 0.0795821; - 1.150 0.0633362; - 1.200 0.0497372; - 1.250 0.0385246; - 1.300 0.0294208; - 1.350 0.0221441; - 1.400 0.0164201; - 1.450 0.0119902; - 1.500 0.0086186; - 1.550 0.0060955; - 1.600 0.0042401; - 1.650 0.0028995; - 1.700 0.0019485; - 1.750 0.0012861; - 1.800 0.0008334; - 1.850 0.0005300; - 1.900 0.0003306; - 1.950 0.0002022; + 0.000 0.758345 + 0.050 0.755123 + 0.100 0.745532 + 0.150 0.729792 + 0.200 0.708260 + 0.250 0.681422 + 0.300 0.649874 + 0.350 0.614303 + 0.400 0.575464 + 0.450 0.534159 + 0.500 0.491208 + 0.550 0.447424 + 0.600 0.403594 + 0.650 0.360447 + 0.700 0.318646 + 0.750 0.278760 + 0.800 0.241264 + 0.850 0.206523 + 0.900 0.174795 + 0.950 0.146231 + 1.000 0.120880 + 1.050 0.0987031 + 1.100 0.0795821 + 1.150 0.0633362 + 1.200 0.0497372 + 1.250 0.0385246 + 1.300 0.0294208 + 1.350 0.0221441 + 1.400 0.0164201 + 1.450 0.0119902 + 1.500 0.0086186 + 1.550 0.0060955 + 1.600 0.0042401 + 1.650 0.0028995 + 1.700 0.0019485 + 1.750 0.0012861 + 1.800 0.0008334 + 1.850 0.0005300 + 1.900 0.0003306 + 1.950 0.0002022 2.000 0.0001212 ] - for i=1:size(cdftest, 1) - @test isapprox(cdf(d, cdftest[i, 1]), cdftest[i, 2]) + for i = 1:size(cdftest, 1) + @test isapprox(cdf(d, cdftest[i, 1]), cdftest[i, 2]) end - - for i=1:size(pdftest, 1) - @test isapprox(pdf(d, pdftest[i, 1]), pdftest[i, 2] ; atol = 1e-6) + + for i = 1:size(pdftest, 1) + @test isapprox(pdf(d, pdftest[i, 1]), pdftest[i, 2]; atol = 1e-6) end end diff --git a/test/univariate/continuous/chi.jl b/test/univariate/continuous/chi.jl index d3b4278d6e..e6c5af185a 100644 --- a/test/univariate/continuous/chi.jl +++ b/test/univariate/continuous/chi.jl @@ -2,6 +2,6 @@ @test isfinite(mean(Chi(1000))) # see #1497 -for x in (-0.5, -1, -1.5f0), ν in (3, 3f0, 3.0) +for x in (-0.5, -1, -1.5f0), ν in (3, 3.0f0, 3.0) @test @inferred(logpdf(Chi(ν), x)) == -Inf -end \ No newline at end of file +end diff --git a/test/univariate/continuous/erlang.jl b/test/univariate/continuous/erlang.jl index dc96b517a9..3936b51bd1 100644 --- a/test/univariate/continuous/erlang.jl +++ b/test/univariate/continuous/erlang.jl @@ -1,2 +1,2 @@ -test_cgf(Erlang(1,0.4) , (1, 1/0.400001, -1, -100f0, -1e6)) -test_cgf(Erlang(10,0.01), (1, 1/0.010001f0, -1, -100f0, -1e6)) +test_cgf(Erlang(1, 0.4), (1, 1 / 0.400001, -1, -100.0f0, -1e6)) +test_cgf(Erlang(10, 0.01), (1, 1 / 0.010001f0, -1, -100.0f0, -1e6)) diff --git a/test/univariate/continuous/exponential.jl b/test/univariate/continuous/exponential.jl index 9c5f29aa93..0460b8bc68 100644 --- a/test/univariate/continuous/exponential.jl +++ b/test/univariate/continuous/exponential.jl @@ -1,24 +1,20 @@ @testset "Exponential" begin - test_cgf(Exponential(1), (0.9, -1, -100f0, -1e6)) - test_cgf(Exponential(0.91), (0.9, -1, -100f0, -1e6)) - test_cgf(Exponential(10 ), (0.08, -1, -100f0, -1e6)) + test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1e6)) + test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1e6)) + test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1e6)) for T in (Float32, Float64) @test @inferred(rand(Exponential(T(1)))) isa T end end -test_cgf(Exponential(1), (0.9, -1, -100f0, -1e6)) -test_cgf(Exponential(0.91), (0.9, -1, -100f0, -1e6)) -test_cgf(Exponential(10), (0.08, -1, -100f0, -1e6)) 
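The test_cgf calls being reformatted just above and below exercise the cumulant generating function API in Distributions.jl. A minimal sketch of the identity behind them, assuming the exported cgf and mgf methods (the distribution and evaluation point below are illustrative choices, not values taken from the test suite):

using Distributions

# For an Exponential with scale θ, mgf(d, t) = 1 / (1 - θ * t) for t < 1/θ,
# so the cumulant generating function is cgf(d, t) = log(mgf(d, t)) = -log(1 - θ * t).
d = Exponential(0.4)  # scale θ = 0.4, so the mgf converges for t < 2.5
t = 0.9
@assert cgf(d, t) ≈ log(mgf(d, t))     # cgf is the log of the mgf
@assert cgf(d, t) ≈ -log(1 - 0.4 * t)  # closed form for the exponential case

The tuples passed to test_cgf appear to be evaluation points picked inside each distribution's convergence region, mixing Float32 and Float64 (and negative) values to exercise type handling.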
+test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1e6))
+test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1e6))
+test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1e6))
 # Sampling Tests
 @testset "Exponential sampling tests" begin
-    for d in [
-        Exponential(1),
-        Exponential(0.91),
-        Exponential(10)
-    ]
+    for d in [Exponential(1), Exponential(0.91), Exponential(10)]
         test_distr(d, 10^6)
     end
 end
diff --git a/test/univariate/continuous/gamma.jl b/test/univariate/continuous/gamma.jl
index 8c1887786e..0f881dacd0 100644
--- a/test/univariate/continuous/gamma.jl
+++ b/test/univariate/continuous/gamma.jl
@@ -27,7 +27,7 @@ using Test, Distributions, OffsetArrays
     for T in (Float32, Float64)
         @test @inferred(rand(Gamma(T(1), T(1)))) isa T
-        @test @inferred(rand(Gamma(1/T(2), T(1)))) isa T
+        @test @inferred(rand(Gamma(1 / T(2), T(1)))) isa T
         @test @inferred(rand(Gamma(T(2), T(1)))) isa T
     end
 end
diff --git a/test/univariate/continuous/gumbel.jl b/test/univariate/continuous/gumbel.jl
index 470680ee2c..6aae521062 100644
--- a/test/univariate/continuous/gumbel.jl
+++ b/test/univariate/continuous/gumbel.jl
@@ -1,20 +1,20 @@
 @testset "Gumbel" begin
     @testset "eltype" begin
         @test eltype(Gumbel()) === Float64
-        @test eltype(Gumbel(1f0)) === Float32
+        @test eltype(Gumbel(1.0f0)) === Float32
         @test eltype(Gumbel{Int}(0, 1)) === Int
     end
     @testset "rand" begin
         d = Gumbel(rand(), rand())
-        samples = [rand(d) for _ in 1:10_000]
-        @test mean(samples) ≈ mean(d) rtol=5e-2
-        @test std(samples) ≈ std(d) rtol=5e-2
+        samples = [rand(d) for _ = 1:10_000]
+        @test mean(samples) ≈ mean(d) rtol = 5e-2
+        @test std(samples) ≈ std(d) rtol = 5e-2
         samples = rand(d, 10_000)
-        @test mean(samples) ≈ mean(d) rtol=5e-2
-        @test std(samples) ≈ std(d) rtol=5e-2
+        @test mean(samples) ≈ mean(d) rtol = 5e-2
+        @test std(samples) ≈ std(d) rtol = 5e-2
         d = Gumbel{Int}(0, 1)
         @test rand(d) isa Float64
diff --git a/test/univariate/continuous/inversegaussian.jl b/test/univariate/continuous/inversegaussian.jl
index 6990cae710..35879eaa1b 100644
--- a/test/univariate/continuous/inversegaussian.jl
+++ b/test/univariate/continuous/inversegaussian.jl
@@ -1,8 +1,5 @@
 @testset "InverseGaussian cdf outside of [0, 1] (#1873)" begin
-    for d in [
-        InverseGaussian(1.65, 590),
-        InverseGaussian(0.5, 1000)
-    ]
+    for d in [InverseGaussian(1.65, 590), InverseGaussian(0.5, 1000)]
         for x in [0.02, 1.0, 20.0, 300.0]
             p = cdf(d, x)
             @test 0.0 <= p <= 1.0
@@ -15,4 +12,4 @@
             @test (p + q) ≈ 1
         end
     end
-end
\ No newline at end of file
+end
diff --git a/test/univariate/continuous/johnsonsu.jl b/test/univariate/continuous/johnsonsu.jl
index 716f1b1df8..4ab0ab8220 100644
--- a/test/univariate/continuous/johnsonsu.jl
+++ b/test/univariate/continuous/johnsonsu.jl
@@ -71,56 +71,56 @@
     @inferred pdf(d1, 1.0)
     @inferred pdf(d1, 1.0f0)
     @inferred pdf(d1, 1)
-    @inferred pdf(d1, 1//2)
+    @inferred pdf(d1, 1 // 2)
     @inferred pdf(d1, Inf)
     @inferred logpdf(d1, -Inf32)
     @inferred logpdf(d1, 1.0)
     @inferred logpdf(d1, 1.0f0)
     @inferred logpdf(d1, 1)
-    @inferred logpdf(d1, 1//2)
+    @inferred logpdf(d1, 1 // 2)
     @inferred logpdf(d1, Inf)
     @inferred cdf(d1, -Inf32)
     @inferred cdf(d1, 1.0)
     @inferred cdf(d1, 1.0f0)
     @inferred cdf(d1, 1)
-    @inferred cdf(d1, 1//2)
+    @inferred cdf(d1, 1 // 2)
     @inferred cdf(d1, Inf)
     @inferred logcdf(d1, -Inf32)
     @inferred logcdf(d1, 1.0)
     @inferred logcdf(d1, 1.0f0)
     @inferred logcdf(d1, 1)
-    @inferred logcdf(d1, 1//2)
+    @inferred logcdf(d1, 1 // 2)
     @inferred logcdf(d1, Inf)
     @inferred ccdf(d1, -Inf32)
     @inferred ccdf(d1, 1.0)
     @inferred ccdf(d1, 1.0f0)
     @inferred ccdf(d1, 1)
-    @inferred ccdf(d1,
1//2) + @inferred ccdf(d1, 1 // 2) @inferred ccdf(d1, Inf) @inferred logccdf(d1, -Inf32) @inferred logccdf(d1, 1.0) @inferred logccdf(d1, 1.0f0) @inferred logccdf(d1, 1) - @inferred logccdf(d1, 1//2) + @inferred logccdf(d1, 1 // 2) @inferred logccdf(d1, Inf) @inferred invlogcdf(d1, -Inf32) @inferred invlogcdf(d1, 1.0) @inferred invlogcdf(d1, 1.0f0) @inferred invlogcdf(d1, 1) - @inferred invlogcdf(d1, 1//2) + @inferred invlogcdf(d1, 1 // 2) @inferred invlogcdf(d1, Inf) @inferred invlogccdf(d1, -Inf32) @inferred invlogccdf(d1, 1.0) @inferred invlogccdf(d1, 1.0f0) @inferred invlogccdf(d1, 1) - @inferred invlogccdf(d1, 1//2) + @inferred invlogccdf(d1, 1 // 2) @inferred invlogccdf(d1, Inf) end diff --git a/test/univariate/continuous/kolmogorov.jl b/test/univariate/continuous/kolmogorov.jl index 70c506eeba..a3697c7d18 100644 --- a/test/univariate/continuous/kolmogorov.jl +++ b/test/univariate/continuous/kolmogorov.jl @@ -1,16 +1,16 @@ -using Distributions +using Distributions using Test d = Kolmogorov() # compare to Smirnov's tabulated values -@test round(cdf(d,0.28), digits=6) == .000_001 -@test round(cdf(d,0.50), digits=6) == .036_055 -@test round(cdf(d,0.99), digits=6) == .719_126 -@test round(cdf(d,1.00), digits=6) == .730_000 -@test round(cdf(d,1.01), digits=6) == .740_566 +@test round(cdf(d, 0.28), digits = 6) == 0.000_001 +@test round(cdf(d, 0.50), digits = 6) == 0.036_055 +@test round(cdf(d, 0.99), digits = 6) == 0.719_126 +@test round(cdf(d, 1.00), digits = 6) == 0.730_000 +@test round(cdf(d, 1.01), digits = 6) == 0.740_566 # @test round(cdf(d,1.04),digits=6) == .770_434 # table value appears to be wrong -@test round(cdf(d,1.50), digits=6) == .977_782 -@test round(cdf(d,2.00), digits=6) == .999_329 -@test round(cdf(d,2.50), digits=7) == .999_992_5 -@test round(cdf(d,3.00), digits=8) == .999_999_97 +@test round(cdf(d, 1.50), digits = 6) == 0.977_782 +@test round(cdf(d, 2.00), digits = 6) == 0.999_329 +@test round(cdf(d, 2.50), digits = 7) == 0.999_992_5 +@test round(cdf(d, 3.00), digits = 8) == 0.999_999_97 diff --git a/test/univariate/continuous/kumaraswamy.jl b/test/univariate/continuous/kumaraswamy.jl index 37cfbd429b..d616ca373d 100644 --- a/test/univariate/continuous/kumaraswamy.jl +++ b/test/univariate/continuous/kumaraswamy.jl @@ -17,10 +17,10 @@ using Distributions: expectation @test typeof(@inferred rand(D)) === typeof(rand()) tol = sqrt(eps(float(T))) @testset "gradlogpdf" begin - for x in T(0):(T <: Integer ? one(T) : T(0.5)):T(20) + for x = T(0):(T <: Integer ? 
one(T) : T(0.5)):T(20) fd = ForwardDiff.derivative(Base.Fix1(logpdf, D), x) gl = @inferred gradlogpdf(D, x) - @test fd ≈ gl atol=tol + @test fd ≈ gl atol = tol if T <: AbstractFloat @test gl isa T end @@ -28,21 +28,21 @@ using Distributions: expectation end @testset "median" begin m = @inferred median(D) - @test m ≈ sqrt(1 - T(2)^(-1//3)) atol=tol + @test m ≈ sqrt(1 - T(2)^(-1 // 3)) atol = tol if T <: AbstractFloat @test m isa T end end @testset "entropy" begin shannon = @inferred entropy(D) - @test shannon ≈ (19//12 - log(T(6))) atol=tol + @test shannon ≈ (19 // 12 - log(T(6))) atol = tol if T <: AbstractFloat @test shannon isa T end end @testset "mode" begin m = @inferred mode(D) - @test m ≈ inv(sqrt(T(5))) atol=tol + @test m ≈ inv(sqrt(T(5))) atol = tol if T <: AbstractFloat @test m isa T end @@ -56,7 +56,7 @@ using Distributions: expectation if T <: AbstractFloat @test y₁ isa T end - @test y₁ ≈ y₂ atol=sqrt(tol) + @test y₁ ≈ y₂ atol = sqrt(tol) end end @testset "limits" begin diff --git a/test/univariate/continuous/laplace.jl b/test/univariate/continuous/laplace.jl index 9288927982..54bd1f88e1 100644 --- a/test/univariate/continuous/laplace.jl +++ b/test/univariate/continuous/laplace.jl @@ -1,5 +1,5 @@ @testset "laplace.jl" begin # affine transformations - test_cgf(Laplace(1, 1), (0.99, -0.99, 1f-2, -1f-5)) + test_cgf(Laplace(1, 1), (0.99, -0.99, 1.0f-2, -1.0f-5)) test_affine_transformations(Laplace, randn(), randn()^2) end diff --git a/test/univariate/continuous/lindley.jl b/test/univariate/continuous/lindley.jl index 57d141693f..21948fb559 100644 --- a/test/univariate/continuous/lindley.jl +++ b/test/univariate/continuous/lindley.jl @@ -19,20 +19,20 @@ using Distributions: expectation samples = rand(rng, Lindley(1.5f0), 10_000) mle = fit_mle(Lindley, samples) @test mle isa Lindley - @test shape(mle) ≈ 1.5 atol=0.1 + @test shape(mle) ≈ 1.5 atol = 0.1 end @testset "$T" for T in (Float16, Float32, Float64, Rational{Int}) D = Lindley(one(T)) @test partype(D) === T @test typeof(@inferred rand(D)) === typeof(rand()) - @test @inferred(mean(D)) == T(3/2) + @test @inferred(mean(D)) == T(3 / 2) tol = sqrt(eps(float(T))) @testset "Gradient of log PDF" begin - for x in T(0):T(0.5):T(20) + for x = T(0):T(0.5):T(20) fd = ForwardDiff.derivative(Fix1(logpdf, D), x) gl = @inferred gradlogpdf(D, x) @test gl isa T - @test fd ≈ gl atol=tol + @test fd ≈ gl atol = tol end end @testset "Entropy" begin @@ -41,7 +41,7 @@ using Distributions: expectation if T <: AbstractFloat @test shannon isa T end - @test shannon ≈ expect atol=tol + @test shannon ≈ expect atol = tol end @testset "K-L divergence" begin S = supertype(typeof(D)) @@ -51,14 +51,14 @@ using Distributions: expectation if T <: AbstractFloat @test d₁ isa T end - @test d₁ ≈ d₂ atol=tol + @test d₁ ≈ d₂ atol = tol end @testset "Mode" begin @test iszero(@inferred mode(D)) @test isone(mode(Lindley(T(0.5)))) m = mode(Lindley(T(0.1))) @test m isa T - @test m ≈ T(9) atol=tol + @test m ≈ T(9) atol = tol end @testset "Skewness" begin μ = mean(D) @@ -68,14 +68,14 @@ using Distributions: expectation if T <: AbstractFloat @test s₁ isa T end - @test s₁ ≈ s₂ atol=sqrt(tol) + @test s₁ ≈ s₂ atol = sqrt(tol) end @testset "MGF, CGF, CF" begin @test @inferred(mgf(D, 0)) === one(T) @test iszero(mgf(D, shape(D) + 1)) @test ForwardDiff.derivative(Fix1(mgf, D), 0) ≈ mean(D) @test central_fdm(5, 1)(Fix1(cf, D), 0) ≈ mean(D) * im - test_cgf(D, (-1e6, -100f0, Float16(-1), 1//10, 0.9)) + test_cgf(D, (-1e6, -100.0f0, Float16(-1), 1 // 10, 0.9)) end end end diff --git 
a/test/univariate/continuous/logistic.jl b/test/univariate/continuous/logistic.jl index 3eb0b8f2d0..4316d482e3 100644 --- a/test/univariate/continuous/logistic.jl +++ b/test/univariate/continuous/logistic.jl @@ -1,2 +1,2 @@ -test_cgf(Logistic(0, 1), (-0.99,0.99, 1f-2, -1f-2)) -test_cgf(Logistic(100,10), (-0.099,0.099, 1f-2, -1f-2)) +test_cgf(Logistic(0, 1), (-0.99, 0.99, 1.0f-2, -1.0f-2)) +test_cgf(Logistic(100, 10), (-0.099, 0.099, 1.0f-2, -1.0f-2)) diff --git a/test/univariate/continuous/logitnormal.jl b/test/univariate/continuous/logitnormal.jl index b8d72290ef..f9de13c8f5 100644 --- a/test/univariate/continuous/logitnormal.jl +++ b/test/univariate/continuous/logitnormal.jl @@ -5,8 +5,11 @@ using Random, Test using StatsFuns ####### Core testing procedure -function test_logitnormal(g::LogitNormal, n_tsamples::Int=10^6, - rng::Union{AbstractRNG, Missing} = missing) +function test_logitnormal( + g::LogitNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG,Missing} = missing, +) d = length(g) #mn = mean(g) md = median(g) @@ -20,16 +23,16 @@ function test_logitnormal(g::LogitNormal, n_tsamples::Int=10^6, #@test isa(s, Float64) @test md ≈ logistic(g.μ) #@test entropy(g) ≈ d*(1 + Distributions.log2π)/2 + logdetcov(g.normal)/2 + sum(mean(g.normal)) - @test insupport(g,1e-8) - # corner cases of 0 and 1 handled as in support - @test insupport(g,1.0) - @test pdf(g,0.0) == 0.0 - @test insupport(g,0.0) - @test pdf(g,1.0) == 0.0 - @test !insupport(g,-1e-8) - @test pdf(g,-1e-8) == 0.0 - @test !insupport(g,1+1e-8) - @test pdf(g,1+1e-8) == 0.0 + @test insupport(g, 1e-8) + # corner cases of 0 and 1 handled as in support + @test insupport(g, 1.0) + @test pdf(g, 0.0) == 0.0 + @test insupport(g, 0.0) + @test pdf(g, 1.0) == 0.0 + @test !insupport(g, -1e-8) + @test pdf(g, -1e-8) == 0.0 + @test !insupport(g, 1 + 1e-8) + @test pdf(g, 1 + 1e-8) == 0.0 # sampling if ismissing(rng) @@ -44,9 +47,9 @@ function test_logitnormal(g::LogitNormal, n_tsamples::Int=10^6, @test logpdf(g, X[i]) ≈ log(pdf(g, X[i])) end @test Base.Fix1(logpdf, g).(X) ≈ log.(Base.Fix1(pdf, g).(X)) - @test isequal(logpdf(g, 0),-Inf) - @test isequal(logpdf(g, 1),-Inf) - @test isequal(logpdf(g, -eps()),-Inf) + @test isequal(logpdf(g, 0), -Inf) + @test isequal(logpdf(g, 1), -Inf) + @test isequal(logpdf(g, -eps()), -Inf) # test the location and scale functions @test location(g) == g.μ @@ -57,19 +60,16 @@ end ###### General Testing @testset "Logitnormal tests" begin - test_logitnormal( LogitNormal() ) - test_logitnormal( LogitNormal(2,0.5) ) + test_logitnormal(LogitNormal()) + test_logitnormal(LogitNormal(2, 0.5)) d = LogitNormal(Float32(2)) typeof(rand(d, 5)) # still Float64 @test convert(LogitNormal{Float32}, d) === d - @test typeof(convert(LogitNormal{Float64}, d)) == typeof(LogitNormal(2,1)) + @test typeof(convert(LogitNormal{Float64}, d)) == typeof(LogitNormal(2, 1)) end @testset "Logitnormal Sampling Tests" begin - for d in [ - LogitNormal(-2, 3), - LogitNormal(0, 0.2) - ] + for d in [LogitNormal(-2, 3), LogitNormal(0, 0.2)] test_distr(d, 10^6) end end diff --git a/test/univariate/continuous/lognormal.jl b/test/univariate/continuous/lognormal.jl index 1cc44b433d..05e23eddb1 100644 --- a/test/univariate/continuous/lognormal.jl +++ b/test/univariate/continuous/lognormal.jl @@ -5,8 +5,7 @@ using Test isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T @testset "LogNormal" begin - @test isa(convert(LogNormal{Float64}, Float16(0), Float16(1)), - LogNormal{Float64}) + @test isa(convert(LogNormal{Float64}, Float16(0), Float16(1)), 
LogNormal{Float64}) d = LogNormal(0, 1) @test convert(LogNormal{Float64}, d) === d @test convert(LogNormal{Float32}, d) isa LogNormal{Float32} @@ -16,13 +15,19 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T @test iszero(logcdf(LogNormal(0, 0), 1)) @test iszero(logcdf(LogNormal(), Inf)) @test logdiffcdf(LogNormal(), Float32(exp(3)), Float32(exp(3))) === -Inf - @test logdiffcdf(LogNormal(), Float32(exp(5)), Float32(exp(3))) ≈ -6.607938594596893 rtol=1e-12 - @test logdiffcdf(LogNormal(), Float32(exp(5)), Float64(exp(3))) ≈ -6.60793859457367 rtol=1e-12 - @test logdiffcdf(LogNormal(), Float64(exp(5)), Float64(exp(3))) ≈ -6.607938594596893 rtol=1e-12 - let d = LogNormal(Float64(0), Float64(1)), x = Float64(exp(-60)), y = Float64(exp(-60.001)) + @test logdiffcdf(LogNormal(), Float32(exp(5)), Float32(exp(3))) ≈ -6.607938594596893 rtol = + 1e-12 + @test logdiffcdf(LogNormal(), Float32(exp(5)), Float64(exp(3))) ≈ -6.60793859457367 rtol = + 1e-12 + @test logdiffcdf(LogNormal(), Float64(exp(5)), Float64(exp(3))) ≈ -6.607938594596893 rtol = + 1e-12 + let d = LogNormal(Float64(0), Float64(1)), + x = Float64(exp(-60)), + y = Float64(exp(-60.001)) + float_res = logdiffcdf(d, x, y) - big_x = BigFloat(x; precision=100) - big_y = BigFloat(y; precision=100) + big_x = BigFloat(x; precision = 100) + big_y = BigFloat(y; precision = 100) big_float_res = log(cdf(d, big_x) - cdf(d, big_y)) @test float_res ≈ big_float_res end @@ -62,7 +67,8 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T # test for #996 being fixed let d = LogNormal(0, 1), x = exp(1), ∂x = exp(2) - @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈ ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) + @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈ + ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) end end @@ -263,13 +269,13 @@ end # quantile @test @inferred(quantile(LogNormal(1.0, 0.0), 0.0f0)) === 0.0 @test @inferred(quantile(LogNormal(1.0, 0.0f0), 1.0)) === Inf - @test @inferred(quantile(LogNormal(1.0f0, 0.0), 0.5)) === exp(1) + @test @inferred(quantile(LogNormal(1.0f0, 0.0), 0.5)) === exp(1) @test isnan_type(Float64, @inferred(quantile(LogNormal(1.0f0, 0.0), NaN))) @test @inferred(quantile(LogNormal(1.0f0, 0.0f0), 0.0f0)) === 0.0f0 @test @inferred(quantile(LogNormal(1.0f0, 0.0f0), 1.0f0)) === Inf32 @test @inferred(quantile(LogNormal(1.0f0, 0.0f0), 0.5f0)) === exp(1.0f0) @test isnan_type(Float32, @inferred(quantile(LogNormal(1.0f0, 0.0f0), NaN32))) - @test @inferred(quantile(LogNormal(1//1, 0//1), 1//2)) === exp(1) + @test @inferred(quantile(LogNormal(1 // 1, 0 // 1), 1 // 2)) === exp(1) # cquantile @test @inferred(cquantile(LogNormal(1.0, 0.0), 0.0f0)) === Inf @@ -280,7 +286,7 @@ end @test @inferred(cquantile(LogNormal(1.0f0, 0.0f0), 1.0f0)) === 0.0f0 @test @inferred(cquantile(LogNormal(1.0f0, 0.0f0), 0.5f0)) === exp(1.0f0) @test isnan_type(Float32, @inferred(cquantile(LogNormal(1.0f0, 0.0f0), NaN32))) - @test @inferred(cquantile(LogNormal(1//1, 0//1), 1//2)) === exp(1) + @test @inferred(cquantile(LogNormal(1 // 1, 0 // 1), 1 // 2)) === exp(1) # gradlogpdf @test @inferred(gradlogpdf(LogNormal(0.0, 1.0), 1.0)) === -1.0 @@ -317,14 +323,14 @@ end @testset "LogNormal Sampling Tests" begin for d in [ - LogNormal() - LogNormal(1.0) - LogNormal(0.0, 2.0) - LogNormal(1.0, 2.0) - LogNormal(3.0, 0.5) - LogNormal(3.0, 1.0) - LogNormal(3.0, 2.0) - ] + LogNormal() + LogNormal(1.0) + LogNormal(0.0, 2.0) + LogNormal(1.0, 2.0) + LogNormal(3.0, 0.5) + LogNormal(3.0, 1.0) + LogNormal(3.0, 2.0) + ] test_distr(d, 10^6) end end diff --git 
a/test/univariate/continuous/loguniform.jl b/test/univariate/continuous/loguniform.jl index 3ba88f0216..09c58255a5 100644 --- a/test/univariate/continuous/loguniform.jl +++ b/test/univariate/continuous/loguniform.jl @@ -6,53 +6,53 @@ import Random @testset "LogUniform" begin rng = Random.MersenneTwister(0) - @test pdf(LogUniform(1f0, 2f0), 1) isa Float32 - @test pdf(LogUniform(1, 2), 1f0) isa Float32 + @test pdf(LogUniform(1.0f0, 2.0f0), 1) isa Float32 + @test pdf(LogUniform(1, 2), 1.0f0) isa Float32 @test pdf(LogUniform(1, 2), 1) isa Float64 @test quantile(LogUniform(1, 2), 1) isa Float64 - @test quantile(LogUniform(1, 2), 1f0) isa Float32 + @test quantile(LogUniform(1, 2), 1.0f0) isa Float32 @testset "$f" for f in [pdf, cdf, quantile, logpdf, logcdf] - @test @inferred(f(LogUniform(1,2), 1)) isa Float64 - @test @inferred(f(LogUniform(1,2), 1.0)) isa Float64 - @test @inferred(f(LogUniform(1.0,2), 1.0)) isa Float64 - @test @inferred(f(LogUniform(1.0f0,2), 1)) isa Float32 - @test @inferred(f(LogUniform(1.0f0,2), 1f0)) isa Float32 - @test @inferred(f(LogUniform(1,2), 1f0)) isa Float32 + @test @inferred(f(LogUniform(1, 2), 1)) isa Float64 + @test @inferred(f(LogUniform(1, 2), 1.0)) isa Float64 + @test @inferred(f(LogUniform(1.0, 2), 1.0)) isa Float64 + @test @inferred(f(LogUniform(1.0f0, 2), 1)) isa Float32 + @test @inferred(f(LogUniform(1.0f0, 2), 1.0f0)) isa Float32 + @test @inferred(f(LogUniform(1, 2), 1.0f0)) isa Float32 end - d = LogUniform(1,10) + d = LogUniform(1, 10) @test eltype(d) === Float64 @test 1 <= rand(rng, d) <= 10 @test rand(rng, d) isa eltype(d) - @test @inferred(quantile(d, 0)) ≈ 1 + @test @inferred(quantile(d, 0)) ≈ 1 @test quantile(d, 0.5) ≈ sqrt(10) # geomean - @test quantile(d, 1) ≈ 10 + @test quantile(d, 1) ≈ 10 @test mode(d) ≈ 1 @test !insupport(d, 0) @test @inferred(minimum(d)) === 1 @test @inferred(maximum(d)) === 10 @test partype(d) === Int - @test truncated(d, 2, 14) === LogUniform(2,10) + @test truncated(d, 2, 14) === LogUniform(2, 10) @test truncated(d, 0, 8) === LogUniform(1, 8) - @test truncated(d; upper=8) === LogUniform(1, 8) - @test truncated(d; lower=3) === LogUniform(3, 10) + @test truncated(d; upper = 8) === LogUniform(1, 8) + @test truncated(d; lower = 3) === LogUniform(3, 10) # numbers obtained by calling scipy.stats.loguniform - @test @inferred(std(d) ) ≈ 2.49399867607628 - @test @inferred(mean(d) ) ≈ 3.908650337129266 + @test @inferred(std(d)) ≈ 2.49399867607628 + @test @inferred(mean(d)) ≈ 3.908650337129266 @test @inferred(pdf(d, 1.0001)) ≈ 0.43425105679757203 - @test @inferred(pdf(d, 5 )) ≈ 0.08685889638065035 + @test @inferred(pdf(d, 5)) ≈ 0.08685889638065035 @test @inferred(pdf(d, 9.9999)) ≈ 0.04342988248915007 @test @inferred(cdf(d, 1.0001)) ≈ 4.342727686266485e-05 - @test @inferred(cdf(d, 5 )) ≈ 0.6989700043360187 + @test @inferred(cdf(d, 5)) ≈ 0.6989700043360187 @test @inferred(cdf(d, 9.9999)) ≈ 0.999995657033466 - @test @inferred(median(d) ) ≈ 3.1622776601683795 - @test @inferred(logpdf(d, 5) ) ≈ -2.443470357682056 + @test @inferred(median(d)) ≈ 3.1622776601683795 + @test @inferred(logpdf(d, 5)) ≈ -2.443470357682056 - for _ in 1:10 + for _ = 1:10 lo = rand(rng) - hi = lo + 10*rand(rng) - dist = LogUniform(lo,hi) + hi = lo + 10 * rand(rng) + dist = LogUniform(lo, hi) q = rand(rng) @test cdf(dist, quantile(dist, q)) ≈ q @@ -62,25 +62,26 @@ import Random x = rand(rng, dist) @test cdf(u, log(x)) ≈ cdf(dist, x) - @test @inferred(entropy(dist)) ≈ Distributions.expectation(x->-logpdf(dist,x), dist) + @test @inferred(entropy(dist)) ≈ + 
diff --git a/test/univariate/continuous/noncentralchisq.jl b/test/univariate/continuous/noncentralchisq.jl
index b73d4e0426..1db62d7b96 100644
--- a/test/univariate/continuous/noncentralchisq.jl
+++ b/test/univariate/continuous/noncentralchisq.jl
@@ -1 +1 @@
-test_cgf(NoncentralChisq(3,2), (0.49, -1, -100, -1f6))
+test_cgf(NoncentralChisq(3, 2), (0.49, -1, -100, -1.0f6))
diff --git a/test/univariate/continuous/noncentralt.jl b/test/univariate/continuous/noncentralt.jl
index 9fbf4d6395..3954f686fc 100644
--- a/test/univariate/continuous/noncentralt.jl
+++ b/test/univariate/continuous/noncentralt.jl
@@ -14,7 +14,7 @@ using Test
     λ = 5
     d = NoncentralT(ν, λ)
-    @test mean(d) ≈ 5.0766 atol=1e-5
+    @test mean(d) ≈ 5.0766 atol = 1e-5

     λ = 0
     d = NoncentralT(ν, λ)
diff --git a/test/univariate/continuous/normal.jl b/test/univariate/continuous/normal.jl
index 0378e05486..1d83264b24 100644
--- a/test/univariate/continuous/normal.jl
+++ b/test/univariate/continuous/normal.jl
@@ -3,10 +3,9 @@ using Test, Distributions, StatsFuns, ForwardDiff, OffsetArrays
 isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T

 @testset "Normal" begin
-    test_cgf(Normal(0,1  ), (1, -1, 100f0, 1e6, -1e6))
-    test_cgf(Normal(1,0.4), (1, -1, 100f0, 1e6, -1e6))
-    @test isa(convert(Normal{Float64}, Float16(0), Float16(1)),
-              Normal{Float64})
+    test_cgf(Normal(0, 1), (1, -1, 100.0f0, 1e6, -1e6))
+    test_cgf(Normal(1, 0.4), (1, -1, 100.0f0, 1e6, -1e6))
+    @test isa(convert(Normal{Float64}, Float16(0), Float16(1)), Normal{Float64})
     d = Normal(1.1, 2.3)
     @test convert(Normal{Float64}, d) === d
     d32 = convert(Normal{Float32}, d)
@@ -17,13 +16,14 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T
     @test -Inf === logpdf(Normal(), Inf)
     @test iszero(logcdf(Normal(0, 0), 0))
     @test iszero(logcdf(Normal(), Inf))
-    @test @inferred(logdiffcdf(Normal(), 5f0, 3f0)) ≈ -6.607938594596893 rtol=1e-12
-    @test @inferred(logdiffcdf(Normal(), 5f0, 3.0)) ≈ -6.607938594596893 rtol=1e-12
-    @test @inferred(logdiffcdf(Normal(), 5.0, 3.0)) ≈ -6.607938594596893 rtol=1e-12
+    @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0f0)) ≈ -6.607938594596893 rtol = 1e-12
+    @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0)) ≈ -6.607938594596893 rtol = 1e-12
+    @test @inferred(logdiffcdf(Normal(), 5.0, 3.0)) ≈ -6.607938594596893 rtol = 1e-12
     @test_throws ArgumentError logdiffcdf(Normal(), 3, 5)

     # Arguments in the tails
-    logdiffcdf_big(d::Normal, x::Real, y::Real) = logsubexp(logcdf(d, big(y)), logcdf(d, big(x)))
+    logdiffcdf_big(d::Normal, x::Real, y::Real) =
+        logsubexp(logcdf(d, big(y)), logcdf(d, big(x)))
     for d in (Normal(), Normal(2.1, 0.1)), (a, b) in ((15, 10), (115, 100), (1015, 1000))
         for (x, y) in ((a, b), (-b, -a))
             @test isfinite(@inferred(logdiffcdf(d, x, y)))
@@ -32,8 +32,8 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T
     end
     let d = Normal(Float64(0), Float64(1)), x = Float64(-60), y = Float64(-60.001)
         float_res = logdiffcdf(d, x, y)
-        big_x = BigFloat(x; precision=100)
-        big_y = BigFloat(y; precision=100)
+        big_x = BigFloat(x; precision = 100)
+        big_y = BigFloat(y; precision = 100)
         big_float_res = log(cdf(d, big_x) - cdf(d, big_y))
         @test float_res ≈ big_float_res
     end
@@ -64,124 +64,125 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T
     @test isnan_type(Float64, invlogccdf(Normal(), NaN))
     # test for #996 being fixed
     let d = Normal(0, 1), x = 1.0, ∂x = 2.0
-        @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈ ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x))
+        @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈
+                  ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x))
     end
 end

 @testset "Normal logpdf & pdf type inference" begin
-    @test @inferred(pdf(Normal(0.0, 0.0), 0.0))   === Inf
-    @test @inferred(pdf(Normal(0.0, 0.0), -1.0))  === 0.0
-    @test @inferred(pdf(Normal(0.0, 0.0), 0.0f0)) === Inf
+    @test @inferred(pdf(Normal(0.0, 0.0), 0.0)) === Inf
+    @test @inferred(pdf(Normal(0.0, 0.0), -1.0)) === 0.0
+    @test @inferred(pdf(Normal(0.0, 0.0), 0.0f0)) === Inf
     @test isnan_type(Float64, @inferred(pdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(pdf(Normal(0.0f0, 0.0f0), 0.0))   === Inf
-    @test @inferred(pdf(Normal(0.0f0, 0.0f0), 0.0f0)) === Inf32
+    @test @inferred(pdf(Normal(0.0f0, 0.0f0), 0.0)) === Inf
+    @test @inferred(pdf(Normal(0.0f0, 0.0f0), 0.0f0)) === Inf32
     @test isnan_type(Float64, @inferred(pdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(pdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(pdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(pdf(Normal(0 // 1, 0 // 1), 0 // 1))  === Inf
+    @test @inferred(pdf(Normal(0 // 1, 0 // 1), 0 // 1)) === Inf
    @test isnan_type(Float64, @inferred(pdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(pdf(Normal(0.0, 0.0), BigInt(1)))   == big(0.0)
-    @test @inferred(pdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
+    @test @inferred(pdf(Normal(0.0, 0.0), BigInt(1))) == big(0.0)
+    @test @inferred(pdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
     @test isnan_type(BigFloat, @inferred(pdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(logpdf(Normal(0.0, 0.0), 0.0))   === Inf
-    @test @inferred(logpdf(Normal(0.0, 0.0), -1.0))  === -Inf
-    @test @inferred(logpdf(Normal(0.0, 0.0), 0.0f0)) === Inf
+    @test @inferred(logpdf(Normal(0.0, 0.0), 0.0)) === Inf
+    @test @inferred(logpdf(Normal(0.0, 0.0), -1.0)) === -Inf
+    @test @inferred(logpdf(Normal(0.0, 0.0), 0.0f0)) === Inf
     @test isnan_type(Float64, @inferred(logpdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(logpdf(Normal(0.0f0, 0.0f0), 0.0))   === Inf
-    @test @inferred(logpdf(Normal(0.0f0, 0.0f0), 0.0f0)) === Inf32
+    @test @inferred(logpdf(Normal(0.0f0, 0.0f0), 0.0)) === Inf
+    @test @inferred(logpdf(Normal(0.0f0, 0.0f0), 0.0f0)) === Inf32
     @test isnan_type(Float64, @inferred(logpdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(logpdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(logpdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(logpdf(Normal(0 // 1, 0 // 1), 0 // 1))  === Inf
+    @test @inferred(logpdf(Normal(0 // 1, 0 // 1), 0 // 1)) === Inf
     @test isnan_type(Float64, @inferred(logpdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(logpdf(Normal(0.0, 0.0), BigInt(1)))   == big(-Inf)
-    @test @inferred(logpdf(Normal(0.0, 0.0), BigFloat(1))) == big(-Inf)
+    @test @inferred(logpdf(Normal(0.0, 0.0), BigInt(1))) == big(-Inf)
+    @test @inferred(logpdf(Normal(0.0, 0.0), BigFloat(1))) == big(-Inf)
     @test isnan_type(BigFloat, @inferred(logpdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(cdf(Normal(0.0, 0.0), 0.0))   === 1.0
-    @test @inferred(cdf(Normal(0.0, 0.0), -1.0))  === 0.0
-    @test @inferred(cdf(Normal(0.0, 0.0), 0.0f0)) === 1.0
+    @test @inferred(cdf(Normal(0.0, 0.0), 0.0)) === 1.0
+    @test @inferred(cdf(Normal(0.0, 0.0), -1.0)) === 0.0
+    @test @inferred(cdf(Normal(0.0, 0.0), 0.0f0)) === 1.0
     @test isnan_type(Float64, @inferred(cdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(cdf(Normal(0.0f0, 0.0f0), 0.0))   === 1.0
-    @test @inferred(cdf(Normal(0.0f0, 0.0f0), 0.0f0)) === 1.0f0
+    @test @inferred(cdf(Normal(0.0f0, 0.0f0), 0.0)) === 1.0
+    @test @inferred(cdf(Normal(0.0f0, 0.0f0), 0.0f0)) === 1.0f0
     @test isnan_type(Float64, @inferred(cdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(cdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(cdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(cdf(Normal(0 // 1, 0 // 1), 0 // 1))  === 1.0
+    @test @inferred(cdf(Normal(0 // 1, 0 // 1), 0 // 1)) === 1.0
     @test isnan_type(Float64, @inferred(cdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(cdf(Normal(0.0, 0.0), BigInt(1)))   == big(1.0)
-    @test @inferred(cdf(Normal(0.0, 0.0), BigFloat(1))) == big(1.0)
+    @test @inferred(cdf(Normal(0.0, 0.0), BigInt(1))) == big(1.0)
+    @test @inferred(cdf(Normal(0.0, 0.0), BigFloat(1))) == big(1.0)
     @test isnan_type(BigFloat, @inferred(cdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(logcdf(Normal(0.0, 0.0), 0.0))   === -0.0
-    @test @inferred(logcdf(Normal(0.0, 0.0), -1.0))  === -Inf
-    @test @inferred(logcdf(Normal(0.0, 0.0), 0.0f0)) === -0.0
+    @test @inferred(logcdf(Normal(0.0, 0.0), 0.0)) === -0.0
+    @test @inferred(logcdf(Normal(0.0, 0.0), -1.0)) === -Inf
+    @test @inferred(logcdf(Normal(0.0, 0.0), 0.0f0)) === -0.0
     @test isnan_type(Float64, @inferred(logcdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(logcdf(Normal(0.0f0, 0.0f0), 0.0))   === -0.0
-    @test @inferred(logcdf(Normal(0.0f0, 0.0f0), 0.0f0)) === -0.0f0
+    @test @inferred(logcdf(Normal(0.0f0, 0.0f0), 0.0)) === -0.0
+    @test @inferred(logcdf(Normal(0.0f0, 0.0f0), 0.0f0)) === -0.0f0
     @test isnan_type(Float64, @inferred(logcdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(logcdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(logcdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(logcdf(Normal(0 // 1, 0 // 1), 0 // 1))  === -0.0
+    @test @inferred(logcdf(Normal(0 // 1, 0 // 1), 0 // 1)) === -0.0
     @test isnan_type(Float64, @inferred(logcdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(logcdf(Normal(0.0, 0.0), BigInt(1)))   == big(0.0)
-    @test @inferred(logcdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
+    @test @inferred(logcdf(Normal(0.0, 0.0), BigInt(1))) == big(0.0)
+    @test @inferred(logcdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
     @test isnan_type(BigFloat, @inferred(logcdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(ccdf(Normal(0.0, 0.0), 0.0))   === 0.0
-    @test @inferred(ccdf(Normal(0.0, 0.0), -1.0))  === 1.0
-    @test @inferred(ccdf(Normal(0.0, 0.0), 0.0f0)) === 0.0
+    @test @inferred(ccdf(Normal(0.0, 0.0), 0.0)) === 0.0
+    @test @inferred(ccdf(Normal(0.0, 0.0), -1.0)) === 1.0
+    @test @inferred(ccdf(Normal(0.0, 0.0), 0.0f0)) === 0.0
     @test isnan_type(Float64, @inferred(ccdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(ccdf(Normal(0.0f0, 0.0f0), 0.0))   === 0.0
-    @test @inferred(ccdf(Normal(0.0f0, 0.0f0), 0.0f0)) === 0.0f0
+    @test @inferred(ccdf(Normal(0.0f0, 0.0f0), 0.0)) === 0.0
+    @test @inferred(ccdf(Normal(0.0f0, 0.0f0), 0.0f0)) === 0.0f0
     @test isnan_type(Float64, @inferred(ccdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(ccdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(ccdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(ccdf(Normal(0 // 1, 0 // 1), 0 // 1))  === 0.0
+    @test @inferred(ccdf(Normal(0 // 1, 0 // 1), 0 // 1)) === 0.0
     @test isnan_type(Float64, @inferred(ccdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(ccdf(Normal(0.0, 0.0), BigInt(1)))   == big(0.0)
-    @test @inferred(ccdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
+    @test @inferred(ccdf(Normal(0.0, 0.0), BigInt(1))) == big(0.0)
+    @test @inferred(ccdf(Normal(0.0, 0.0), BigFloat(1))) == big(0.0)
     @test isnan_type(BigFloat, @inferred(ccdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(logccdf(Normal(0.0, 0.0), 0.0))   === -Inf
-    @test @inferred(logccdf(Normal(0.0, 0.0), -1.0))  === -0.0
-    @test @inferred(logccdf(Normal(0.0, 0.0), 0.0f0)) === -Inf
+    @test @inferred(logccdf(Normal(0.0, 0.0), 0.0)) === -Inf
+    @test @inferred(logccdf(Normal(0.0, 0.0), -1.0)) === -0.0
+    @test @inferred(logccdf(Normal(0.0, 0.0), 0.0f0)) === -Inf
     @test isnan_type(Float64, @inferred(logccdf(Normal(0.0, 0.0), NaN)))
-    @test @inferred(logccdf(Normal(0.0f0, 0.0f0), 0.0))   === -Inf
-    @test @inferred(logccdf(Normal(0.0f0, 0.0f0), 0.0f0)) === -Inf32
+    @test @inferred(logccdf(Normal(0.0f0, 0.0f0), 0.0)) === -Inf
+    @test @inferred(logccdf(Normal(0.0f0, 0.0f0), 0.0f0)) === -Inf32
     @test isnan_type(Float64, @inferred(logccdf(Normal(0.0, 0.0), NaN)))
     @test isnan_type(Float64, @inferred(logccdf(Normal(NaN, 0.0), 0.0f0)))
     @test isnan_type(Float32, @inferred(logccdf(Normal(NaN32, 0.0f0), 0.0f0)))
-    @test @inferred(logccdf(Normal(0 // 1, 0 // 1), 0 // 1))  === -Inf
+    @test @inferred(logccdf(Normal(0 // 1, 0 // 1), 0 // 1)) === -Inf
     @test isnan_type(Float64, @inferred(logccdf(Normal(0 // 1, 0 // 1), NaN)))
-    @test @inferred(logccdf(Normal(0.0, 0.0), BigInt(1)))   == big(-Inf)
-    @test @inferred(logccdf(Normal(0.0, 0.0), BigFloat(1))) == big(-Inf)
+    @test @inferred(logccdf(Normal(0.0, 0.0), BigInt(1))) == big(-Inf)
+    @test @inferred(logccdf(Normal(0.0, 0.0), BigFloat(1))) == big(-Inf)
     @test isnan_type(BigFloat, @inferred(logccdf(Normal(0.0, 0.0), BigFloat(NaN))))

-    @test @inferred(quantile(Normal(1.0, 0.0), 0.0f0))  === -Inf
-    @test @inferred(quantile(Normal(1.0, 0.0f0), 1.0))  === Inf
-    @test @inferred(quantile(Normal(1.0f0, 0.0), 0.5))  === 1.0
+    @test @inferred(quantile(Normal(1.0, 0.0), 0.0f0)) === -Inf
+    @test @inferred(quantile(Normal(1.0, 0.0f0), 1.0)) === Inf
+    @test @inferred(quantile(Normal(1.0f0, 0.0), 0.5)) === 1.0
     @test isnan_type(Float64, @inferred(quantile(Normal(1.0f0, 0.0), NaN)))
     @test @inferred(quantile(Normal(1.0f0, 0.0f0), 0.0f0)) === -Inf32
-    @test @inferred(quantile(Normal(1.0f0, 0.0f0), 1.0f0)) ===  Inf32
-    @test @inferred(quantile(Normal(1.0f0, 0.0f0), 0.5f0)) ===  1.0f0
+    @test @inferred(quantile(Normal(1.0f0, 0.0f0), 1.0f0)) === Inf32
+    @test @inferred(quantile(Normal(1.0f0, 0.0f0), 0.5f0)) === 1.0f0
     @test isnan_type(Float32, @inferred(quantile(Normal(1.0f0, 0.0f0), NaN32)))
-    @test @inferred(quantile(Normal(1//1, 0//1), 1//2)) === 1.0
-    @test @inferred(quantile(Normal(1f0, 0f0), 1//2)) === 1f0
-    @test @inferred(quantile(Normal(1f0, 0.0), 1//2)) === 1.0
+    @test @inferred(quantile(Normal(1 // 1, 0 // 1), 1 // 2)) === 1.0
+    @test @inferred(quantile(Normal(1.0f0, 0.0f0), 1 // 2)) === 1.0f0
+    @test @inferred(quantile(Normal(1.0f0, 0.0), 1 // 2)) === 1.0

-    @test @inferred(cquantile(Normal(1.0, 0.0), 0.0f0))  === Inf
-    @test @inferred(cquantile(Normal(1.0, 0.0f0), 1.0))  === -Inf
-    @test @inferred(cquantile(Normal(1.0f0, 0.0), 0.5))  === 1.0
+    @test @inferred(cquantile(Normal(1.0, 0.0), 0.0f0)) === Inf
+    @test @inferred(cquantile(Normal(1.0, 0.0f0), 1.0)) === -Inf
+    @test @inferred(cquantile(Normal(1.0f0, 0.0), 0.5)) === 1.0
     @test isnan_type(Float64, @inferred(cquantile(Normal(1.0f0, 0.0), NaN)))
-    @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 0.0f0)) ===  Inf32
+    @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 0.0f0)) === Inf32
     @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 1.0f0)) === -Inf32
-    @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 0.5f0)) ===  1.0f0
+    @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 0.5f0)) === 1.0f0
     @test isnan_type(Float32, @inferred(cquantile(Normal(1.0f0, 0.0f0), NaN32)))
-    @test @inferred(cquantile(Normal(1//1, 0//1), 1//2)) === 1.0
-    @test @inferred(cquantile(Normal(1f0, 0f0), 1//2)) === 1f0
-    @test @inferred(cquantile(Normal(1f0, 0.0), 1//2)) === 1.0
+    @test @inferred(cquantile(Normal(1 // 1, 0 // 1), 1 // 2)) === 1.0
+    @test @inferred(cquantile(Normal(1.0f0, 0.0f0), 1 // 2)) === 1.0f0
+    @test @inferred(cquantile(Normal(1.0f0, 0.0), 1 // 2)) === 1.0
 end

 @testset "Normal: Sampling with integer-valued parameters" begin
@@ -195,7 +196,8 @@ end
     @test canonform(Normal()) == NormalCanon()
     @test meanform(NormalCanon()) == Normal()
     @test meanform(canonform(Normal(0.25, 0.7))) ≈ Normal(0.25, 0.7)
-    @test convert(NormalCanon, convert(Normal, NormalCanon(0.3, 0.8))) ≈ NormalCanon(0.3, 0.8)
+    @test convert(NormalCanon, convert(Normal, NormalCanon(0.3, 0.8))) ≈
+          NormalCanon(0.3, 0.8)
     @test mean(canonform(Normal(0.25, 0.7))) ≈ 0.25
     @test std(canonform(Normal(0.25, 0.7))) ≈ 0.7
 end
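The tail tests above exist because computing log(cdf(d, x) − cdf(d, y)) directly underflows when both probabilities are far below floating-point range; `logdiffcdf` and the `logsubexp`-based BigFloat reference work in log space instead. A sketch of the failure mode (the values are chosen to sit deep in the lower tail):

    using Distributions, StatsFuns

    d = Normal()
    x, y = -40.0, -40.001
    # cdf(d, x) and cdf(d, y) both underflow to 0.0 in Float64, so the
    # naive difference gives log(0) = -Inf even though the truth is finite
    naive = log(cdf(d, x) - cdf(d, y))
    # subtracting in log space keeps the result finite and accurate
    stable = logsubexp(logcdf(d, x), logcdf(d, y))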
diff --git a/test/univariate/continuous/pareto.jl b/test/univariate/continuous/pareto.jl
index 195e34a96e..c86292c550 100644
--- a/test/univariate/continuous/pareto.jl
+++ b/test/univariate/continuous/pareto.jl
@@ -1,10 +1,10 @@
 @testset "Pareto Sampling Tests" begin
     for d in [
-        Pareto()
-        Pareto(2.0)
-        Pareto(2.0, 1.5)
-        Pareto(3.0, 2.0)
-    ]
+            Pareto()
+            Pareto(2.0)
+            Pareto(2.0, 1.5)
+            Pareto(3.0, 2.0)
+        ]
         test_distr(d, 10^6)
     end
 end
diff --git a/test/univariate/continuous/pgeneralizedgaussian.jl b/test/univariate/continuous/pgeneralizedgaussian.jl
index a80989613d..33a89ac339 100644
--- a/test/univariate/continuous/pgeneralizedgaussian.jl
+++ b/test/univariate/continuous/pgeneralizedgaussian.jl
@@ -9,18 +9,18 @@ using Test
     μ = randn()
     for p in (-0.2, 0)
         @test_throws DomainError PGeneralizedGaussian(p)
-        PGeneralizedGaussian(p; check_args=false)
+        PGeneralizedGaussian(p; check_args = false)
         @test_throws DomainError PGeneralizedGaussian(μ, 1.0, p)
-        PGeneralizedGaussian(μ, 1.0, p; check_args=false)
+        PGeneralizedGaussian(μ, 1.0, p; check_args = false)
     end
     for α in (-1.2, 0)
         @test_throws DomainError PGeneralizedGaussian(μ, α, 1.0)
-        PGeneralizedGaussian(μ, α, 1.0; check_args=false)
+        PGeneralizedGaussian(μ, α, 1.0; check_args = false)
         for p in (-0.2, 0)
             @test_throws DomainError PGeneralizedGaussian(μ, α, p)
-            PGeneralizedGaussian(μ, α, p; check_args=false)
+            PGeneralizedGaussian(μ, α, p; check_args = false)
         end
     end
diff --git a/test/univariate/continuous/rician.jl b/test/univariate/continuous/rician.jl
index a75397f891..b46d53cbd9 100644
--- a/test/univariate/continuous/rician.jl
+++ b/test/univariate/continuous/rician.jl
@@ -14,7 +14,8 @@
     @test var(d1) ≈ var(d2)
     @test mode(d1) ≈ mode(d2)
     @test median(d1) ≈ median(d2)
-    @test Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) ≈ Base.Fix1(quantile, d2).([0.25, 0.45, 0.60, 0.80, 0.90])
+    @test Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) ≈
+          Base.Fix1(quantile, d2).([0.25, 0.45, 0.60, 0.80, 0.90])
     @test Base.Fix1(pdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(pdf, d2).(0.0:0.1:1.0)
     @test Base.Fix1(cdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(cdf, d2).(0.0:0.1:1.0)

@@ -26,8 +27,8 @@
     x = rand(Rician(5.0, 5.0), 100000)
     d1 = fit(Rician, x)
     @test d1 isa Rician{Float64}
-    @test params(d1)[1] ≈ 5.0 atol=0.2
-    @test params(d1)[2] ≈ 5.0 atol=0.2
+    @test params(d1)[1] ≈ 5.0 atol = 0.2
+    @test params(d1)[2] ≈ 5.0 atol = 0.2

     d1 = Rician(10.0f0, 10.0f0)
     @test d1 isa Rician{Float32}
@@ -67,7 +68,7 @@
     @inferred pdf(d1, 1.0)
     @inferred pdf(d1, 1.0f0)
     @inferred pdf(d1, 1)
-    @inferred pdf(d1, 1//2)
+    @inferred pdf(d1, 1 // 2)
     @inferred pdf(d1, Inf)

     @inferred logpdf(d1, -Inf32)
@@ -75,7 +76,7 @@
     @inferred logpdf(d1, 1.0)
     @inferred logpdf(d1, 1.0f0)
     @inferred logpdf(d1, 1)
-    @inferred logpdf(d1, 1//2)
+    @inferred logpdf(d1, 1 // 2)
     @inferred logpdf(d1, Inf)

     @inferred cdf(d1, -Inf32)
@@ -83,7 +84,7 @@
     @inferred cdf(d1, 1.0)
     @inferred cdf(d1, 1.0f0)
     @inferred cdf(d1, 1)
-    @inferred cdf(d1, 1//2)
+    @inferred cdf(d1, 1 // 2)
     @inferred cdf(d1, Inf)

     @inferred logcdf(d1, -Inf32)
@@ -91,7 +92,7 @@
     @inferred logcdf(d1, 1.0)
     @inferred logcdf(d1, 1.0f0)
     @inferred logcdf(d1, 1)
-    @inferred logcdf(d1, 1//2)
+    @inferred logcdf(d1, 1 // 2)
     @inferred logcdf(d1, Inf)
 end
diff --git a/test/univariate/continuous/semicircle.jl b/test/univariate/continuous/semicircle.jl
index 7079aa9490..aadfb051ba 100644
--- a/test/univariate/continuous/semicircle.jl
+++ b/test/univariate/continuous/semicircle.jl
@@ -10,37 +10,37 @@ d = Semicircle(2.0)
 @test maximum(d) == +2.0
 @test extrema(d) == (-2.0, 2.0)

-@test mean(d) == .0
-@test var(d) == 1.0
-@test skewness(d) == .0
-@test median(d) == .0
-@test mode(d) == .0
-@test entropy(d) == 1.33787706640934544458
+@test mean(d) == 0.0
+@test var(d) == 1.0
+@test skewness(d) == 0.0
+@test median(d) == 0.0
+@test mode(d) == 0.0
+@test entropy(d) == 1.33787706640934544458

-@test pdf(d, -5) == .0
-@test pdf(d, -2) == .0
-@test pdf(d, 0) == .31830988618379067154
-@test pdf(d, +2) == .0
-@test pdf(d, +5) == .0
+@test pdf(d, -5) == 0.0
+@test pdf(d, -2) == 0.0
+@test pdf(d, 0) == 0.31830988618379067154
+@test pdf(d, +2) == 0.0
+@test pdf(d, +5) == 0.0

 @test logpdf(d, -5) == -Inf
 @test logpdf(d, -2) == -Inf
-@test logpdf(d, 0) ≈ log(.31830988618379067154)
+@test logpdf(d, 0) ≈ log(0.31830988618379067154)
 @test logpdf(d, +2) == -Inf
 @test logpdf(d, +5) == -Inf

-@test cdf(d, -5) == .0
-@test cdf(d, -2) == .0
-@test cdf(d, 0) == .5
+@test cdf(d, -5) == 0.0
+@test cdf(d, -2) == 0.0
+@test cdf(d, 0) == 0.5
 @test cdf(d, +2) == 1.0
 @test cdf(d, +5) == 1.0

-@test quantile(d, .0) == -2.0
-@test quantile(d, .5) == .0
+@test quantile(d, 0.0) == -2.0
+@test quantile(d, 0.5) == 0.0
 @test quantile(d, 1.0) == +2.0

 rng = StableRNG(123)
-@testset for r in rand(rng, Uniform(0,10), 5)
+@testset for r in rand(rng, Uniform(0, 10), 5)
     N = 10^4
     semi = Semicircle(r)
     sample = rand(rng, semi, N)
@@ -60,7 +60,7 @@ rng = StableRNG(123)
     @test lo < cdf(semi, ma) < hi

     # central limit theorem
-    dmean = Normal(mean(semi), std(semi)/√(N))
+    dmean = Normal(mean(semi), std(semi) / √(N))
     @test quantile(dmean, 0.01) < mean(sample) < quantile(dmean, 0.99)

     pvalue = pvalue_kolmogorovsmirnoff(sample, semi)
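The `dmean` line above is a central-limit-theorem sanity check: the mean of N iid draws is approximately Normal(mean(semi), std(semi)/√N), so the sample mean should fall between the 1% and 99% quantiles of that reference distribution in about 98% of runs. Roughly (the seed is illustrative):

    using Distributions, Random

    rng = Random.MersenneTwister(1)
    semi = Semicircle(3.0)
    N = 10^4
    sample = rand(rng, semi, N)
    # sampling distribution of the mean under the CLT
    dmean = Normal(mean(semi), std(semi) / sqrt(N))
    # holds with probability ≈ 0.98 by construction
    @assert quantile(dmean, 0.01) < mean(sample) < quantile(dmean, 0.99)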
diff --git a/test/univariate/continuous/skewedexponentialpower.jl b/test/univariate/continuous/skewedexponentialpower.jl
index 5eea7d81cc..7c3b5475b6 100644
--- a/test/univariate/continuous/skewedexponentialpower.jl
+++ b/test/univariate/continuous/skewedexponentialpower.jl
@@ -10,7 +10,7 @@ using Test
        @test @inferred partype(d1) == Float32
        d2 = SkewedExponentialPower(0, 1, 1, 0.5)
        @test @inferred partype(d2) == Float64
-        @test @inferred params(d2) == (0., 1., 1., 0.5)
+        @test @inferred params(d2) == (0.0, 1.0, 1.0, 0.5)
        @test @inferred cdf(d2, Inf) == 1
        @test @inferred cdf(d2, -Inf) == 0
        @test @inferred quantile(d2, 1) == Inf
@@ -32,7 +32,7 @@ using Test
        # where the variance is reparametrized as σₚ = p^(1/p)σ to ensure equal pdfs
        p = 1.2
        d = SkewedExponentialPower(0, 1, p, 0.5)
-        de = PGeneralizedGaussian(0, p^(1/p), p)
+        de = PGeneralizedGaussian(0, p^(1 / p), p)
        @test @inferred mean(d) ≈ mean(de)
        @test @inferred var(d) ≈ var(de)
        @test @inferred skewness(d) ≈ skewness(de)
@@ -52,7 +52,7 @@ using Test
        @test @inferred mean(d) ≈ mean(dn)
        @test @inferred var(d) ≈ var(dn)
        @test @inferred skewness(d) ≈ skewness(dn)
-        @test @inferred isapprox(kurtosis(d), kurtosis(dn), atol = 1/10^10)
+        @test @inferred isapprox(kurtosis(d), kurtosis(dn), atol = 1 / 10^10)
        @test @inferred pdf(d, 0.5) ≈ pdf(dn, 0.5)
        @test @inferred cdf(d, 0.5) ≈ cdf(dn, 0.5)
        @test @inferred quantile(d, 0.5) ≈ quantile(dn, 0.5)
@@ -63,34 +63,34 @@ using Test
        # exponential power function in R from package
        # VaRES. Values are set to μ = 0, σ = 1, p = 0.5, α = 0.7
        test = [
-            -10.0000000 0.004770878 0.02119061;
-            -9.2631579 0.005831228 0.02508110;
-            -8.5263158 0.007185598 0.02985598;
-            -7.7894737 0.008936705 0.03576749;
-            -7.0526316 0.011232802 0.04315901;
-            -6.3157895 0.014293449 0.05250781;
-            -5.5789474 0.018454000 0.06449163;
-            -4.8421053 0.024246394 0.08010122;
-            -4.1052632 0.032555467 0.10083623;
-            -3.3684211 0.044947194 0.12906978;
-            -2.6315789 0.064438597 0.16879238;
-            -1.8947368 0.097617334 0.22732053;
-            -1.1578947 0.162209719 0.32007314;
-            -0.4210526 0.333932302 0.49013645;
-            0.3157895 0.234346966 0.82757893;
-            1.0526316 0.070717323 0.92258764;
-            1.7894737 0.031620241 0.95773428;
-            2.5263158 0.016507946 0.97472202;
-            3.2631579 0.009427166 0.98399211;
-            4.0000000 0.005718906 0.98942757;
+            -10.0000000 0.004770878 0.02119061
+            -9.2631579 0.005831228 0.02508110
+            -8.5263158 0.007185598 0.02985598
+            -7.7894737 0.008936705 0.03576749
+            -7.0526316 0.011232802 0.04315901
+            -6.3157895 0.014293449 0.05250781
+            -5.5789474 0.018454000 0.06449163
+            -4.8421053 0.024246394 0.08010122
+            -4.1052632 0.032555467 0.10083623
+            -3.3684211 0.044947194 0.12906978
+            -2.6315789 0.064438597 0.16879238
+            -1.8947368 0.097617334 0.22732053
+            -1.1578947 0.162209719 0.32007314
+            -0.4210526 0.333932302 0.49013645
+            0.3157895 0.234346966 0.82757893
+            1.0526316 0.070717323 0.92258764
+            1.7894737 0.031620241 0.95773428
+            2.5263158 0.016507946 0.97472202
+            3.2631579 0.009427166 0.98399211
+            4.0000000 0.005718906 0.98942757
        ]
        d = SkewedExponentialPower(0, 1, 0.5, 0.7)
        for t in eachrow(test)
-            @test @inferred(pdf(d, t[1])) ≈ t[2] rtol=1e-5
-            @test @inferred(logpdf(d, t[1])) ≈ log(t[2]) rtol=1e-5
-            @test @inferred(cdf(d, t[1])) ≈ t[3] rtol=1e-3
-            @test @inferred(logcdf(d, t[1])) ≈ log(t[3]) rtol=1e-3
+            @test @inferred(pdf(d, t[1])) ≈ t[2] rtol = 1e-5
+            @test @inferred(logpdf(d, t[1])) ≈ log(t[2]) rtol = 1e-5
+            @test @inferred(cdf(d, t[1])) ≈ t[3] rtol = 1e-3
+            @test @inferred(logcdf(d, t[1])) ≈ log(t[3]) rtol = 1e-3
        end

        test_distr(d, 10^6)
@@ -106,7 +106,11 @@ using Test
        α, p = rand(2)
        d = SkewedExponentialPower(0, 1, p, α)
        # moments of the standard SEPD, Equation 18 in Zhu, D. and V. Zinde-Walsh (2009)
-        moments = [(2*p^(1/p))^k * ((-1)^k*α^(1+k) + (1-α)^(1+k)) * gamma((1+k)/p)/gamma(1/p) for k ∈ 1:4]
+        moments = [
+            (2 * p^(1 / p))^k *
+            ((-1)^k * α^(1 + k) + (1 - α)^(1 + k)) *
+            gamma((1 + k) / p) / gamma(1 / p) for k ∈ 1:4
+        ]

        @inferred var(d) ≈ moments[2] - moments[1]^2
        @inferred skewness(d) ≈ moments[3] / (√(moments[2] - moments[1]^2))^3
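The symmetric special case used above is worth spelling out: for α = 1/2 the skewed exponential power distribution coincides with a p-generalized Gaussian once the scale is matched as σₚ = p^(1/p)·σ, exactly the reparametrization named in the comment. A quick check (the shape value is arbitrary):

    using Distributions

    p = 1.7
    d_sep = SkewedExponentialPower(0, 1, p, 0.5)   # α = 1/2: symmetric case
    d_pgg = PGeneralizedGaussian(0, p^(1 / p), p)  # scale matched as p^(1/p) * σ
    # the densities should then agree pointwise
    for x in (-2.0, -0.3, 0.0, 1.1)
        @assert pdf(d_sep, x) ≈ pdf(d_pgg, x)
    end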
diff --git a/test/univariate/continuous/skewnormal.jl b/test/univariate/continuous/skewnormal.jl
index 5d1257162c..98077c76e6 100644
--- a/test/univariate/continuous/skewnormal.jl
+++ b/test/univariate/continuous/skewnormal.jl
@@ -12,7 +12,7 @@ import Distributions: normpdf, normcdf, normlogpdf, normlogcdf
     # Azzalini sn: sprintf("%.17f",dsn(3.3, xi=1, omega=2, alpha=3))
     @test pdf(d1, 3.3) ≈ 0.20587854616839998
     @test minimum(d1) ≈ -Inf
-    @test maximum(d1)  ≈ Inf
+    @test maximum(d1) ≈ Inf
     @test logpdf(d1, 3.3) ≈ log(pdf(d1, 3.3))
     ## cdf and quantile: when we get Owen's T
     #@test cdf(d, 4.5) ≈ 1.0  #when we get Owen's T
diff --git a/test/univariate/continuous/triangular.jl b/test/univariate/continuous/triangular.jl
index 213c7fc1c4..0009f2a07f 100644
--- a/test/univariate/continuous/triangular.jl
+++ b/test/univariate/continuous/triangular.jl
@@ -56,9 +56,10 @@ using Test
        @test params(d) == (a, b, c)
        @test mode(d) == c
        @test mean(d) == (a + b + c) / 3
-        @test median(d) == (c >= middle(a, b) ?
-                            a + sqrt((b - a) * (c - a) / 2) :
-                            b - sqrt((b - a) * (b - c) / 2))
+        @test median(d) == (
+            c >= middle(a, b) ? a + sqrt((b - a) * (c - a) / 2) :
+            b - sqrt((b - a) * (b - c) / 2)
+        )
        @test var(d) == (a^2 + b^2 + c^2 - a * b - a * c - b * c) / 18
        @test kurtosis(d) == -3 / 5
@@ -109,10 +110,10 @@ using Test
            @test mgf(d, 0) == 1
            @test fdm(Base.Fix1(mgf, d), 0.0) ≈ mean(d)
-            @test fdm2(Base.Fix1(mgf, d), 0.0) ≈ mean(d)^2 + var(d) rtol=1e-6
+            @test fdm2(Base.Fix1(mgf, d), 0.0) ≈ mean(d)^2 + var(d) rtol = 1e-6
            @test cf(d, 0) == 1
            @test fdm(Base.Fix1(cf, d), 0.0) ≈ mean(d) * im
-            @test fdm2(Base.Fix1(cf, d), 0.0) ≈ -(mean(d)^2 + var(d)) rtol=1e-6
+            @test fdm2(Base.Fix1(cf, d), 0.0) ≈ -(mean(d)^2 + var(d)) rtol = 1e-6
        end
    end
 end
diff --git a/test/univariate/continuous/uniform.jl b/test/univariate/continuous/uniform.jl
index e3a5d729ed..9f61393193 100644
--- a/test/univariate/continuous/uniform.jl
+++ b/test/univariate/continuous/uniform.jl
@@ -9,13 +9,13 @@ using Test
 @testset "uniform.jl" begin
     # affine transformations
     test_affine_transformations(Uniform, rand(), 4 + rand())
-    test_cgf(Uniform(0,1), (1, -1, 100f0, 1e6, -1e6))
-    test_cgf(Uniform(100f0,101f0), (1, -1, 100f0, 1e6, -1e6))
+    test_cgf(Uniform(0, 1), (1, -1, 100.0f0, 1e6, -1e6))
+    test_cgf(Uniform(100.0f0, 101.0f0), (1, -1, 100.0f0, 1e6, -1e6))

     @testset "ChainRules" begin
         # run test suite for values in the support
-        dist = Uniform(- 1 - rand(), 1 + rand())
-        tangent = ChainRulesTestUtils.Tangent{Uniform{Float64}}(; a=randn(), b=randn())
+        dist = Uniform(-1 - rand(), 1 + rand())
+        tangent = ChainRulesTestUtils.Tangent{Uniform{Float64}}(; a = randn(), b = randn())
         for x in (rand(), -rand())
             test_frule(logpdf, dist ⊢ tangent, x)
             test_rrule(logpdf, dist ⊢ tangent, x)
@@ -38,7 +38,7 @@ using Test
             @test Ω == -Inf
             @test @inferred(pullback(randn())) == (
                 ChainRulesTestUtils.NoTangent(),
-                ChainRulesTestUtils.Tangent{Uniform{Float64}}(; a=0.0, b=0.0),
+                ChainRulesTestUtils.Tangent{Uniform{Float64}}(; a = 0.0, b = 0.0),
                 ChainRulesTestUtils.ZeroTangent(),
             )
         end
@@ -55,28 +55,32 @@ using Test
             ((Float16(0), Float16(1), sqrt(eps(Float16)))),
             ((Float16(0), Float16(1), Float16(0))),
             ((Float16(0), Float16(1), -sqrt(eps(Float16)))),
-            (0f0, 1f0, sqrt(eps(Float32))),
-            (0f0, 1f0, 0f0),
-            (0f0, 1f0, -sqrt(eps(Float32))),
-            (-2f0, 1f0, 1f-30),
-            (-2f-4, -1f-4, -2f-40),
+            (0.0f0, 1.0f0, sqrt(eps(Float32))),
+            (0.0f0, 1.0f0, 0.0f0),
+            (0.0f0, 1.0f0, -sqrt(eps(Float32))),
+            (-2.0f0, 1.0f0, 1.0f-30),
+            (-2.0f-4, -1.0f-4, -2.0f-40),
             (0.0, 1.0, sqrt(eps(Float64))),
             (0.0, 1.0, 0.0),
             (0.0, 1.0, -sqrt(eps(Float64))),
             (-2.0, 5.0, -1e-35),
-    ]
+        ]
         T = typeof(lo)
         @assert T == typeof(lo) == typeof(hi) == typeof(t)
         @assert t <= sqrt(eps(T))
         d = Uniform(lo, hi)
         precision = 512
-        d_big = Uniform(BigFloat(lo, precision=precision), BigFloat(hi; precision=precision))
-        t_big = BigFloat(t, precision=precision)
+        d_big = Uniform(
+            BigFloat(lo, precision = precision),
+            BigFloat(hi; precision = precision),
+        )
+        t_big = BigFloat(t, precision = precision)
         @test cgf(d, t) isa T
         if iszero(t)
-            @test cgf(d,t) === zero(t)
+            @test cgf(d, t) === zero(t)
         else
-            @test Distributions.cgf_around_zero(d, t) ≈ Distributions.cgf_away_from_zero(d_big, t_big) atol=eps(t) rtol=0
+            @test Distributions.cgf_around_zero(d, t) ≈
+                  Distributions.cgf_away_from_zero(d_big, t_big) atol = eps(t) rtol = 0
             @test Distributions.cgf_around_zero(d, t) === cgf(d, t)
         end
     end
@@ -89,7 +93,7 @@ using Test
         TS = float(promote_type(T, S))

         @test @inferred(pdf(d, S(1))) === TS(0)
-        @test @inferred(pdf(d, S(3))) === TS(1//2)
+        @test @inferred(pdf(d, S(3))) === TS(1 // 2)
         @test @inferred(pdf(d, S(5))) === TS(0)

         @test @inferred(logpdf(d, S(1))) === TS(-Inf)
@@ -97,7 +101,7 @@ using Test
         @test @inferred(logpdf(d, S(5))) === TS(-Inf)

         @test @inferred(cdf(d, S(1))) === TS(0)
-        @test @inferred(cdf(d, S(3))) === TS(1//2)
+        @test @inferred(cdf(d, S(3))) === TS(1 // 2)
         @test @inferred(cdf(d, S(5))) === TS(1)

         @test @inferred(logcdf(d, S(1))) === TS(-Inf)
@@ -105,7 +109,7 @@ using Test
         @test @inferred(logcdf(d, S(5))) === TS(0)

         @test @inferred(ccdf(d, S(1))) === TS(1)
-        @test @inferred(ccdf(d, S(3))) === TS(1//2)
+        @test @inferred(ccdf(d, S(3))) === TS(1 // 2)
         @test @inferred(ccdf(d, S(5))) === TS(0)

         @test @inferred(logccdf(d, S(1))) === TS(0)
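The `cgf_around_zero`/`cgf_away_from_zero` pair exercised above (both internal helpers, hence the `Distributions.` qualification) exists because the closed form cgf(t) = log((exp(tb) − exp(ta))/(t(b − a))) is 0/0 at t = 0 and cancels badly for tiny t, so a series expansion is used near zero. A sketch of the leading terms, K(t) ≈ t(a + b)/2 + t²(b − a)²/24; this truncation is illustrative and not necessarily the exact one the package uses:

    using Distributions

    d = Uniform(-2.0, 5.0)
    a, b = params(d)
    t = 1e-9   # small enough that the direct formula would lose precision
    series = t * (a + b) / 2 + t^2 * (b - a)^2 / 24
    @assert cgf(d, t) ≈ series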
diff --git a/test/univariate/continuous/weibull.jl b/test/univariate/continuous/weibull.jl
index 7e0463a73c..8647dd8e4a 100644
--- a/test/univariate/continuous/weibull.jl
+++ b/test/univariate/continuous/weibull.jl
@@ -13,7 +13,8 @@ using Test
            @test @inferred(pdf(d, T(-3))) === ST(0)
            @test @inferred(logpdf(d, T(-3))) === ST(-Inf)
            @test @inferred(pdf(d, T(0))) === (α == 1 ? ST(S(α) / S(θ)) : ST(0))
-            @test @inferred(logpdf(d, T(0))) === (α == 1 ? ST(log(S(α) / S(θ))) : ST(-Inf))
+            @test @inferred(logpdf(d, T(0))) ===
+                  (α == 1 ? ST(log(S(α) / S(θ))) : ST(-Inf))

            if T <: AbstractFloat
                @test @inferred(pdf(d, T(-Inf))) === ST(0)
diff --git a/test/univariate/discrete/bernoulli.jl b/test/univariate/discrete/bernoulli.jl
index 2d72b33c0a..b8f622d43c 100644
--- a/test/univariate/discrete/bernoulli.jl
+++ b/test/univariate/discrete/bernoulli.jl
@@ -3,6 +3,6 @@ using Test, Random

 @test rand(Bernoulli()) isa Bool
 @test rand(Bernoulli(), 10) isa Vector{Bool}
-
-test_cgf(Bernoulli(0.5), (1f0, -1f0,1e6, -1e6))
-test_cgf(Bernoulli(0.1), (1f0, -1f0,1e6, -1e6))
+
+test_cgf(Bernoulli(0.5), (1.0f0, -1.0f0, 1e6, -1e6))
+test_cgf(Bernoulli(0.1), (1.0f0, -1.0f0, 1e6, -1e6))
diff --git a/test/univariate/discrete/bernoullilogit.jl b/test/univariate/discrete/bernoullilogit.jl
index 55560692a0..e60039ea79 100644
--- a/test/univariate/discrete/bernoullilogit.jl
+++ b/test/univariate/discrete/bernoullilogit.jl
@@ -35,13 +35,13 @@ end
        d = BernoulliLogit(logit(p))
        @test @inferred(rand(d)) isa Bool
        @test @inferred(rand(d, 10)) isa Vector{Bool}
-        @test mean(rand(d, N)) ≈ p atol=0.01
+        @test mean(rand(d, N)) ≈ p atol = 0.01
    end
 end

 @testset "cgf" begin
-    test_cgf(BernoulliLogit(), (1f0, -1f0, 1e6, -1e6))
-    test_cgf(BernoulliLogit(0.1), (1f0, -1f0, 1e6, -1e6))
+    test_cgf(BernoulliLogit(), (1.0f0, -1.0f0, 1e6, -1e6))
+    test_cgf(BernoulliLogit(0.1), (1.0f0, -1.0f0, 1e6, -1e6))
 end

 @testset "comparison with `Bernoulli`" begin
@@ -68,14 +68,14 @@ end
        end

        for q in (-0.2f0, 0.25, 0.6f0, 1.5)
-            @test @inferred(quantile(d, q)) ≈ quantile(d0, q) nans=true
-            @test @inferred(cquantile(d, q)) ≈ cquantile(d0, q) nans=true
+            @test @inferred(quantile(d, q)) ≈ quantile(d0, q) nans = true
+            @test @inferred(cquantile(d, q)) ≈ cquantile(d0, q) nans = true
        end

        for t in (-5.2, 1.2f0)
-            @test @inferred(mgf(d, t)) ≈ mgf(d0, t) rtol=1e-6
-            @test @inferred(cgf(d, t)) ≈ cgf(d0, t) rtol=1e-6
-            @test @inferred(cf(d, t)) ≈ cf(d0, t) rtol=1e-6
+            @test @inferred(mgf(d, t)) ≈ mgf(d0, t) rtol = 1e-6
+            @test @inferred(cgf(d, t)) ≈ cgf(d0, t) rtol = 1e-6
+            @test @inferred(cf(d, t)) ≈ cf(d0, t) rtol = 1e-6
        end
    end
 end
diff --git a/test/univariate/discrete/betabinomial.jl b/test/univariate/discrete/betabinomial.jl
index b1e05ad772..64f5b07a45 100644
--- a/test/univariate/discrete/betabinomial.jl
+++ b/test/univariate/discrete/betabinomial.jl
@@ -5,8 +5,8 @@ using Test

    @testset "logpdf" begin
        d = BetaBinomial(50, 0.2, 0.6)
-        for k in 1:50
-            p  = @inferred(pdf(d, k))
+        for k = 1:50
+            p = @inferred(pdf(d, k))
            lp = @inferred(logpdf(d, k))
            @test lp ≈ log(p)
        end
@@ -15,7 +15,7 @@ using Test
    @testset "support" begin
        d = BetaBinomial(50, 0.2, 0.6)

-        for k in 1:50
+        for k = 1:50
            @test insupport(d, k)
        end
        @test !insupport(d, 51)
@@ -27,13 +27,15 @@ using Test
        for n in (-1, 0, 3), α in (S(-1), S(0), S(1)), β in (T(-1), T(0), T(1))
            if n >= 0 && α > 0 && β > 0
                @test @inferred(BetaBinomial(n, α, β)) isa BetaBinomial{ST}
-                @test @inferred(BetaBinomial(n, α, β; check_args=true)) isa BetaBinomial{ST}
+                @test @inferred(BetaBinomial(n, α, β; check_args = true)) isa
+                      BetaBinomial{ST}
            else
                @test_throws DomainError BetaBinomial(n, α, β)
-                @test_throws DomainError BetaBinomial(n, α, β; check_args=true)
+                @test_throws DomainError BetaBinomial(n, α, β; check_args = true)
            end

-            @test @inferred(BetaBinomial(n, α, β; check_args=false)) isa BetaBinomial{ST}
+            @test @inferred(BetaBinomial(n, α, β; check_args = false)) isa
+                  BetaBinomial{ST}
        end
    end
 end
diff --git a/test/univariate/discrete/binomial.jl b/test/univariate/discrete/binomial.jl
index 55b3362144..2f5c8b37df 100644
--- a/test/univariate/discrete/binomial.jl
+++ b/test/univariate/discrete/binomial.jl
@@ -4,42 +4,51 @@ using Test, Random
 Random.seed!(1234)

 @testset "binomial" begin
-# Test the consistency between the recursive and nonrecursive computation of the pdf
-# of the Binomial distribution
-for (p, n) in [(0.6, 10), (0.8, 6), (0.5, 40), (0.04, 20), (1., 100), (0., 10), (0.999999, 1000), (1e-7, 1000)]
-    d = Binomial(n, p)
-
-    a = Base.Fix1(pdf, d).(0:n)
-    for t=0:n
-        @test pdf(d, t) ≈ a[1+t]
+    # Test the consistency between the recursive and nonrecursive computation of the pdf
+    # of the Binomial distribution
+    for (p, n) in [
+        (0.6, 10),
+        (0.8, 6),
+        (0.5, 40),
+        (0.04, 20),
+        (1.0, 100),
+        (0.0, 10),
+        (0.999999, 1000),
+        (1e-7, 1000),
+    ]
+        d = Binomial(n, p)
+
+        a = Base.Fix1(pdf, d).(0:n)
+        for t = 0:n
+            @test pdf(d, t) ≈ a[1+t]
+        end
+
+        li = rand(0:n, 2)
+        rng = minimum(li):maximum(li)
+        b = Base.Fix1(pdf, d).(rng)
+        for t in rng
+            @test pdf(d, t) ≈ b[t-first(rng)+1]
+        end
    end

-    li = rand(0:n, 2)
-    rng = minimum(li):maximum(li)
-    b = Base.Fix1(pdf, d).(rng)
-    for t in rng
-        @test pdf(d, t) ≈ b[t - first(rng) + 1]
-    end
-end
+    # Test calculation of expectation value for Binomial distribution
+    @test Distributions.expectation(identity, Binomial(6)) ≈ 3.0
+    @test Distributions.expectation(x -> -x, Binomial(10, 0.2)) ≈ -2.0
+
+    # Test median
+    @test median(Binomial(25, 3 // 10)) == 7
+    @test median(Binomial(45, 3 // 10)) == 13
+    @test median(Binomial(65, 3 // 10)) == 19
+    @test median(Binomial(85, 3 // 10)) == 25
+
+    @test all(median(Binomial(7, p)) == quantile(Binomial(7, p), 1 // 2) for p = 0:0.1:1)
+
+    # Test mode
+    @test Distributions.mode(Binomial(100, 0.4)) == 40
+    @test Distributions.mode(Binomial(1, 0.51)) == 1
+    @test Distributions.mode(Binomial(1, 0.49)) == 0

-# Test calculation of expectation value for Binomial distribution
-@test Distributions.expectation(identity, Binomial(6)) ≈ 3.0
-@test Distributions.expectation(x -> -x, Binomial(10, 0.2)) ≈ -2.0
-
-# Test median
-@test median(Binomial(25,3//10)) == 7
-@test median(Binomial(45,3//10)) == 13
-@test median(Binomial(65,3//10)) == 19
-@test median(Binomial(85,3//10)) == 25
-
-@test all(median(Binomial(7, p)) == quantile(Binomial(7, p), 1//2) for p in 0:0.1:1)
-
-# Test mode
-@test Distributions.mode(Binomial(100, 0.4)) == 40
-@test Distributions.mode(Binomial(1, 0.51)) == 1
-@test Distributions.mode(Binomial(1, 0.49)) == 0
-
-@test isplatykurtic(Bernoulli(0.5))
-@test ismesokurtic(Normal(0.0, 1.0))
-@test isleptokurtic(Laplace(0.0, 1.0))
+    @test isplatykurtic(Bernoulli(0.5))
+    @test ismesokurtic(Normal(0.0, 1.0))
+    @test isleptokurtic(Laplace(0.0, 1.0))
 end
diff --git a/test/univariate/discrete/categorical.jl b/test/univariate/discrete/categorical.jl
index 6d87d4dc86..fc3be9bbfd 100644
--- a/test/univariate/discrete/categorical.jl
+++ b/test/univariate/discrete/categorical.jl
@@ -4,137 +4,138 @@ using StableRNGs

 @testset "Categorical" begin

-for p in Any[
-    [0.5, 0.5],
-    [0.5f0, 0.5f0],
-    [1//2, 1//2],
-    [0.1, 0.3, 0.2, 0.4],
-    [0.15, 0.25, 0.6] ]
-
-    d = Categorical(p)
-    k = length(p)
-    println("    testing $d as Categorical")
+    for p in Any[
+        [0.5, 0.5],
+        [0.5f0, 0.5f0],
+        [1 // 2, 1 // 2],
+        [0.1, 0.3, 0.2, 0.4],
+        [0.15, 0.25, 0.6],
+    ]
+
+        d = Categorical(p)
+        k = length(p)
+        println("    testing $d as Categorical")
+
+        @test isa(d, Categorical)
+        @test probs(d) == p
+        @test minimum(d) == 1
+        @test maximum(d) == k
+        @test extrema(d) == (1, k)
+        @test ncategories(d) == k
+        @test d == d
+        @test d ≈ d

-    @test isa(d, Categorical)
-    @test probs(d) == p
-    @test minimum(d) == 1
-    @test maximum(d) == k
-    @test extrema(d) == (1, k)
-    @test ncategories(d) == k
-    @test d == d
-    @test d ≈ d
-
-    c = 0.0
-    for i = 1:k
-        c += p[i]
-        @test @inferred(pdf(d, i)) == p[i]
-        @test @inferred(pdf(d, float(i))) == p[i]
-        @test @inferred(logpdf(d, i)) === log(p[i])
-        @test @inferred(logpdf(d, float(i))) === log(p[i])
-        @test @inferred(cdf(d, i)) ≈ c
-        @test @inferred(cdf(d, i + 0.5)) ≈ c
-        @test @inferred(ccdf(d, i)) ≈ 1 - c
-        @test @inferred(ccdf(d, i + 0.5)) ≈ 1 - c
-        @test @inferred(logcdf(d, i)) ≈ log(c)
-        @test @inferred(logcdf(d, i + 0.5)) ≈ log(c)
-        @test @inferred(logccdf(d, i)) ≈ log1p(-c)
-        @test @inferred(logccdf(d, i + 0.5)) ≈ log1p(-c)
+        c = 0.0
+        for i = 1:k
+            c += p[i]
+            @test @inferred(pdf(d, i)) == p[i]
+            @test @inferred(pdf(d, float(i))) == p[i]
+            @test @inferred(logpdf(d, i)) === log(p[i])
+            @test @inferred(logpdf(d, float(i))) === log(p[i])
+            @test @inferred(cdf(d, i)) ≈ c
+            @test @inferred(cdf(d, i + 0.5)) ≈ c
+            @test @inferred(ccdf(d, i)) ≈ 1 - c
+            @test @inferred(ccdf(d, i + 0.5)) ≈ 1 - c
+            @test @inferred(logcdf(d, i)) ≈ log(c)
+            @test @inferred(logcdf(d, i + 0.5)) ≈ log(c)
+            @test @inferred(logccdf(d, i)) ≈ log1p(-c)
+            @test @inferred(logccdf(d, i + 0.5)) ≈ log1p(-c)
+        end
+
+        @test pdf(d, 0) == 0
+        @test pdf(d, k + 1) == 0
+        @test logpdf(d, 0) == -Inf
+        @test logpdf(d, k + 1) == -Inf
+        @test iszero(cdf(d, -Inf))
+        @test iszero(cdf(d, 0))
+        @test isone(cdf(d, k + 1))
+        @test isone(cdf(d, Inf))
+        @test isnan(cdf(d, NaN))
+        @test isone(ccdf(d, -Inf))
+        @test isone(ccdf(d, 0))
+        @test iszero(ccdf(d, k + 1))
+        @test iszero(ccdf(d, Inf))
+        @test isnan(ccdf(d, NaN))
+
+        @test Base.Fix1(pdf, d).(support(d)) == p
+        @test Base.Fix1(pdf, d).(1:k) == p
+
+        @test cf(d, 0) ≈ 1.0
+        @test cf(d, 1) ≈ p' * cis.(1:length(p))
+
+        @test mgf(d, 0) ≈ 1.0
+        @test mgf(d, 1) ≈ p' * exp.(1:length(p))
+
+        # The test utilities are currently only able to handle Float64s
+        if partype(d) === Float64
+            test_distr(d, 10^6)
+        end
    end

-    @test pdf(d, 0) == 0
-    @test pdf(d, k+1) == 0
-    @test logpdf(d, 0) == -Inf
-    @test logpdf(d, k+1) == -Inf
-    @test iszero(cdf(d, -Inf))
-    @test iszero(cdf(d, 0))
-    @test isone(cdf(d, k+1))
-    @test isone(cdf(d, Inf))
-    @test isnan(cdf(d, NaN))
-    @test isone(ccdf(d, -Inf))
-    @test isone(ccdf(d, 0))
-    @test iszero(ccdf(d, k+1))
-    @test iszero(ccdf(d, Inf))
-    @test isnan(ccdf(d, NaN))
-
-    @test Base.Fix1(pdf, d).(support(d)) == p
-    @test Base.Fix1(pdf, d).(1:k) == p
-
-    @test cf(d, 0) ≈ 1.0
-    @test cf(d, 1) ≈ p' * cis.(1:length(p))
-
-    @test mgf(d, 0) ≈ 1.0
-    @test mgf(d, 1) ≈ p' * exp.(1:length(p))
-
-    # The test utilities are currently only able to handle Float64s
-    if partype(d) === Float64
-        test_distr(d, 10^6)
+    d = Categorical(4)
+    println("    testing $d as Categorical")
+    @test minimum(d) == 1
+    @test maximum(d) == 4
+    @test extrema(d) == (1, 4)
+    @test probs(d) == [0.25, 0.25, 0.25, 0.25]
+
+    p = ones(10^6) * 1.0e-6
+    @test Distributions.isprobvec(p)
+
+    @test convert(Categorical{Float64,Vector{Float64}}, d) === d
+    for x in (d, probs(d))
+        d32 = convert(Categorical{Float32,Vector{Float32}}, d)
+        @test d32 isa Categorical{Float32,Vector{Float32}}
+        @test probs(d32) == map(Float32, probs(d))
    end
-end
-
-d = Categorical(4)
-println("    testing $d as Categorical")
-@test minimum(d) == 1
-@test maximum(d) == 4
-@test extrema(d) == (1, 4)
-@test probs(d) == [0.25, 0.25, 0.25, 0.25]
-
-p = ones(10^6) * 1.0e-6
-@test Distributions.isprobvec(p)
-
-@test convert(Categorical{Float64,Vector{Float64}}, d) === d
-for x in (d, probs(d))
-    d32 = convert(Categorical{Float32,Vector{Float32}}, d)
-    @test d32 isa Categorical{Float32,Vector{Float32}}
-    @test probs(d32) == map(Float32, probs(d))
-end
-
-@testset "test args... constructor" begin
-    @test Categorical(0.3, 0.7) == Categorical([0.3, 0.7])
-end
-
-@testset "reproducibility across julia versions" begin
-    d = Categorical([0.1, 0.2, 0.7])
-    rng = StableRNGs.StableRNG(600)
-    @test rand(rng, d, 10) == [3, 1, 1, 2, 3, 2, 3, 3, 2, 3]
-end
-
-@testset "comparisons" begin
-    d1 = Categorical([0.4, 0.6])
-    d2 = Categorical([0.6, 0.4])
-    d3 = Categorical([0.2, 0.7, 0.1])
-
-    # Same distribution
-    for d in (d1, d2, d3)
-        @test d == d
-        @test d ≈ d
+
+    @testset "test args... constructor" begin
+        @test Categorical(0.3, 0.7) == Categorical([0.3, 0.7])
    end

-    # Same support, different probabilities
-    @test d2 != d1
-    @test !isapprox(d2, d1)
-    @test d2 ≈ d1 atol=0.4
-
-    # Different support
-    @test d3 != d1
-    @test !isapprox(d3, d1)
+    @testset "reproducibility across julia versions" begin
+        d = Categorical([0.1, 0.2, 0.7])
+        rng = StableRNGs.StableRNG(600)
+        @test rand(rng, d, 10) == [3, 1, 1, 2, 3, 2, 3, 3, 2, 3]
+    end

-    # issue #1675
-    @test Categorical([0.5, 0.5]) ≈ Categorical([0.5, 0.5])
-    @test Categorical([0.5, 0.5]) == Categorical([0.5f0, 0.5f0])
-    @test Categorical([0.5, 0.5]) ≈ Categorical([0.5f0, 0.5f0])
-end
+    @testset "comparisons" begin
+        d1 = Categorical([0.4, 0.6])
+        d2 = Categorical([0.6, 0.4])
+        d3 = Categorical([0.2, 0.7, 0.1])
+
+        # Same distribution
+        for d in (d1, d2, d3)
+            @test d == d
+            @test d ≈ d
+        end
+
+        # Same support, different probabilities
+        @test d2 != d1
+        @test !isapprox(d2, d1)
+        @test d2 ≈ d1 atol = 0.4
+
+        # Different support
+        @test d3 != d1
+        @test !isapprox(d3, d1)
+
+        # issue #1675
+        @test Categorical([0.5, 0.5]) ≈ Categorical([0.5, 0.5])
+        @test Categorical([0.5, 0.5]) == Categorical([0.5f0, 0.5f0])
+        @test Categorical([0.5, 0.5]) ≈ Categorical([0.5f0, 0.5f0])
+    end

-@testset "issue #832" begin
-    priorities = collect(Float64, 1:1000)
-    priorities[1:50] .= 1e8
-
-    at = Distributions.AliasTable(priorities)
-    iat = rand(at, 16)
-
-    # failure rate of a single sample is sum(51:1000)/50e8 = 9.9845e-5
-    # failure rate of 4 out of 16 samples is 1-cdf(Binomial(16, 9.9845e-5), 3) = 1.8074430840897548e-13
-    # this test should randomly fail with a probability of 1.8074430840897548e-13
-    @test count(==(1e8), priorities[iat]) >= 13
-end
+    @testset "issue #832" begin
+        priorities = collect(Float64, 1:1000)
+        priorities[1:50] .= 1e8
+
+        at = Distributions.AliasTable(priorities)
+        iat = rand(at, 16)
+
+        # failure rate of a single sample is sum(51:1000)/50e8 = 9.9845e-5
+        # failure rate of 4 out of 16 samples is 1-cdf(Binomial(16, 9.9845e-5), 3) = 1.8074430840897548e-13
+        # this test should randomly fail with a probability of 1.8074430840897548e-13
+        @test count(==(1e8), priorities[iat]) >= 13
+    end
 end
diff --git a/test/univariate/discrete/dirac.jl b/test/univariate/discrete/dirac.jl
index cf027918b6..c3a561d7a1 100644
--- a/test/univariate/discrete/dirac.jl
+++ b/test/univariate/discrete/dirac.jl
@@ -2,8 +2,8 @@ using Distributions
 using Test

 @testset "Dirac tests" begin
-    test_cgf(Dirac(13)  , (1, 1f-4, 1e10, 10, -4))
-    test_cgf(Dirac(-1f2), (1, 1f-4, 1e10, 10,-4))
+    test_cgf(Dirac(13), (1, 1.0f-4, 1e10, 10, -4))
+    test_cgf(Dirac(-1.0f2), (1, 1.0f-4, 1e10, 10, -4))

     for val in (3, 3.0, -3.5)
         d = Dirac(val)
diff --git a/test/univariate/discrete/discretenonparametric.jl b/test/univariate/discrete/discretenonparametric.jl
index 68354a064a..1a010201fe 100644
--- a/test/univariate/discrete/discretenonparametric.jl
+++ b/test/univariate/discrete/discretenonparametric.jl
@@ -8,161 +8,175 @@ Base.rand(::AllOneRNG, ::Type{T}) where {T<:Number} = one(T)

 rng = MersenneTwister(123)

-@testset "Testing matrix-variates with $key" for (key, func) in
-    Dict("rand(...)" => [rand, rand],
-         "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)])
-
-d = DiscreteNonParametric([40., 80., 120., -60.], [.4, .3, .1, .2])
-
-@test !(d ≈ DiscreteNonParametric([40., 80, 120, -60], [.4, .3, .1, .2], check_args=false))
-@test d ≈ DiscreteNonParametric([-60., 40., 80, 120], [.2, .4, .3, .1], check_args=false)
-
-# Invalid probability
-@test_throws DomainError DiscreteNonParametric([40., 80, 120, -60], [.5, .3, .1, .2])
-
-# Invalid probability, but no arg check
-DiscreteNonParametric([40., 80, 120, -60], [.5, .3, .1, .2], check_args=false)
-
-test_range(d)
-vs = Distributions.get_evalsamples(d, 0.00001)
-test_evaluation(d, vs, true)
-test_stats(d, vs)
-test_params(d)
-
-@test func[1](d) ∈ [40., 80., 120., -60.]
-@test func[1](sampler(d)) ∈ [40., 80., 120., -60.]
-
-@test pdf(d, -100.) == 0.
-@test pdf(d, -100) == 0.
-@test pdf(d, -60.) == .2
-@test pdf(d, -60) == .2
-@test pdf(d, 100.) == 0.
-@test pdf(d, 120.) == .1
-
-@test iszero(cdf(d, -Inf))
-@test cdf(d, -100.) == 0.
-@test cdf(d, -100) == 0.
-@test cdf(d, 0.) ≈ .2
-@test cdf(d, 100.) ≈ .9
-@test cdf(d, 100) ≈ .9
-@test cdf(d, 150.) == 1.
-@test isone(cdf(d, Inf))
-@test isnan(cdf(d, NaN))
-
-@test isone(ccdf(d, -Inf))
-@test ccdf(d, -100.) == 1.
-@test ccdf(d, -100) == 1.
-@test ccdf(d, 0.) ≈ .8
-@test ccdf(d, 100.) ≈ .1
-@test ccdf(d, 100) ≈ .1
-@test ccdf(d, 150.) == 0
-@test iszero(ccdf(d, Inf))
-@test isnan(ccdf(d, NaN))
-
-@test logcdf(d, -Inf) == -Inf
-@test logcdf(d, -100.) == -Inf
-@test logcdf(d, -100) == -Inf
-@test logcdf(d, 0.) ≈ log(0.2)
-@test logcdf(d, 100.) ≈ log(0.9)
-@test logcdf(d, 100) ≈ log(0.9)
-@test iszero(logcdf(d, 150.))
-@test iszero(logcdf(d, Inf))
-@test isnan(logcdf(d, NaN))
-
-@test iszero(logccdf(d, -Inf))
-@test iszero(logccdf(d, -100.))
-@test iszero(logccdf(d, -100))
-@test logccdf(d, 0.) ≈ log(0.8)
-@test logccdf(d, 100.) ≈ log(0.1)
-@test logccdf(d, 100) ≈ log(0.1)
-@test logccdf(d, 150.) == -Inf
-@test logccdf(d, Inf) == -Inf
-@test isnan(logccdf(d, NaN))
-
-@test quantile(d, 0) == -60
-@test quantile(d, .1) == -60
-@test quantile(d, .3) == 40
-@test quantile(d, .7) == 80
-@test quantile(d, 1) == 120
-@test minimum(d) == -60
-@test maximum(d) == 120
-
-@test insupport(d, -60)
-@test insupport(d, -60.)
-@test !insupport(d, 20)
-@test !insupport(d, 20.)
-@test insupport(d, 80)
-@test !insupport(d, 150)
-
-xs = support(d)
-ws = ProbabilityWeights(probs(d))
-@test mean(d) ≈ mean(xs, ws)
-@test var(d) ≈ var(xs, ws, corrected=false)
-@test skewness(d) ≈ skewness(xs, ws)
-@test kurtosis(d) ≈ kurtosis(xs, ws)
-@test entropy(d) ≈ 1.2798542258336676
-@test mode(d) == 40
-@test modes(d) == [40]
-@test mgf(d, 0) ≈ 1.0
-@test mgf(d, 0.17) ≈ 7.262034e7
-@test cf(d, 0) ≈ 1.0
-@test cf(d, 0.17) ≈ 0.3604521478 + 0.6953481124im
-
-# Fitting
-xs = [1, 2,3,4,5,4,3,2,3,2,1]
-ws = [1.,2,1,0,1,1,2,3,2,6,1]
-
-ss = suffstats(DiscreteNonParametric, xs)
-@test ss isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}}
-@test ss.support == [1,2,3,4,5]
-@test ss.freq == [2., 3., 3., 2., 1.]
-
-ss2 = suffstats(DiscreteNonParametric, xs, ws)
-@test ss2 isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}}
-@test ss2.support == [1,2,3,4,5]
-@test ss2.freq == [2., 11., 5., 1., 1.]
-
-d1 = fit_mle(DiscreteNonParametric, ss)
-@test d1 isa DiscreteNonParametric{Int,Float64,Vector{Int}}
-@test support(d1) == ss.support
-@test probs(d1) ≈ ss.freq ./ 11
-
-d2 = fit_mle(DiscreteNonParametric, xs)
-@test typeof(d2) == typeof(d1)
-@test support(d2) == support(d1)
-@test probs(d2) ≈ probs(d1)
-
-d3 = fit(DiscreteNonParametric, xs)
-@test typeof(d2) == typeof(d1)
-@test support(d3) == support(d1)
-@test probs(d3) ≈ probs(d1)
-
-# Numerical stability; see issue #872 and PR #926
-p = [1 - eps(Float32), eps(Float32)]
-d = Categorical(p)
-@test ([rand(d) for _ = 1:100_000]; true)
-
-# Numerical stability w/ large prob vectors;
-# see issue #1017
-n = 20000 # large vector length
-p = Float32[0.5; fill(0.5/(n ÷ 2) - 3e-8, n ÷ 2); fill(eps(Float64), n ÷ 2)]
-d = Categorical(p)
-rng = AllOneRNG()
-@test (rand(rng, d); true)
-
-# Sampling with values of zero probability; see issue #1105
-d = DiscreteNonParametric([0, 1, 2, 3, 4], [0.0f0, 0.1f0, 0.2f0, 0.3f0, 0.4f0])
-@test iszero(count(iszero(rand(d)) for _ in 1:100_000_000))
-
-d = DiscreteNonParametric([4, 3, 2, 1, 0], [0.4f0, 0.3f0, 0.2f0, 0.1f0, 0.0f0])
-@test iszero(count(iszero(rand(d)) for _ in 1:100_000_000))
-
-# Sampling with integer-valued probabilities; see issue #1111
-d = DiscreteNonParametric([1, 2], [0, 1])
-@test iszero(count(isone(rand(d)) for _ in 1:100))
-
-d = DiscreteNonParametric([2, 1], [1, 0])
-@test iszero(count(isone(rand(d)) for _ in 1:100))
+@testset "Testing matrix-variates with $key" for (key, func) in Dict(
+    "rand(...)" => [rand, rand],
+    "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)],
+)
+
+    d = DiscreteNonParametric([40.0, 80.0, 120.0, -60.0], [0.4, 0.3, 0.1, 0.2])
+
+    @test !(
+        d ≈ DiscreteNonParametric(
+            [40.0, 80, 120, -60],
+            [0.4, 0.3, 0.1, 0.2],
+            check_args = false,
+        )
+    )
+    @test d ≈ DiscreteNonParametric(
+        [-60.0, 40.0, 80, 120],
+        [0.2, 0.4, 0.3, 0.1],
+        check_args = false,
+    )
+
+    # Invalid probability
+    @test_throws DomainError DiscreteNonParametric(
+        [40.0, 80, 120, -60],
+        [0.5, 0.3, 0.1, 0.2],
+    )
+
+    # Invalid probability, but no arg check
+    DiscreteNonParametric([40.0, 80, 120, -60], [0.5, 0.3, 0.1, 0.2], check_args = false)
+
+    test_range(d)
+    vs = Distributions.get_evalsamples(d, 0.00001)
+    test_evaluation(d, vs, true)
+    test_stats(d, vs)
+    test_params(d)
+
+    @test func[1](d) ∈ [40.0, 80.0, 120.0, -60.0]
+    @test func[1](sampler(d)) ∈ [40.0, 80.0, 120.0, -60.0]
+
+    @test pdf(d, -100.0) == 0.0
+    @test pdf(d, -100) == 0.0
+    @test pdf(d, -60.0) == 0.2
+    @test pdf(d, -60) == 0.2
+    @test pdf(d, 100.0) == 0.0
+    @test pdf(d, 120.0) == 0.1
+
+    @test iszero(cdf(d, -Inf))
+    @test cdf(d, -100.0) == 0.0
+    @test cdf(d, -100) == 0.0
+    @test cdf(d, 0.0) ≈ 0.2
+    @test cdf(d, 100.0) ≈ 0.9
+    @test cdf(d, 100) ≈ 0.9
+    @test cdf(d, 150.0) == 1.0
+    @test isone(cdf(d, Inf))
+    @test isnan(cdf(d, NaN))
+
+    @test isone(ccdf(d, -Inf))
+    @test ccdf(d, -100.0) == 1.0
+    @test ccdf(d, -100) == 1.0
+    @test ccdf(d, 0.0) ≈ 0.8
+    @test ccdf(d, 100.0) ≈ 0.1
+    @test ccdf(d, 100) ≈ 0.1
+    @test ccdf(d, 150.0) == 0
+    @test iszero(ccdf(d, Inf))
+    @test isnan(ccdf(d, NaN))
+
+    @test logcdf(d, -Inf) == -Inf
+    @test logcdf(d, -100.0) == -Inf
+    @test logcdf(d, -100) == -Inf
+    @test logcdf(d, 0.0) ≈ log(0.2)
+    @test logcdf(d, 100.0) ≈ log(0.9)
+    @test logcdf(d, 100) ≈ log(0.9)
+    @test iszero(logcdf(d, 150.0))
+    @test iszero(logcdf(d, Inf))
+    @test isnan(logcdf(d, NaN))
+
+    @test iszero(logccdf(d, -Inf))
+    @test iszero(logccdf(d, -100.0))
+    @test iszero(logccdf(d, -100))
+    @test logccdf(d, 0.0) ≈ log(0.8)
+    @test logccdf(d, 100.0) ≈ log(0.1)
+    @test logccdf(d, 100) ≈ log(0.1)
+    @test logccdf(d, 150.0) == -Inf
+    @test logccdf(d, Inf) == -Inf
+    @test isnan(logccdf(d, NaN))
+
+    @test quantile(d, 0) == -60
+    @test quantile(d, 0.1) == -60
+    @test quantile(d, 0.3) == 40
+    @test quantile(d, 0.7) == 80
+    @test quantile(d, 1) == 120
+    @test minimum(d) == -60
+    @test maximum(d) == 120
+
+    @test insupport(d, -60)
+    @test insupport(d, -60.0)
+    @test !insupport(d, 20)
+    @test !insupport(d, 20.0)
+    @test insupport(d, 80)
+    @test !insupport(d, 150)
+
+    xs = support(d)
+    ws = ProbabilityWeights(probs(d))
+    @test mean(d) ≈ mean(xs, ws)
+    @test var(d) ≈ var(xs, ws, corrected = false)
+    @test skewness(d) ≈ skewness(xs, ws)
+    @test kurtosis(d) ≈ kurtosis(xs, ws)
+    @test entropy(d) ≈ 1.2798542258336676
+    @test mode(d) == 40
+    @test modes(d) == [40]
+    @test mgf(d, 0) ≈ 1.0
+    @test mgf(d, 0.17) ≈ 7.262034e7
+    @test cf(d, 0) ≈ 1.0
+    @test cf(d, 0.17) ≈ 0.3604521478 + 0.6953481124im
+
+    # Fitting
+    xs = [1, 2, 3, 4, 5, 4, 3, 2, 3, 2, 1]
+    ws = [1.0, 2, 1, 0, 1, 1, 2, 3, 2, 6, 1]
+
+    ss = suffstats(DiscreteNonParametric, xs)
+    @test ss isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}}
+    @test ss.support == [1, 2, 3, 4, 5]
+    @test ss.freq == [2.0, 3.0, 3.0, 2.0, 1.0]
+
+    ss2 = suffstats(DiscreteNonParametric, xs, ws)
+    @test ss2 isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}}
+    @test ss2.support == [1, 2, 3, 4, 5]
+    @test ss2.freq == [2.0, 11.0, 5.0, 1.0, 1.0]
+
+    d1 = fit_mle(DiscreteNonParametric, ss)
+    @test d1 isa DiscreteNonParametric{Int,Float64,Vector{Int}}
+    @test support(d1) == ss.support
+    @test probs(d1) ≈ ss.freq ./ 11
+
+    d2 = fit_mle(DiscreteNonParametric, xs)
+    @test typeof(d2) == typeof(d1)
+    @test support(d2) == support(d1)
+    @test probs(d2) ≈ probs(d1)
+
+    d3 = fit(DiscreteNonParametric, xs)
+    @test typeof(d2) == typeof(d1)
+    @test support(d3) == support(d1)
+    @test probs(d3) ≈ probs(d1)
+
+    # Numerical stability; see issue #872 and PR #926
+    p = [1 - eps(Float32), eps(Float32)]
+    d = Categorical(p)
+    @test ([rand(d) for _ = 1:100_000]; true)
+
+    # Numerical stability w/ large prob vectors;
+    # see issue #1017
+    n = 20000 # large vector length
+    p = Float32[0.5; fill(0.5 / (n ÷ 2) - 3e-8, n ÷ 2); fill(eps(Float64), n ÷ 2)]
+    d = Categorical(p)
+    rng = AllOneRNG()
+    @test (rand(rng, d); true)
+
+    # Sampling with values of zero probability; see issue #1105
+    d = DiscreteNonParametric([0, 1, 2, 3, 4], [0.0f0, 0.1f0, 0.2f0, 0.3f0, 0.4f0])
+    @test iszero(count(iszero(rand(d)) for _ = 1:100_000_000))
+
+    d = DiscreteNonParametric([4, 3, 2, 1, 0], [0.4f0, 0.3f0, 0.2f0, 0.1f0, 0.0f0])
+    @test iszero(count(iszero(rand(d)) for _ = 1:100_000_000))
+
+    # Sampling with integer-valued probabilities; see issue #1111
+    d = DiscreteNonParametric([1, 2], [0, 1])
+    @test iszero(count(isone(rand(d)) for _ = 1:100))
+
+    d = DiscreteNonParametric([2, 1], [1, 0])
+    @test iszero(count(isone(rand(d)) for _ = 1:100))

 @testset "type stability" begin
     d = DiscreteNonParametric([0, 1], [0.5, 0.5])
@@ -192,7 +206,7 @@ end
     # Same support, different probabilities
     @test d2 != d1
     @test !isapprox(d2, d1)
-    @test d2 ≈ d1 atol=0.4
+    @test d2 ≈ d1 atol = 0.4

     # Different support, same probabilities
     @test d3 != d1
@@ -201,16 +215,19 @@ end
     # Different support, different probabilities, same dimension
     @test d4 != d1
     @test !isapprox(d4, d1)
-    @test d4 ≈ d1 atol=0.4
+    @test d4 ≈ d1 atol = 0.4

     # Different cardinality of the support
     @test d5 != d1
     @test !isapprox(d5, d1)

     # issue #1140
-    @test DiscreteNonParametric(1:2, [0.5, 0.5]) != DiscreteNonParametric(1:3, [0.2, 0.4, 0.4])
+    @test DiscreteNonParametric(1:2, [0.5, 0.5]) !=
+          DiscreteNonParametric(1:3, [0.2, 0.4, 0.4])

     # Different types
-    @test DiscreteNonParametric(1:2, [0.5, 0.5]) == DiscreteNonParametric([1, 2], [0.5f0, 0.5f0])
-    @test DiscreteNonParametric(1:2, [0.5, 0.5]) ≈ DiscreteNonParametric([1, 2], [0.5f0, 0.5f0])
-end
\ No newline at end of file
+    @test DiscreteNonParametric(1:2, [0.5, 0.5]) ==
+          DiscreteNonParametric([1, 2], [0.5f0, 0.5f0])
+    @test DiscreteNonParametric(1:2, [0.5, 0.5]) ≈
+          DiscreteNonParametric([1, 2], [0.5f0, 0.5f0])
+end
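The fitting assertions in the diff above boil down to frequency counting: `suffstats` tallies (optionally weighted) counts per support point, and `fit_mle` normalizes those counts, so the fitted probabilities are just relative frequencies. Compactly, mirroring the test data:

    using Distributions

    xs = [1, 2, 3, 4, 5, 4, 3, 2, 3, 2, 1]
    ss = suffstats(DiscreteNonParametric, xs)
    d = fit_mle(DiscreteNonParametric, ss)
    # the MLE is the empirical distribution: counts divided by the total
    @assert probs(d) ≈ ss.freq ./ length(xs)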
diff --git a/test/univariate/discrete/discreteuniform.jl b/test/univariate/discrete/discreteuniform.jl
index 008397f2f4..df06bde7bf 100644
--- a/test/univariate/discrete/discreteuniform.jl
+++ b/test/univariate/discrete/discreteuniform.jl
@@ -9,17 +9,17 @@ using Test

     for (a, b) in ((-8, -6), (-8, -5), (-5, 1), (-5, 2), (-2, 3), (-2, 4), (2, 4), (2, 5))
         d = DiscreteUniform(a, b)
-        for x in a:b
+        for x = a:b
             @test @inferred(quantile(d, cdf(d, x))) === x
         end

-        @test @inferred(median(d)) === quantile(d, 1//2)
+        @test @inferred(median(d)) === quantile(d, 1 // 2)
     end
 end

 @testset "type stability regression tests" begin
-    @test @inferred(mgf(DiscreteUniform(2, 5), 0//1)) === 1.0
-    @test @inferred(cf(DiscreteUniform(1, 3), 0//1)) === 1.0 + 0.0im
+    @test @inferred(mgf(DiscreteUniform(2, 5), 0 // 1)) === 1.0
+    @test @inferred(cf(DiscreteUniform(1, 3), 0 // 1)) === 1.0 + 0.0im
 end

 @testset "fit: array indexing (#1253)" begin
diff --git a/test/univariate/discrete/geometric.jl b/test/univariate/discrete/geometric.jl
index 82f7a1e2ce..fe2ca7ff9e 100644
--- a/test/univariate/discrete/geometric.jl
+++ b/test/univariate/discrete/geometric.jl
@@ -15,8 +15,8 @@ using FiniteDifferences
     m2 = var(d) + mean(d)^2
     @test fdm2(Base.Fix1(mgf, d), 0) ≈ m2
     @test fdm2(Base.Fix1(cf, d), 0) ≈ -m2
-    test_cgf(Geometric(0.1), (1f-1, -1e6))
-    test_cgf(Geometric(0.5), (1f-1, -1e6))
+    test_cgf(Geometric(0.1), (1.0f-1, -1e6))
+    test_cgf(Geometric(0.5), (1.0f-1, -1e6))
 end

 @testset "Support" begin
diff --git a/test/univariate/discrete/negativebinomial.jl b/test/univariate/discrete/negativebinomial.jl
index f7d9bf6ad2..07b8a1e505 100644
--- a/test/univariate/discrete/negativebinomial.jl
+++ b/test/univariate/discrete/negativebinomial.jl
@@ -7,11 +7,12 @@ using StatsFuns
 # Currently, most of the tests for NegativeBinomial are in the "ref" folder.
 # Eventually, we might want to consolidate the tests here

-test_cgf(NegativeBinomial(10,0.5), (-1f0, -200.0,-1e6))
-test_cgf(NegativeBinomial(3,0.1), (-1f0, -200.0,-1e6))
+test_cgf(NegativeBinomial(10, 0.5), (-1.0f0, -200.0, -1e6))
+test_cgf(NegativeBinomial(3, 0.1), (-1.0f0, -200.0, -1e6))

-mydiffp(r, p, k) = iszero(k) ? r/p : r/p - k/(1 - p)
-mydiffr(r, p, k) = iszero(k) ? log(p) : log(p) - inv(k + r) - digamma(r) + digamma(r + k + 1)
+mydiffp(r, p, k) = iszero(k) ? r / p : r / p - k / (1 - p)
+mydiffr(r, p, k) =
+    iszero(k) ? log(p) : log(p) - inv(k + r) - digamma(r) + digamma(r + k + 1)

 @testset "issue #1603" begin
     d = NegativeBinomial(4, 0.2)
@@ -27,13 +28,14 @@ mydiffr(r, p, k) = iszero(k) ? log(p) : log(p) - inv(k + r) - digamma(r) + digam
     @test fdm2(Base.Fix1(cf, d), 0) ≈ -m2
 end

-@testset "NegativeBinomial r=$r, p=$p, k=$k" for
-    p in exp10.(-10:0) .- eps(), # avoid p==1 since it's not differentiable
-    r in exp10.(range(-10, stop=2, length=25)),
-    k in (0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024)
+@testset "NegativeBinomial r=$r, p=$p, k=$k" for p in exp10.(-10:0) .- eps(), # avoid p==1 since it's not differentiable
+    r in exp10.(range(-10, stop = 2, length = 25)),
+    k in (0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024)

-    @test ForwardDiff.derivative(_p -> logpdf(NegativeBinomial(r, _p), k), p) ≈ mydiffp(r, p, k) rtol=1e-12 atol=1e-12
-    @test ForwardDiff.derivative(_r -> logpdf(NegativeBinomial(_r, p), k), r) ≈ mydiffr(r, p, k) rtol=1e-12 atol=1e-12
+    @test ForwardDiff.derivative(_p -> logpdf(NegativeBinomial(r, _p), k), p) ≈
+          mydiffp(r, p, k) rtol = 1e-12 atol = 1e-12
+    @test ForwardDiff.derivative(_r -> logpdf(NegativeBinomial(_r, p), k), r) ≈
+          mydiffr(r, p, k) rtol = 1e-12 atol = 1e-12
 end

 @testset "Check the corner case p==1" begin
@@ -47,7 +49,7 @@ end
 end

 @testset "Check the corner case k==0" begin
-    for r in randexp(5), p in rand(5) 
+    for r in randexp(5), p in rand(5)
         @test @inferred(logpdf(NegativeBinomial(r, p), 0)) === xlogy(r, p)
     end
 end
@@ -58,26 +60,35 @@ end
     # Test with values in and outside of support
     p = rand()
     dist = NegativeBinomial(r, p)
-    fdm = central_fdm(5, 1; max_range=min(r, p, 1-p)/2) # avoids numerical issues with finite differencing
+    fdm = central_fdm(5, 1; max_range = min(r, p, 1 - p) / 2) # avoids numerical issues with finite differencing
     for k in (0, 10, 42, -1, -5, -13)
         # Test both integers and floating point numbers.
         # For floating point numbers we have to tell FiniteDifferences explicitly that the
         # argument is non-differentiable. Otherwise it will compute `NaN` as derivative.
-        test_rrule(logpdf, dist, k; fdm=fdm, nans=true)
-        test_rrule(logpdf, dist, float(k) ⊢ ChainRulesTestUtils.NoTangent(); fdm=fdm, nans=true)
+        test_rrule(logpdf, dist, k; fdm = fdm, nans = true)
+        test_rrule(
+            logpdf,
+            dist,
+            float(k) ⊢ ChainRulesTestUtils.NoTangent();
+            fdm = fdm,
+            nans = true,
+        )
     end

     # Test edge case `p = 1` and `k = 0`
     dist = NegativeBinomial(r, 1)
-    fdm = backward_fdm(5, 1; max_range = r/10)
-    test_rrule(logpdf, dist, 0; fdm=fdm)
-    test_rrule(logpdf, dist, 0.0 ⊢ ChainRulesTestUtils.NoTangent(); fdm=fdm)
+    fdm = backward_fdm(5, 1; max_range = r / 10)
+    test_rrule(logpdf, dist, 0; fdm = fdm)
+    test_rrule(logpdf, dist, 0.0 ⊢ ChainRulesTestUtils.NoTangent(); fdm = fdm)
 end

 @testset "issue #1582" begin
     dp = mydiffp(1.0, 1.0, 0.0)
     @test dp == 1
-    @test ForwardDiff.derivative(p -> logpdf(NegativeBinomial(1.0, p; check_args=false), 0.0), 1.0) == dp
+    @test ForwardDiff.derivative(
+        p -> logpdf(NegativeBinomial(1.0, p; check_args = false), 0.0),
+        1.0,
+    ) == dp

     dr = mydiffr(1.0, 1.0, 0.0)
     @test dr == 0
20)] - d = PoissonBinomial(fill(p, n)) - dref = Binomial(n, p) - println(" testing PoissonBinomial p=$p, n=$n") - - @test isa(d, PoissonBinomial) - @test minimum(d) == 0 - @test maximum(d) == n - @test extrema(d) == (0, n) - @test ntrials(d) == n - @test @inferred(entropy(d)) ≈ entropy(dref) - @test @inferred(median(d)) ≈ median(dref) - @test @inferred(mean(d)) ≈ mean(dref) - @test @inferred(var(d)) ≈ var(dref) - @test @inferred(kurtosis(d)) ≈ kurtosis(dref) - @test @inferred(skewness(d)) ≈ skewness(dref) - - for t=0:5 - @test @inferred(mgf(d, t)) ≈ mgf(dref, t) - @test @inferred(cf(d, t)) ≈ cf(dref, t) - end - for i=0.1:0.1:.9 - @test @inferred(quantile(d, i)) ≈ quantile(dref, i) + function naive_pb(p::AbstractVector{T}) where {T<:Real} + x = p ./ (1 .- p) + naive_esf(x) * prod(1 ./ (1 .+ x)) end - for i=0:n - @test @inferred(pdf(d, i)) ≈ pdf(dref, i) atol=1e-14 - @test @inferred(pdf(d, i//1)) ≈ pdf(dref, i) atol=1e-14 - @test @inferred(logpdf(d, i)) ≈ logpdf(dref, i) - @test @inferred(logpdf(d, i//1)) ≈ logpdf(dref, i) - for f in (cdf, ccdf, logcdf, logccdf) - @test @inferred(f(d, i)) ≈ f(dref, i) rtol=1e-6 - @test @inferred(f(d, i//1)) ≈ f(dref, i) rtol=1e-6 - @test @inferred(f(d, i + 0.5)) ≈ f(dref, i) rtol=1e-6 + + # test PoissonBinomial PMF algorithms + x = [3.5118, 0.6219, 0.2905, 0.8450, 1.8648] + n = length(x) + p = x ./ (1 .+ x) + + naive_sol = naive_pb(p) + + @test Distributions.poissonbinomial_pdf_fft(p) ≈ naive_sol + @test Distributions.poissonbinomial_pdf(p) ≈ naive_sol + @test Distributions.poissonbinomial_pdf(Tuple(p)) ≈ naive_sol + + @test Distributions.poissonbinomial_pdf_fft(p) ≈ Distributions.poissonbinomial_pdf(p) + + # Test the special base where PoissonBinomial distribution reduces + # to Binomial distribution + for (p, n) in [(0.8, 6), (0.5, 10), (0.04, 20)] + d = PoissonBinomial(fill(p, n)) + dref = Binomial(n, p) + println(" testing PoissonBinomial p=$p, n=$n") + + @test isa(d, PoissonBinomial) + @test minimum(d) == 0 + @test maximum(d) == n + @test extrema(d) == (0, n) + @test ntrials(d) == n + @test @inferred(entropy(d)) ≈ entropy(dref) + @test @inferred(median(d)) ≈ median(dref) + @test @inferred(mean(d)) ≈ mean(dref) + @test @inferred(var(d)) ≈ var(dref) + @test @inferred(kurtosis(d)) ≈ kurtosis(dref) + @test @inferred(skewness(d)) ≈ skewness(dref) + + for t = 0:5 + @test @inferred(mgf(d, t)) ≈ mgf(dref, t) + @test @inferred(cf(d, t)) ≈ cf(dref, t) + end + for i = 0.1:0.1:0.9 + @test @inferred(quantile(d, i)) ≈ quantile(dref, i) + end + for i = 0:n + @test @inferred(pdf(d, i)) ≈ pdf(dref, i) atol = 1e-14 + @test @inferred(pdf(d, i // 1)) ≈ pdf(dref, i) atol = 1e-14 + @test @inferred(logpdf(d, i)) ≈ logpdf(dref, i) + @test @inferred(logpdf(d, i // 1)) ≈ logpdf(dref, i) + for f in (cdf, ccdf, logcdf, logccdf) + @test @inferred(f(d, i)) ≈ f(dref, i) rtol = 1e-6 + @test @inferred(f(d, i // 1)) ≈ f(dref, i) rtol = 1e-6 + @test @inferred(f(d, i + 0.5)) ≈ f(dref, i) rtol = 1e-6 + end end - end - @test iszero(@inferred(cdf(d, -Inf))) - @test isone(@inferred(cdf(d, Inf))) - @test @inferred(logcdf(d, -Inf)) == -Inf - @test iszero(@inferred(logcdf(d, Inf))) - @test isone(@inferred(ccdf(d, -Inf))) - @test iszero(@inferred(ccdf(d, Inf))) - @test iszero(@inferred(logccdf(d, -Inf))) - @test @inferred(logccdf(d, Inf)) == -Inf - for f in (cdf, ccdf, logcdf, logccdf) - @test isnan(f(d, NaN)) + @test iszero(@inferred(cdf(d, -Inf))) + @test isone(@inferred(cdf(d, Inf))) + @test @inferred(logcdf(d, -Inf)) == -Inf + @test iszero(@inferred(logcdf(d, Inf))) + @test 
isone(@inferred(ccdf(d, -Inf))) + @test iszero(@inferred(ccdf(d, Inf))) + @test iszero(@inferred(logccdf(d, -Inf))) + @test @inferred(logccdf(d, Inf)) == -Inf + for f in (cdf, ccdf, logcdf, logccdf) + @test isnan(f(d, NaN)) + end end -end -# Test against a sum of three Binomial distributions -for (n₁, n₂, n₃, p₁, p₂, p₃) in [(10, 10, 10, 0.1, 0.5, 0.9), - (1, 10, 100, 0.99, 0.1, 0.05), - (5, 1, 3, 0.01, 0.99, 0.999), - (10, 7, 10, 0., 0.9, 0.5)] - - n = n₁ + n₂ + n₃ - p = zeros(n) - p[1:n₁] .= p₁ - p[n₁+1: n₁ + n₂] .= p₂ - p[n₁ + n₂ + 1:end] .= p₃ - d = PoissonBinomial(p) - println(" testing PoissonBinomial [$(n₁) × $(p₁), $(n₂) × $(p₂), $(n₃) × $(p₃)]") - b1 = Binomial(n₁, p₁) - b2 = Binomial(n₂, p₂) - b3 = Binomial(n₃, p₃) - - pmf1 = Base.Fix1(pdf, b1).(support(b1)) - pmf2 = Base.Fix1(pdf, b2).(support(b2)) - pmf3 = Base.Fix1(pdf, b3).(support(b3)) - - @test @inferred(mean(d)) ≈ (mean(b1) + mean(b2) + mean(b3)) - @test @inferred(var(d)) ≈ (var(b1) + var(b2) + var(b3)) - for t=0:5 - @test @inferred(mgf(d, t)) ≈ (mgf(b1, t) * mgf(b2, t) * mgf(b3, t)) - @test @inferred(cf(d, t)) ≈ (cf(b1, t) * cf(b2, t) * cf(b3, t)) - end + # Test against a sum of three Binomial distributions + for (n₁, n₂, n₃, p₁, p₂, p₃) in [ + (10, 10, 10, 0.1, 0.5, 0.9), + (1, 10, 100, 0.99, 0.1, 0.05), + (5, 1, 3, 0.01, 0.99, 0.999), + (10, 7, 10, 0.0, 0.9, 0.5), + ] + + n = n₁ + n₂ + n₃ + p = zeros(n) + p[1:n₁] .= p₁ + p[n₁+1:n₁+n₂] .= p₂ + p[n₁+n₂+1:end] .= p₃ + d = PoissonBinomial(p) + println(" testing PoissonBinomial [$(n₁) × $(p₁), $(n₂) × $(p₂), $(n₃) × $(p₃)]") + b1 = Binomial(n₁, p₁) + b2 = Binomial(n₂, p₂) + b3 = Binomial(n₃, p₃) + + pmf1 = Base.Fix1(pdf, b1).(support(b1)) + pmf2 = Base.Fix1(pdf, b2).(support(b2)) + pmf3 = Base.Fix1(pdf, b3).(support(b3)) + + @test @inferred(mean(d)) ≈ (mean(b1) + mean(b2) + mean(b3)) + @test @inferred(var(d)) ≈ (var(b1) + var(b2) + var(b3)) + for t = 0:5 + @test @inferred(mgf(d, t)) ≈ (mgf(b1, t) * mgf(b2, t) * mgf(b3, t)) + @test @inferred(cf(d, t)) ≈ (cf(b1, t) * cf(b2, t) * cf(b3, t)) + end - for k=0:n - m = 0. - for i=0:min(n₁, k) - mc = 0. 
- for j=i:min(i+n₂, k) - mc += (k - j <= n₃) && pmf2[j-i+1] * pmf3[k-j+1] + for k = 0:n + m = 0.0 + for i = 0:min(n₁, k) + mc = 0.0 + for j = i:min(i + n₂, k) + mc += (k - j <= n₃) && pmf2[j-i+1] * pmf3[k-j+1] + end + m += pmf1[i+1] * mc end - m += pmf1[i+1] * mc + @test @inferred(pdf(d, k)) ≈ m atol = 1e-14 + @test @inferred(pdf(d, k // 1)) ≈ m atol = 1e-14 + @test @inferred(logpdf(d, k)) ≈ log(m) + @test @inferred(logpdf(d, k // 1)) ≈ log(m) end - @test @inferred(pdf(d, k)) ≈ m atol=1e-14 - @test @inferred(pdf(d, k//1)) ≈ m atol=1e-14 - @test @inferred(logpdf(d, k)) ≈ log(m) - @test @inferred(logpdf(d, k//1)) ≈ log(m) end -end -# Test the _dft helper function -@testset "_dft" begin - x = Distributions._dft(collect(1:8)) - # Comparator computed from FFTW - fftw_fft = [36.0 + 0.0im, - -4.0 + 9.65685424949238im, - -4.0 + 4.0im, - -4.0 + 1.6568542494923806im, - -4.0 + 0.0im, - -4.0 - 1.6568542494923806im, - -4.0 - 4.0im, - -4.0 - 9.65685424949238im] - @test x ≈ fftw_fft -end + # Test the _dft helper function + @testset "_dft" begin + x = Distributions._dft(collect(1:8)) + # Comparator computed from FFTW + fftw_fft = [ + 36.0 + 0.0im, + -4.0 + 9.65685424949238im, + -4.0 + 4.0im, + -4.0 + 1.6568542494923806im, + -4.0 + 0.0im, + -4.0 - 1.6568542494923806im, + -4.0 - 4.0im, + -4.0 - 9.65685424949238im, + ] + @test x ≈ fftw_fft + end -@testset "automatic differentiation" begin - # Test autodiff using ForwardDiff - f = x -> logpdf(PoissonBinomial(x), 0) - at = [0.5, 0.5] - @test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol=1e-6) + @testset "automatic differentiation" begin + # Test autodiff using ForwardDiff + f = x -> logpdf(PoissonBinomial(x), 0) + at = [0.5, 0.5] + @test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1e-6) - # Test ChainRules definition - for f in (Distributions.poissonbinomial_pdf, Distributions.poissonbinomial_pdf_fft) - test_frule(f, rand(50)) - test_rrule(f, rand(50)) + # Test ChainRules definition + for f in (Distributions.poissonbinomial_pdf, Distributions.poissonbinomial_pdf_fft) + test_frule(f, rand(50)) + test_rrule(f, rand(50)) + end end end -end diff --git a/test/univariate/discrete/soliton.jl b/test/univariate/discrete/soliton.jl index af0aefddc1..ff5fd3b7f4 100644 --- a/test/univariate/discrete/soliton.jl +++ b/test/univariate/discrete/soliton.jl @@ -3,8 +3,8 @@ using Distributions @testset "Soliton" begin K, M, δ, atol = 100, 60, 0.2, 0 Ω = Soliton(K, M, δ, atol) - @test pdf(Ω, M) > pdf(Ω, M-1) - @test pdf(Ω, M) > pdf(Ω, M+1) + @test pdf(Ω, M) > pdf(Ω, M - 1) + @test pdf(Ω, M) > pdf(Ω, M + 1) @test cumsum(Base.Fix1(pdf, Ω).(1:K)) ≈ Base.Fix1(cdf, Ω).(1:K) @test cdf(Ω, 0) ≈ 0 @test cdf(Ω, K) ≈ 1 @@ -19,7 +19,7 @@ using Distributions K, M, δ, atol = 100, 60, 0.2, 1e-3 Ω = Soliton(K, M, δ, atol) - ds = [d for d in 1:K if pdf(Ω, d) > 0] + ds = [d for d = 1:K if pdf(Ω, d) > 0] @test all(Base.Fix1(pdf, Ω).(ds) .> atol) @test cdf(Ω, 0) ≈ 0 @test cdf(Ω, K) ≈ 1 diff --git a/test/univariate/locationscale.jl b/test/univariate/locationscale.jl index 805589e8a1..70cf62f1bd 100644 --- a/test/univariate/locationscale.jl +++ b/test/univariate/locationscale.jl @@ -1,24 +1,24 @@ function test_location_scale( - rng::Union{AbstractRNG, Missing}, - μ::Real, σ::Real, ρ::UnivariateDistribution, dref::UnivariateDistribution, + rng::Union{AbstractRNG,Missing}, + μ::Real, + σ::Real, + ρ::UnivariateDistribution, + dref::UnivariateDistribution, ) d = Distributions.AffineDistribution(μ, σ, ρ) - @test params(d) == (μ,σ,ρ) + @test params(d) == (μ, σ, ρ) @test eltype(d) === 
eltype(dref) # Different ways to construct the AffineDistribution object if dref isa DiscreteDistribution # floating point division introduces numerical errors # Better: multiply with rational numbers - d_dict = Dict( - "original" => d, - "sugar" => σ * ρ + μ, - ) + d_dict = Dict("original" => d, "sugar" => σ * ρ + μ) else d_dict = Dict( "original" => d, "sugar" => σ * ρ + μ, - "composed" => μ + ((2 * ρ) * σ - 2) / 2 + 1 + "composed" => μ + ((2 * ρ) * σ - 2) / 2 + 1, ) end @@ -27,7 +27,7 @@ function test_location_scale( #### Support / Domain @testset "Support" begin - @testset "$k" for (k,dtest) in d_dict + @testset "$k" for (k, dtest) in d_dict @test minimum(dtest) == minimum(dref) @test maximum(dtest) == maximum(dref) @test extrema(dtest) == (minimum(dref), maximum(dref)) @@ -39,11 +39,11 @@ function test_location_scale( #### Promotions and conversions @testset "Promotions and conversions" begin - @testset "$k" for (k,dtest) in d_dict + @testset "$k" for (k, dtest) in d_dict if dtest isa Distributions.AffineDistribution @test typeof(dtest.μ) === typeof(dtest.σ) - @test location(dtest) ≈ μ atol=1e-15 - @test scale(dtest) ≈ σ atol=1e-15 + @test location(dtest) ≈ μ atol = 1e-15 + @test scale(dtest) ≈ σ atol = 1e-15 end end end @@ -51,7 +51,7 @@ function test_location_scale( #### Statistics @testset "Statistics" begin - @testset "$k" for (k,dtest) in d_dict + @testset "$k" for (k, dtest) in d_dict @test mean(dtest) ≈ mean(dref) @test median(dtest) ≈ median(dref) @test mode(dtest) ≈ mode(dref) @@ -68,14 +68,14 @@ function test_location_scale( @test ismesokurtic(dtest) == ismesokurtic(dref) @test entropy(dtest) ≈ entropy(dref) - @test mgf(dtest,-0.1) ≈ mgf(dref,-0.1) + @test mgf(dtest, -0.1) ≈ mgf(dref, -0.1) end end #### Evaluation & Sampling @testset "Evaluation & Sampling" begin - @testset "$k" for (k,dtest) in d_dict + @testset "$k" for (k, dtest) in d_dict xs = rand(dref, 5) x = first(xs) insupport(dtest, x) == insupport(dref, x) @@ -90,9 +90,9 @@ function test_location_scale( @test loglikelihood(dtest, xs) ≈ loglikelihood(dref, xs) @test cdf(dtest, x) ≈ cdf(dref, x) - @test logcdf(dtest, x) ≈ logcdf(dref, x) atol=1e-14 - @test ccdf(dtest, x) ≈ ccdf(dref, x) atol=1e-14 - @test logccdf(dtest, x) ≈ logccdf(dref, x) atol=1e-14 + @test logcdf(dtest, x) ≈ logcdf(dref, x) atol = 1e-14 + @test ccdf(dtest, x) ≈ ccdf(dref, x) atol = 1e-14 + @test logccdf(dtest, x) ≈ logccdf(dref, x) atol = 1e-14 @test quantile(dtest, 0.1) ≈ quantile(dref, 0.1) @test quantile(dtest, 0.5) ≈ quantile(dref, 0.5) @@ -116,9 +116,9 @@ function test_location_scale( else rand!(rng, dtest, r) end - @test mean(r) ≈ mean(dref) atol=0.02 - @test std(r) ≈ std(dref) atol=0.02 - @test cf(dtest, -0.1) ≈ cf(dref,-0.1) + @test mean(r) ≈ mean(dref) atol = 0.02 + @test std(r) ≈ std(dref) atol = 0.02 + @test cf(dtest, -0.1) ≈ cf(dref, -0.1) if dref isa ContinuousDistribution @test gradlogpdf(dtest, 0.1) ≈ gradlogpdf(dref, 0.1) @@ -128,16 +128,24 @@ function test_location_scale( end function test_location_scale_normal( - rng::Union{AbstractRNG, Missing}, μ::Real, σ::Real, μD::Real, σD::Real, + rng::Union{AbstractRNG,Missing}, + μ::Real, + σ::Real, + μD::Real, + σD::Real, ) ρ = Normal(μD, σD) - dref = Normal(μ + σ * μD, abs(σ) * σD) + dref = Normal(μ + σ * μD, abs(σ) * σD) @test dref === μ + σ * ρ return test_location_scale(rng, μ, σ, ρ, dref) end function test_location_scale_discretenonparametric( - rng::Union{AbstractRNG, Missing}, μ::Real, σ::Real, support, probs, + rng::Union{AbstractRNG,Missing}, + μ::Real, + σ::Real, + support, + 
probs, ) ρ = DiscreteNonParametric(support, probs) dref = DiscreteNonParametric(μ .+ σ .* support, probs) @@ -158,15 +166,33 @@ end @testset "DiscreteNonParametric" begin _probs = normalize!(rand(10), 1) @testset for _rng in (missing, rng), sign in (1, -1) - test_location_scale_discretenonparametric(_rng, 1//3, sign * 1//2, 1:10, _probs) - test_location_scale_discretenonparametric(_rng, -1//4, sign * 1//3, (-10):(-1), _probs) - test_location_scale_discretenonparametric(_rng, 6//5, sign * 3//2, 15:24, _probs) + test_location_scale_discretenonparametric( + _rng, + 1 // 3, + sign * 1 // 2, + 1:10, + _probs, + ) + test_location_scale_discretenonparametric( + _rng, + -1 // 4, + sign * 1 // 3, + (-10):(-1), + _probs, + ) + test_location_scale_discretenonparametric( + _rng, + 6 // 5, + sign * 3 // 2, + 15:24, + _probs, + ) end end @test_logs Distributions.AffineDistribution(1.0, 1, Normal()) @test_deprecated ls_norm = LocationScale(1.0, 1, Normal()) - @test ls_norm isa LocationScale{Float64, Continuous, Normal{Float64}} - @test ls_norm isa Distributions.AffineDistribution{Float64, Continuous, Normal{Float64}} + @test ls_norm isa LocationScale{Float64,Continuous,Normal{Float64}} + @test ls_norm isa Distributions.AffineDistribution{Float64,Continuous,Normal{Float64}} end diff --git a/test/univariate/orderstatistic.jl b/test/univariate/orderstatistic.jl index 5cd65cb856..21d1d8896b 100644 --- a/test/univariate/orderstatistic.jl +++ b/test/univariate/orderstatistic.jl @@ -4,7 +4,7 @@ using StatsBase @testset "OrderStatistic" begin @testset "basic" begin - for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i in 1:n + for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i = 1:n d = OrderStatistic(dist, n, i) @test d isa OrderStatistic if dist isa DiscreteUnivariateDistribution @@ -17,15 +17,15 @@ using StatsBase @test d.rank == i end @test_throws ArgumentError OrderStatistic(Normal(), 0, 1) - OrderStatistic(Normal(), 0, 1; check_args=false) + OrderStatistic(Normal(), 0, 1; check_args = false) @test_throws ArgumentError OrderStatistic(Normal(), 10, 11) - OrderStatistic(Normal(), 10, 11; check_args=false) + OrderStatistic(Normal(), 10, 11; check_args = false) @test_throws ArgumentError OrderStatistic(Normal(), 10, 0) - OrderStatistic(Normal(), 10, 0; check_args=false) + OrderStatistic(Normal(), 10, 0; check_args = false) end @testset "params" begin - for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i in 1:n + for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i = 1:n d = OrderStatistic(dist, n, i) @test params(d) == (params(dist)..., n, i) @test partype(d) === partype(dist) @@ -34,7 +34,7 @@ using StatsBase @testset "support" begin n = 10 - for i in 1:10 + for i = 1:10 d1 = OrderStatistic(Uniform(), n, i) @test minimum(d1) == 0 @test maximum(d1) == 1 @@ -78,13 +78,13 @@ using StatsBase @testset for dist in [Uniform(T(-2), T(1)), Normal(T(3), T(2)), Exponential(T(10))], n in [1, 10, 100], - i in 1:n + i = 1:n d = OrderStatistic(dist, n, i) c = factorial(big(n)) / factorial(big(i - 1)) / factorial(big(n - i)) # since density is concentrated around the i/n quantile, sample a point # nearby it - x = quantile(dist, clamp(i / n + (rand() - 1//2) / 10, 0, 1)) + x = quantile(dist, clamp(i / n + (rand() - 1 // 2) / 10, 0, 1)) p = cdf(dist, big(x)) pdf_exp = c * p^(i - 1) * (1 - p)^(n - i) * pdf(dist, big(x)) @test @inferred(T, pdf(d, x)) ≈ T(pdf_exp) @@ -97,7 +97,7 @@ using StatsBase @testset for dist in [DiscreteUniform(10, 
30), Poisson(100.0), Binomial(20, 0.3)], n in [1, 10, 100], - i in 1:n + i = 1:n d = OrderStatistic(dist, n, i) xs = quantile(dist, 0.01):quantile(dist, 0.99) @@ -120,7 +120,7 @@ using StatsBase Poisson(100), ], n in [1, 10, 20], - i in 1:n + i = 1:n d = OrderStatistic(dist, n, i) Distributions.expectation(one, d) ≈ 1 @@ -137,12 +137,12 @@ using StatsBase (DiscreteUniform(1, 10), DiscreteUniform(1, 10)), (Poisson(T(20)), Poisson(big(20))), ] - @testset for (dist, bigdist) in dists, n in [10, 100], i in 1:n + @testset for (dist, bigdist) in dists, n in [10, 100], i = 1:n dist isa DiscreteDistribution && T !== Float64 && continue d = OrderStatistic(dist, n, i) # since density is concentrated around the i/n quantile, sample a point # nearby it - x = quantile(dist, clamp(i / n + (rand() - 1//2) / 10, 1e-4, 1 - 1e-4)) + x = quantile(dist, clamp(i / n + (rand() - 1 // 2) / 10, 1e-4, 1 - 1e-4)) p = cdf(bigdist, big(x)) cdf_exp = sum(i:n) do j c = binomial(big(n), big(j)) @@ -169,8 +169,8 @@ using StatsBase xq = @inferred(T, quantile(d, q)) xqc = @inferred(T, cquantile(d, 1 - q)) @test xq ≈ xqc - @test isapprox(xq, T(x); atol=1e-4) || - (dist isa DiscreteDistribution && xq < x) + @test isapprox(xq, T(x); atol = 1e-4) || + (dist isa DiscreteDistribution && xq < x) end end end @@ -202,7 +202,7 @@ using StatsBase tol = quantile(Normal(), 1 - α) @testset for dist in [Uniform(), Exponential(), Poisson(20), Binomial(20, 0.3)] - @testset for n in [1, 10, 100], i in 1:n + @testset for n in [1, 10, 100], i = 1:n d = OrderStatistic(dist, n, i) x = rand(d, ndraws) m, v = mean_and_var(x) diff --git a/test/univariate_bounds.jl b/test/univariate_bounds.jl index aa633ec39a..c0ae9da898 100644 --- a/test/univariate_bounds.jl +++ b/test/univariate_bounds.jl @@ -3,7 +3,12 @@ using Test # to make sure that subtypes provides the required behavior without having to add # a dependency to InteractiveUtils -function _subtypes(m::Module, x::Type, sts=Base.IdSet{Any}(), visited=Base.IdSet{Module}()) +function _subtypes( + m::Module, + x::Type, + sts = Base.IdSet{Any}(), + visited = Base.IdSet{Module}(), +) push!(visited, m) xt = Base.unwrap_unionall(x) if !isa(xt, DataType) @@ -47,7 +52,7 @@ function _subtypes_in(mods::Array, x::Type) for m in mods _subtypes(m, x, sts, visited) end - return sort!(collect(sts), by=string) + return sort!(collect(sts), by = string) end get_subtypes(m::Module, x::Type) = _subtypes_in([m], x) @@ -61,7 +66,7 @@ filter!(x -> isbounded(x()), dists) @testset "bound checking $dist" for dist in dists d = dist() - lb,ub = float.(extrema(support(d))) + lb, ub = float.(extrema(support(d))) lb = prevfloat(lb) ub = nextfloat(ub) @test iszero(cdf(d, lb)) @@ -70,7 +75,7 @@ filter!(x -> isbounded(x()), dists) @test isone(cdf(d, Inf)) @test isnan(cdf(d, NaN)) - lb_lcdf = logcdf(d,lb) + lb_lcdf = logcdf(d, lb) @test isinf(lb_lcdf) & (lb_lcdf < 0) @test iszero(logcdf(d, ub)) @test logcdf(d, -Inf) == -Inf @@ -83,7 +88,7 @@ filter!(x -> isbounded(x()), dists) @test iszero(ccdf(d, Inf)) @test isnan(ccdf(d, NaN)) - ub_lccdf = logccdf(d,ub) + ub_lccdf = logccdf(d, ub) @test isinf(ub_lccdf) & (ub_lccdf < 0) @test iszero(logccdf(d, lb)) @test iszero(logccdf(d, -Inf)) diff --git a/test/univariates.jl b/test/univariates.jl index f6304de592..50711cde27 100644 --- a/test/univariates.jl +++ b/test/univariates.jl @@ -2,7 +2,7 @@ using Distributions import JSON -using Test +using Test function verify_and_test_drive(jsonfile, selected, n_tsamples::Int) @@ -43,12 +43,15 @@ _json_value(x::Number) = x 
_json_value(x::AbstractString) = x == "inf" ? Inf : - x == "-inf" ? -Inf : - x == "nan" ? NaN : - error("Invalid numerical value: $x") + x == "-inf" ? -Inf : x == "nan" ? NaN : error("Invalid numerical value: $x") -function verify_and_test(D::Union{Type,Function}, d::UnivariateDistribution, dct::Dict, n_tsamples::Int) +function verify_and_test( + D::Union{Type,Function}, + d::UnivariateDistribution, + dct::Dict, + n_tsamples::Int, +) # verify properties # # Note: properties include all applicable params and stats @@ -65,8 +68,14 @@ function verify_and_test(D::Union{Type,Function}, d::UnivariateDistribution, dct # verify parameter type # truncated parameters may be nothing @test partype(d) === mapfoldl( - typeof, (S, T) -> T <: Distribution ? promote_type(S, partype(T)) : (T <: Nothing ? S : promote_type(S, eltype(T))), pars; init = Union{}) - + typeof, + (S, T) -> + T <: Distribution ? promote_type(S, partype(T)) : + (T <: Nothing ? S : promote_type(S, eltype(T))), + pars; + init = Union{}, + ) + # promotion constructor: float_pars = map(x -> isa(x, AbstractFloat), pars) if length(pars) > 1 && sum(float_pars) > 1 && !isa(D, typeof(truncated)) @@ -90,7 +99,7 @@ function verify_and_test(D::Union{Type,Function}, d::UnivariateDistribution, dct expect_v = _json_value(val) f = eval(Symbol(fname)) @assert isa(f, Function) - @test isapprox(f(d), expect_v; atol=1e-12, rtol=1e-8, nans=true) + @test isapprox(f(d), expect_v; atol = 1e-12, rtol = 1e-8, nans = true) end @test extrema(d) == (minimum(d), maximum(d)) @@ -104,38 +113,43 @@ function verify_and_test(D::Union{Type,Function}, d::UnivariateDistribution, dct # pdf method is not implemented for StudentizedRange if !isa(d, StudentizedRange) - @test Base.Fix1(pdf, d).(x) ≈ p atol=1e-16 rtol=1e-8 - @test Base.Fix1(logpdf, d).(x) ≈ lp atol=isa(d, NoncentralHypergeometric) ? 1e-4 : 1e-12 + @test Base.Fix1(pdf, d).(x) ≈ p atol = 1e-16 rtol = 1e-8 + @test Base.Fix1(logpdf, d).(x) ≈ lp atol = + isa(d, NoncentralHypergeometric) ? 1e-4 : 1e-12 end # cdf method is not implemented for NormalInverseGaussian if !isa(d, NormalInverseGaussian) - @test isapprox(cdf(d, x), cf; atol=isa(d, NoncentralHypergeometric) ? 1e-8 : 1e-12) + @test isapprox( + cdf(d, x), + cf; + atol = isa(d, NoncentralHypergeometric) ? 
1e-8 : 1e-12, + ) end end # verify quantiles - if !isa(d, Union{Skellam, VonMises, NormalInverseGaussian}) + if !isa(d, Union{Skellam,VonMises,NormalInverseGaussian}) qts = dct["quans"] for qt in qts q = Float64(qt["q"]) x = Float64(qt["x"]) - @test isapprox(quantile(d, q), x, atol=1.0e-8) + @test isapprox(quantile(d, q), x, atol = 1.0e-8) end end try - m = mgf(d,0.0) + m = mgf(d, 0.0) @test m ≈ 1.0 catch e isa(e, MethodError) || throw(e) end try - c = cf(d,0.0) + c = cf(d, 0.0) @test c ≈ 1.0 # test some extra values: should all be well-defined - for t in (0.1,-0.1,1.0,-1.0) - @test !isnan(cf(d,t)) + for t in (0.1, -0.1, 1.0, -1.0) + @test !isnan(cf(d, t)) end catch e isa(e, MethodError) || throw(e) @@ -151,10 +165,7 @@ function verify_and_test(D::Union{Type,Function}, d::UnivariateDistribution, dct n_tsamples = min(n_tsamples, 100) end - if !isa(d, Union{Skellam, - VonMises, - NoncentralHypergeometric, - NormalInverseGaussian}) + if !isa(d, Union{Skellam,VonMises,NoncentralHypergeometric,NormalInverseGaussian}) test_distr(d, n_tsamples) end end @@ -162,8 +173,7 @@ end ## main -for c in ["discrete", - "continuous"] +for c in ["discrete", "continuous"] title = string(uppercase(c[1]), c[2:end]) println(" [$title]") @@ -185,16 +195,16 @@ end # #1471 @testset "InverseGamma constructor (#1471)" begin @test_throws DomainError InverseGamma(-1, 2) - InverseGamma(-1, 2; check_args=false) # no error + InverseGamma(-1, 2; check_args = false) # no error end # #1479 @testset "Inner and outer constructors" begin @test_throws DomainError InverseGaussian(0.0, 0.0) - @test InverseGaussian(0.0, 0.0; check_args=false) isa InverseGaussian{Float64} + @test InverseGaussian(0.0, 0.0; check_args = false) isa InverseGaussian{Float64} @test InverseGaussian{Float64}(0.0, 0.0) isa InverseGaussian{Float64} @test_throws DomainError Levy(0.0, 0.0) - @test Levy(0.0, 0.0; check_args=false) isa Levy{Float64} + @test Levy(0.0, 0.0; check_args = false) isa Levy{Float64} @test Levy{Float64}(0.0, 0.0) isa Levy{Float64} end diff --git a/test/utils.jl b/test/utils.jl index 2946689bcc..b83536a356 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -10,7 +10,7 @@ r = RealInterval(1.5, 4.0) @test partype(Gamma(1, 2)) == Float64 @test partype(Gamma(1.1, 2)) == Float64 -@test partype(Normal(1//1, 2//1)) == Rational{Int} +@test partype(Normal(1 // 1, 2 // 1)) == Rational{Int} @test partype(MvNormal(rand(Float32, 5), Matrix{Float32}(I, 5, 5))) == Float32 # special cases @@ -22,14 +22,14 @@ r = RealInterval(1.5, 4.0) A = rand(1:10, 5, 5) B = rand(Float32, 4) -C = 1//2 +C = 1 // 2 L = rand(Float32, 4, 4) D = PDMats.PDMat(L * L') # Ensure that utilities functions works with abstract arrays @test isprobvec(GenericArray([1, 1, 1])) == false -@test isprobvec(GenericArray([1/3, 1/3, 1/3])) +@test isprobvec(GenericArray([1 / 3, 1 / 3, 1 / 3])) # Positive definite matrix M = GenericArray([1.0 0.0; 0.0 1.0]) @@ -41,20 +41,23 @@ N = GenericArray([1.0 0.0; 1.0 0.0]) n = 10 -areal = randn(n,n)/2 -aimg = randn(n,n)/2 -@testset "For A containing $eltya" for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int) - ainit = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) +areal = randn(n, n) / 2 +aimg = randn(n, n) / 2 +@testset "For A containing $eltya" for eltya in + (Float32, Float64, ComplexF32, ComplexF64, Int) + ainit = + eltya == Int ? rand(1:7, n, n) : + convert(Matrix{eltya}, eltya <: Complex ? 
complex.(areal, aimg) : areal) @testset "Positive semi-definiteness" begin notsymmetric = ainit - notsquare = [ainit ainit] + notsquare = [ainit ainit] @test !ispossemdef(notsymmetric) @test !ispossemdef(notsquare) - for truerank in 0:n + for truerank = 0:n X = ainit[:, 1:truerank] A = truerank == 0 ? zeros(eltya, n, n) : X * X' @test ispossemdef(A) - for testrank in 0:n + for testrank = 0:n if testrank == truerank @test ispossemdef(A, testrank) else diff --git a/test/vonmises.jl b/test/vonmises.jl index 8765d0c2e2..b10bc8c41c 100644 --- a/test/vonmises.jl +++ b/test/vonmises.jl @@ -9,7 +9,7 @@ function test_vonmises(μ::Float64, κ::Float64) @test mean(d) == μ @test median(d) == μ @test mode(d) == μ - @test d == typeof(d)(params(d)...,d.I0κx) + @test d == typeof(d)(params(d)..., d.I0κx) @test d == deepcopy(d) @test partype(d) == Float64 # println(d) @@ -21,20 +21,16 @@ function test_vonmises(μ::Float64, κ::Float64) @test params(d32) == map(Float32, params(d)) # Support - @test support(d) == RealInterval(d.μ-π,d.μ+π) - @test pdf(d, d.μ-2π) == 0.0 - @test pdf(d, d.μ+2π) == 0.0 + @test support(d) == RealInterval(d.μ - π, d.μ + π) + @test pdf(d, d.μ - 2π) == 0.0 + @test pdf(d, d.μ + 2π) == 0.0 end ## General testing -for (μ, κ) in [(2.0, 1.0), - (2.0, 5.0), - (3.0, 1.0), - (3.0, 5.0), - (5.0, 2.0)] +for (μ, κ) in [(2.0, 1.0), (2.0, 5.0), (3.0, 1.0), (3.0, 5.0), (5.0, 2.0)] test_vonmises(μ, κ) end From 22ef918dc7988c2d0c130a382d12b53fb496b1ce Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 16:52:37 +0200 Subject: [PATCH 2/8] more formatting, + line in readme --- .formatting/Project.toml | 2 ++ .formatting/format_all.jl | 11 +++++++++++ README.md | 7 ++++++- src/matrix/lkj.jl | 10 +++++----- src/multivariate/multinomial.jl | 6 +++--- src/multivariates.jl | 4 ++-- src/samplers/multinomial.jl | 2 +- src/univariate/continuous/beta.jl | 13 +++++-------- src/univariate/continuous/betaprime.jl | 6 ++---- src/univariate/continuous/chernoff.jl | 2 +- src/univariate/continuous/chi.jl | 11 ++++------- src/univariate/continuous/fdist.jl | 7 +++---- src/univariate/continuous/frechet.jl | 3 +-- src/univariate/continuous/inversegamma.jl | 3 +-- src/univariate/continuous/ksdist.jl | 2 +- src/univariate/continuous/ksonesided.jl | 2 +- src/univariate/continuous/lognormal.jl | 6 ++---- src/univariate/continuous/normalcanon.jl | 3 +-- src/univariate/continuous/pareto.jl | 6 ++---- src/univariate/discrete/poissonbinomial.jl | 2 +- src/univariates.jl | 8 ++++---- test/censored.jl | 4 ++++ test/cholesky/lkjcholesky.jl | 4 ++-- test/fit.jl | 8 ++++---- test/matrixreshaped.jl | 18 +++++++++--------- test/matrixvariates.jl | 2 +- test/multivariate/jointorderstatistics.jl | 2 +- test/testutils.jl | 12 ++++++------ test/univariate/continuous/kumaraswamy.jl | 2 +- test/univariate/discrete/poissonbinomial.jl | 10 +++++----- 30 files changed, 92 insertions(+), 86 deletions(-) create mode 100644 .formatting/Project.toml create mode 100644 .formatting/format_all.jl diff --git a/.formatting/Project.toml b/.formatting/Project.toml new file mode 100644 index 0000000000..f3aab8b8bf --- /dev/null +++ b/.formatting/Project.toml @@ -0,0 +1,2 @@ +[deps] +JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899" diff --git a/.formatting/format_all.jl b/.formatting/format_all.jl new file mode 100644 index 0000000000..c4001aaba9 --- /dev/null +++ b/.formatting/format_all.jl @@ -0,0 +1,11 @@ +using JuliaFormatter + +project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..") + 
+not_formatted = format(project_path; verbose = true) +if not_formatted + @info "Formatting verified." +else + @warn "Formatting verification failed: Some files are not properly formatted!" +end +exit(not_formatted ? 0 : 1) diff --git a/README.md b/README.md index 0071cf3617..c4290bd516 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ not been reported yet on the issues of the repository. If not, you can file a new issue, add your version of the package which you can get with this command in the Julia REPL: ```julia -julia> ]status Distributions +julia> ] status Distributions ``` Be exhaustive in your report, summarize the bug, and provide: @@ -55,6 +55,11 @@ clone it and make modifications on a new branch, Once your changes are made, push them on your fork and create the Pull Request on the main repository. +To format the code, run the following command: +```bash +julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(".formatting/format_all.jl")' +``` + ### Requirements Distributions is a central package which many rely on, diff --git a/src/matrix/lkj.jl b/src/matrix/lkj.jl index 34672ee3a5..3ff40d1d76 100644 --- a/src/matrix/lkj.jl +++ b/src/matrix/lkj.jl @@ -135,7 +135,7 @@ function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.defa R[1, 2] = 2u - 1 R[2, 1] = R[1, 2] # 2. - for k = 2:d-1 + for k = 2:(d-1) # (a) β -= 0.5 # (b) @@ -230,7 +230,7 @@ function lkj_vine_loginvconst(d::Integer, η::Real) # Equation (16) in LKJ (2009 JMA) expsum = zero(η) betasum = zero(η) - for k = 1:d-1 + for k = 1:(d-1) α = η + 0.5(d - k - 1) expsum += (2η - 2 + d - k) * (d - k) betasum += (d - k) * logbeta(α, α) @@ -243,7 +243,7 @@ function lkj_vine_loginvconst_uniform(d::Integer) # Equation after (16) in LKJ (2009 JMA) expsum = 0.0 betasum = 0.0 - for k = 1:d-1 + for k = 1:(d-1) α = (k + 1) / 2 expsum += k^2 betasum += k * logbeta(α, α) @@ -255,7 +255,7 @@ end function lkj_loginvconst_alt(d::Integer, η::Real) # Third line in first proof of Section 3.3 in LKJ (2009 JMA) loginvconst = zero(η) - for k = 1:d-1 + for k = 1:(d-1) loginvconst += 0.5k * logπ + loggamma(η + 0.5(d - 1 - k)) - loggamma(η + 0.5(d - 1)) end return loginvconst @@ -264,7 +264,7 @@ end function corr_logvolume(n::Integer) # https://doi.org/10.4169/amer.math.monthly.123.9.909 logvol = 0.0 - for k = 1:n-1 + for k = 1:(n-1) logvol += 0.5k * logπ + k * loggamma((k + 1) / 2) - k * loggamma((k + 2) / 2) end return logvol diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl index 47c4a505e9..cb89192736 100644 --- a/src/multivariate/multinomial.jl +++ b/src/multivariate/multinomial.jl @@ -95,15 +95,15 @@ function cov(d::Multinomial{T}) where {T<:Real} C = Matrix{T}(undef, k, k) for j = 1:k pj = p[j] - for i = 1:j-1 + for i = 1:(j-1) @inbounds C[i, j] = -n * p[i] * pj end @inbounds C[j, j] = n * pj * (1 - pj) end - for j = 1:k-1 - for i = j+1:k + for j = 1:(k-1) + for i = (j+1):k @inbounds C[i, j] = C[j, i] end end diff --git a/src/multivariates.jl b/src/multivariates.jl index 9f95429a03..8bef508a1e 100644 --- a/src/multivariates.jl +++ b/src/multivariates.jl @@ -105,11 +105,11 @@ function cor(d::MultivariateDistribution) R = Matrix{eltype(C)}(undef, n, n) for j = 1:n - for i = 1:j-1 + for i = 1:(j-1) @inbounds R[i, j] = R[j, i] end R[j, j] = 1.0 - for i = j+1:n + for i = (j+1):n @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j]) end end diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl index 2a2f4bde51..61e570d492 100644 --- a/src/samplers/multinomial.jl 
+++ b/src/samplers/multinomial.jl @@ -35,7 +35,7 @@ function multinom_rand!( @inbounds x[k] = n else # n must have been zero z = zero(eltype(x)) - for j = i+1:k + for j = (i+1):k @inbounds x[j] = z end end diff --git a/src/univariate/continuous/beta.jl b/src/univariate/continuous/beta.jl index 812f16e105..1dd9635545 100644 --- a/src/univariate/continuous/beta.jl +++ b/src/univariate/continuous/beta.jl @@ -64,8 +64,7 @@ params(d::Beta) = (d.α, d.β) #### Statistics -mean(d::Beta) = ((α, β) = params(d); -α / (α + β)) +mean(d::Beta) = ((α, β) = params(d); α / (α + β)) function mode(d::Beta; check_args::Bool = true) α, β = params(d) @@ -85,11 +84,9 @@ function var(d::Beta) return (α * β) / (abs2(s) * (s + 1)) end -meanlogx(d::Beta) = ((α, β) = params(d); -digamma(α) - digamma(α + β)) +meanlogx(d::Beta) = ((α, β) = params(d); digamma(α) - digamma(α + β)) -varlogx(d::Beta) = ((α, β) = params(d); -trigamma(α) - trigamma(α + β)) +varlogx(d::Beta) = ((α, β) = params(d); trigamma(α) - trigamma(α + β)) stdlogx(d::Beta) = sqrt(varlogx(d)) function skewness(d::Beta) @@ -128,8 +125,8 @@ end @_delegate_statsfuns Beta beta α β -gradlogpdf(d::Beta{T}, x::Real) where {T<:Real} = ((α, β) = params(d); -0 <= x <= 1 ? (α - 1) / x - (β - 1) / (1 - x) : zero(T)) +gradlogpdf(d::Beta{T}, x::Real) where {T<:Real} = + ((α, β) = params(d); 0 <= x <= 1 ? (α - 1) / x - (β - 1) / (1 - x) : zero(T)) #### Sampling diff --git a/src/univariate/continuous/betaprime.jl b/src/univariate/continuous/betaprime.jl index 5b9596be9f..28bd276349 100644 --- a/src/univariate/continuous/betaprime.jl +++ b/src/univariate/continuous/betaprime.jl @@ -65,13 +65,11 @@ params(d::BetaPrime) = (d.α, d.β) #### Statistics function mean(d::BetaPrime{T}) where {T<:Real} - ((α, β) = params(d); - β > 1 ? α / (β - 1) : T(NaN)) + ((α, β) = params(d); β > 1 ? α / (β - 1) : T(NaN)) end function mode(d::BetaPrime{T}) where {T<:Real} - ((α, β) = params(d); - α > 1 ? (α - 1) / (β + 1) : zero(T)) + ((α, β) = params(d); α > 1 ? (α - 1) / (β + 1) : zero(T)) end function var(d::BetaPrime{T}) where {T<:Real} diff --git a/src/univariate/continuous/chernoff.jl b/src/univariate/continuous/chernoff.jl index 97ca4b139f..c574cbbdd5 100644 --- a/src/univariate/continuous/chernoff.jl +++ b/src/univariate/continuous/chernoff.jl @@ -308,7 +308,7 @@ function rand(rng::AbstractRNG, d::Chernoff) # Ziggurat random n 1.516689116183566 ] n = length(x) - i = rand(rng, 0:n-1) + i = rand(rng, 0:(n-1)) r = (2.0 * rand(rng) - 1) * ((i > 0) ? 
x[i] : A / y[1]) rabs = abs(r) if rabs < x[i+1] diff --git a/src/univariate/continuous/chi.jl b/src/univariate/continuous/chi.jl index 4d6a8b82d4..95c2a59e71 100644 --- a/src/univariate/continuous/chi.jl +++ b/src/univariate/continuous/chi.jl @@ -52,9 +52,7 @@ params(d::Chi) = (d.ν,) mean(d::Chi) = (h = d.ν / 2; sqrt2 * exp(loggamma(h + 1 // 2) - loggamma(h))) var(d::Chi) = d.ν - mean(d)^2 -_chi_skewness(μ::Real, σ::Real) = (σ2 = σ^2; -σ3 = σ2 * σ; -(μ / σ3) * (1 - 2σ2)) +_chi_skewness(μ::Real, σ::Real) = (σ2 = σ^2; σ3 = σ2 * σ; (μ / σ3) * (1 - 2σ2)) function skewness(d::Chi) μ = mean(d) @@ -69,8 +67,8 @@ function kurtosis(d::Chi) (2 / σ^2) * (1 - μ * σ * γ - σ^2) end -entropy(d::Chi{T}) where {T<:Real} = (ν = d.ν; -loggamma(ν / 2) - T(logtwo) / 2 - ((ν - 1) / 2) * digamma(ν / 2) + ν / 2) +entropy(d::Chi{T}) where {T<:Real} = + (ν = d.ν; loggamma(ν / 2) - T(logtwo) / 2 - ((ν - 1) / 2) * digamma(ν / 2) + ν / 2) function mode(d::Chi; check_args::Bool = true) ν = d.ν @@ -107,8 +105,7 @@ end #### Sampling -rand(rng::AbstractRNG, d::Chi) = (ν = d.ν; -sqrt(rand(rng, Gamma(ν / 2.0, 2.0one(ν))))) +rand(rng::AbstractRNG, d::Chi) = (ν = d.ν; sqrt(rand(rng, Gamma(ν / 2.0, 2.0one(ν))))) struct ChiSampler{S<:Sampleable{Univariate,Continuous}} <: Sampleable{Univariate,Continuous} s::S diff --git a/src/univariate/continuous/fdist.jl b/src/univariate/continuous/fdist.jl index 576259fb59..650fffa9bd 100644 --- a/src/univariate/continuous/fdist.jl +++ b/src/univariate/continuous/fdist.jl @@ -56,8 +56,7 @@ params(d::FDist) = (d.ν1, d.ν2) #### Statistics -mean(d::FDist{T}) where {T<:Real} = (ν2 = d.ν2; -ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) +mean(d::FDist{T}) where {T<:Real} = (ν2 = d.ν2; ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) function mode(d::FDist{T}) where {T<:Real} (ν1, ν2) = params(d) @@ -104,5 +103,5 @@ end @_delegate_statsfuns FDist fdist ν1 ν2 -rand(rng::AbstractRNG, d::FDist) = ((ν1, ν2) = params(d); -(ν2 * rand(rng, Chisq(ν1))) / (ν1 * rand(rng, Chisq(ν2)))) +rand(rng::AbstractRNG, d::FDist) = + ((ν1, ν2) = params(d); (ν2 * rand(rng, Chisq(ν1))) / (ν1 * rand(rng, Chisq(ν2)))) diff --git a/src/univariate/continuous/frechet.jl b/src/univariate/continuous/frechet.jl index 0046d3afd6..d7fc5005d2 100644 --- a/src/univariate/continuous/frechet.jl +++ b/src/univariate/continuous/frechet.jl @@ -66,8 +66,7 @@ end median(d::Frechet) = d.θ * logtwo^(-1 / d.α) -mode(d::Frechet) = (iα = -1 / d.α; -d.θ * (1 - iα)^iα) +mode(d::Frechet) = (iα = -1 / d.α; d.θ * (1 - iα)^iα) function var(d::Frechet{T}) where {T<:Real} if d.α > 2 diff --git a/src/univariate/continuous/inversegamma.jl b/src/univariate/continuous/inversegamma.jl index de7f66e79f..a127c3a2ca 100644 --- a/src/univariate/continuous/inversegamma.jl +++ b/src/univariate/continuous/inversegamma.jl @@ -67,8 +67,7 @@ partype(::InverseGamma{T}) where {T} = T #### Parameters -mean(d::InverseGamma{T}) where {T} = ((α, θ) = params(d); -α > 1 ? θ / (α - 1) : T(Inf)) +mean(d::InverseGamma{T}) where {T} = ((α, θ) = params(d); α > 1 ? θ / (α - 1) : T(Inf)) mode(d::InverseGamma) = scale(d) / (shape(d) + 1) diff --git a/src/univariate/continuous/ksdist.jl b/src/univariate/continuous/ksdist.jl index 3ae970e82f..745e7a7442 100644 --- a/src/univariate/continuous/ksdist.jl +++ b/src/univariate/continuous/ksdist.jl @@ -93,7 +93,7 @@ function cdf_durbin(d::KSDist, x::Float64) end H[m, 1] += h <= 0.5 ? 
-h^m : -h^m + (h - ch) for i = 1:m, j = 1:m - for g = 1:max(i - j + 1, 0) + for g = 1:max(i-j+1, 0) H[i, j] /= g end # we can avoid keeping track of the exponent by dividing by e diff --git a/src/univariate/continuous/ksonesided.jl b/src/univariate/continuous/ksonesided.jl index 6454946104..a57d6a5865 100644 --- a/src/univariate/continuous/ksonesided.jl +++ b/src/univariate/continuous/ksonesided.jl @@ -25,7 +25,7 @@ function ccdf(d::KSOneSided, x::Float64) end n = d.n s = 0.0 - for j = 0:floor(Int, n - n * x) + for j = 0:floor(Int, n-n*x) p = x + j / n s += pdf(Binomial(n, p), j) / p end diff --git a/src/univariate/continuous/lognormal.jl b/src/univariate/continuous/lognormal.jl index ab92991937..dc9270b8bf 100644 --- a/src/univariate/continuous/lognormal.jl +++ b/src/univariate/continuous/lognormal.jl @@ -62,11 +62,9 @@ meanlogx(d::LogNormal) = d.μ varlogx(d::LogNormal) = abs2(d.σ) stdlogx(d::LogNormal) = d.σ -mean(d::LogNormal) = ((μ, σ) = params(d); -exp(μ + σ^2 / 2)) +mean(d::LogNormal) = ((μ, σ) = params(d); exp(μ + σ^2 / 2)) median(d::LogNormal) = exp(d.μ) -mode(d::LogNormal) = ((μ, σ) = params(d); -exp(μ - σ^2)) +mode(d::LogNormal) = ((μ, σ) = params(d); exp(μ - σ^2)) partype(::LogNormal{T}) where {T<:Real} = T function var(d::LogNormal) diff --git a/src/univariate/continuous/normalcanon.jl b/src/univariate/continuous/normalcanon.jl index 5d45f62170..e0fa393a3e 100644 --- a/src/univariate/continuous/normalcanon.jl +++ b/src/univariate/continuous/normalcanon.jl @@ -37,8 +37,7 @@ Base.convert(::Type{NormalCanon{T}}, d::NormalCanon{T}) where {T<:Real} = d ## conversion between Normal and NormalCanon convert(::Type{Normal}, d::NormalCanon) = Normal(d.μ, 1 / sqrt(d.λ)) -convert(::Type{NormalCanon}, d::Normal) = (λ = 1 / d.σ^2; -NormalCanon(λ * d.μ, λ)) +convert(::Type{NormalCanon}, d::Normal) = (λ = 1 / d.σ^2; NormalCanon(λ * d.μ, λ)) meanform(d::NormalCanon) = convert(Normal, d) canonform(d::Normal) = convert(NormalCanon, d) diff --git a/src/univariate/continuous/pareto.jl b/src/univariate/continuous/pareto.jl index 1af251dd61..cd8f056dcd 100644 --- a/src/univariate/continuous/pareto.jl +++ b/src/univariate/continuous/pareto.jl @@ -61,8 +61,7 @@ function mean(d::Pareto{T}) where {T<:Real} (α, θ) = params(d) α > 1 ? α * θ / (α - 1) : T(Inf) end -median(d::Pareto) = ((α, θ) = params(d); -θ * 2^(1 / α)) +median(d::Pareto) = ((α, θ) = params(d); θ * 2^(1 / α)) mode(d::Pareto) = d.θ function var(d::Pareto{T}) where {T<:Real} @@ -80,8 +79,7 @@ function kurtosis(d::Pareto{T}) where {T<:Real} α > 4 ? 
(6(α^3 + α^2 - 6α - 2)) / (α * (α - 3) * (α - 4)) : T(NaN) end -entropy(d::Pareto) = ((α, θ) = params(d); -log(θ / α) + 1 / α + 1) +entropy(d::Pareto) = ((α, θ) = params(d); log(θ / α) + 1 / α + 1) #### Evaluation diff --git a/src/univariate/discrete/poissonbinomial.jl b/src/univariate/discrete/poissonbinomial.jl index 9f0972814b..259a46a57e 100644 --- a/src/univariate/discrete/poissonbinomial.jl +++ b/src/univariate/discrete/poissonbinomial.jl @@ -203,7 +203,7 @@ end function _dft(x::Vector{T}) where {T} n = length(x) y = zeros(complex(float(T)), n) - @inbounds for j = 0:n-1, k = 0:n-1 + @inbounds for j = 0:(n-1), k = 0:(n-1) y[k+1] += x[j+1] * cis(-π * float(T)(2 * mod(j * k, n)) / n) end return y diff --git a/src/univariates.jl b/src/univariates.jl index d43e304c90..a8212310e5 100644 --- a/src/univariates.jl +++ b/src/univariates.jl @@ -588,7 +588,7 @@ function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) c = sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) x < minimum_d ? zero(c) : c else - c = 1 - sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + c = 1 - sum(Base.Fix1(pdf, d), (min(x+1, maximum_d)):maximum_d) x >= maximum_d ? one(c) : c end @@ -605,7 +605,7 @@ function integerunitrange_ccdf(d::DiscreteUnivariateDistribution, x::Integer) c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) x < minimum_d ? one(c) : c else - c = sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + c = sum(Base.Fix1(pdf, d), (min(x+1, maximum_d)):maximum_d) x >= maximum_d ? zero(c) : c end @@ -622,7 +622,7 @@ function integerunitrange_logcdf(d::DiscreteUnivariateDistribution, x::Integer) c = logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d))) x < minimum_d ? oftype(c, -Inf) : c else - c = log1mexp(logsumexp(logpdf(d, y) for y = (min(x + 1, maximum_d)):maximum_d)) + c = log1mexp(logsumexp(logpdf(d, y) for y = (min(x+1, maximum_d)):maximum_d)) x >= maximum_d ? zero(c) : c end @@ -639,7 +639,7 @@ function integerunitrange_logccdf(d::DiscreteUnivariateDistribution, x::Integer) c = log1mexp(logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d)))) x < minimum_d ? zero(c) : c else - c = logsumexp(logpdf(d, y) for y = (min(x + 1, maximum_d)):maximum_d) + c = logsumexp(logpdf(d, y) for y = (min(x+1, maximum_d)):maximum_d) x >= maximum_d ? 
oftype(c, -Inf) : c end diff --git a/test/censored.jl b/test/censored.jl index ef958d72d2..8e21c40442 100644 --- a/test/censored.jl +++ b/test/censored.jl @@ -196,6 +196,7 @@ end lower, upper, ) in bounds + d = censored(d0, lower, upper) dmix = _as_mixture(d) l, u = extrema(d) @@ -257,6 +258,7 @@ end lower, upper, ) in bounds + d = censored(d0, lower, upper) dmix = _as_mixture(d) l, u = extrema(d) @@ -317,6 +319,7 @@ end lower, upper, ) in bounds + d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) @@ -360,6 +363,7 @@ end lower, upper, ) in bounds + d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) diff --git a/test/cholesky/lkjcholesky.jl b/test/cholesky/lkjcholesky.jl index 077b6d3821..723545e232 100644 --- a/test/cholesky/lkjcholesky.jl +++ b/test/cholesky/lkjcholesky.jl @@ -223,14 +223,14 @@ using FiniteDifferences test_draws(d, xs; nkstests = nkstests) F2 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs2 = [deepcopy(F2) for _ = 1:10^4] + xs2 = [deepcopy(F2) for _ = 1:(10^4)] xs2[1] = cholesky(exp(Symmetric(randn(rng, p + 1, p + 1)))) rand!(rng, d, xs2) test_draws(d, xs2; nkstests = nkstests) # non-allocating F3 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs3 = [deepcopy(F3) for _ = 1:10^4] + xs3 = [deepcopy(F3) for _ = 1:(10^4)] rand!(rng, d, xs3) test_draws(d, xs3; check_uplo = uplo == 'U', nkstests = nkstests) end diff --git a/test/fit.jl b/test/fit.jl index a866dcd90a..2a51e8ad09 100644 --- a/test/fit.jl +++ b/test/fit.jl @@ -45,8 +45,8 @@ end ss = @inferred suffstats(D, x, w) @test ss isa Distributions.BernoulliStats - @test ss.cnt0 ≈ sum(v[z.==0]) - @test ss.cnt1 ≈ sum(v[z.==1]) + @test ss.cnt0 ≈ sum(v[z .== 0]) + @test ss.cnt1 ≈ sum(v[z .== 1]) d = @inferred fit(D, x) @test d isa D @@ -54,7 +54,7 @@ end d = @inferred fit(D, x, w) @test d isa D - @test mean(d) ≈ sum(v[z.==1]) / sum(v) + @test mean(d) ≈ sum(v[z .== 1]) / sum(v) end z = rand(rng..., Bernoulli(0.7), N) @@ -142,7 +142,7 @@ end @test probs(d2) == probs(d) ss = suffstats(Categorical, (3, x), w) - h = Float64[sum(w[x.==i]) for i = 1:3] + h = Float64[sum(w[x .== i]) for i = 1:3] @test isa(ss, Distributions.CategoricalStats) @test ss.h ≈ h diff --git a/test/matrixreshaped.jl b/test/matrixreshaped.jl index 5d7165bf73..e058c76fbd 100644 --- a/test/matrixreshaped.jl +++ b/test/matrixreshaped.jl @@ -19,7 +19,7 @@ function test_matrixreshaped(rng, d1, sizes) @test_deprecated(@test_throws ArgumentError MatrixReshaped(d1, -length(d1), -1)) end @testset "MatrixReshaped size" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) @test size(d) == s end end @@ -34,35 +34,35 @@ function test_matrixreshaped(rng, d1, sizes) end end @testset "MatrixReshaped insupport" begin - for (i, d) in enumerate(d1s[1:end-1]) - for (j, s) in enumerate(sizes[1:end-1]) + for (i, d) in enumerate(d1s[1:(end-1)]) + for (j, s) in enumerate(sizes[1:(end-1)]) @test (i == j) ⊻ !insupport(d, reshape(x1, s)) end end end @testset "MatrixReshaped mean" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) @test mean(d) == reshape(mean(d1), s) end end @testset "MatrixReshaped mode" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) @test mode(d) == reshape(mode(d1), s) end end @testset "MatrixReshaped covariance" begin - for (d, (n, p)) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, (n, p)) in zip(d1s[1:(end-1)], 
sizes[1:(end-1)]) @test cov(d) == cov(d1) @test cov(d, Val(false)) == reshape(cov(d1), n, p, n, p) end end @testset "MatrixReshaped variance" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) @test var(d) == reshape(var(d1), s) end end @testset "MatrixReshaped params" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) @test params(d) == (d1, s) end end @@ -77,7 +77,7 @@ function test_matrixreshaped(rng, d1, sizes) end end @testset "MatrixReshaped logpdf" begin - for (d, s) in zip(d1s[1:end-1], sizes[1:end-1]) + for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) x = reshape(x1, s) @test logpdf(d, x) == logpdf(d1, x1) end diff --git a/test/matrixvariates.jl b/test/matrixvariates.jl index aadde2fef5..8d6bd34614 100644 --- a/test/matrixvariates.jl +++ b/test/matrixvariates.jl @@ -474,7 +474,7 @@ import Distributions: _univariate, _multivariate, _rand_params mymats[:, :, m] = rand(G) end for i = 1:d - for j = 1:i-1 + for j = 1:(i-1) @test pvalue_kolmogorovsmirnoff(mymats[i, j, :], ρ) >= α / L end end diff --git a/test/multivariate/jointorderstatistics.jl b/test/multivariate/jointorderstatistics.jl index 9960192636..950bc4b878 100644 --- a/test/multivariate/jointorderstatistics.jl +++ b/test/multivariate/jointorderstatistics.jl @@ -37,7 +37,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test n in [16, 40], r in [ 1:n, - ([i, j] for j = 2:n for i = 1:min(10, j - 1))..., + ([i, j] for j = 2:n for i = 1:min(10, j-1))..., vcat(2:4, (n-10):(n-5)), (2, n ÷ 2, n - 5), ] diff --git a/test/testutils.jl b/test/testutils.jl index 400aad679b..e79df17430 100644 --- a/test/testutils.jl +++ b/test/testutils.jl @@ -486,16 +486,16 @@ function test_range_evaluation(d::DiscreteUnivariateDistribution) p0 = map(Base.Fix1(pdf, d), collect(rmin:rmax)) @test map(Base.Fix1(pdf, d), rmin:rmax) ≈ p0 if rmin + 2 <= rmax - @test map(Base.Fix1(pdf, d), rmin+1:rmax-1) ≈ p0[2:end-1] + @test map(Base.Fix1(pdf, d), (rmin+1):(rmax-1)) ≈ p0[2:(end-1)] end if isbounded(d) @test map(Base.Fix1(pdf, d), support(d)) ≈ p0 - @test map(Base.Fix1(pdf, d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0) - @test map(Base.Fix1(pdf, d), rmin:rmax+3) ≈ vcat(p0, 0.0, 0.0, 0.0) - @test map(Base.Fix1(pdf, d), rmin-2:rmax+3) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0) + @test map(Base.Fix1(pdf, d), (rmin-2):rmax) ≈ vcat(0.0, 0.0, p0) + @test map(Base.Fix1(pdf, d), rmin:(rmax+3)) ≈ vcat(p0, 0.0, 0.0, 0.0) + @test map(Base.Fix1(pdf, d), (rmin-2):(rmax+3)) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0) elseif islowerbounded(d) - @test map(Base.Fix1(pdf, d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0) + @test map(Base.Fix1(pdf, d), (rmin-2):rmax) ≈ vcat(0.0, 0.0, p0) end end @@ -752,7 +752,7 @@ end # Finite difference differentiation function fdm(f, at) map(1:length(at)) do i - FiniteDifferences.central_fdm(5, 1)(x -> f([at[1:i-1]; x; at[i+1:end]]), at[i]) + FiniteDifferences.central_fdm(5, 1)(x -> f([at[1:(i-1)]; x; at[(i+1):end]]), at[i]) end end diff --git a/test/univariate/continuous/kumaraswamy.jl b/test/univariate/continuous/kumaraswamy.jl index d616ca373d..3fc730c5eb 100644 --- a/test/univariate/continuous/kumaraswamy.jl +++ b/test/univariate/continuous/kumaraswamy.jl @@ -17,7 +17,7 @@ using Distributions: expectation @test typeof(@inferred rand(D)) === typeof(rand()) tol = sqrt(eps(float(T))) @testset "gradlogpdf" begin - for x = T(0):(T <: Integer ? one(T) : T(0.5)):T(20) + for x = T(0):(T<:Integer ? 
one(T) : T(0.5)):T(20) fd = ForwardDiff.derivative(Base.Fix1(logpdf, D), x) gl = @inferred gradlogpdf(D, x) @test fd ≈ gl atol = tol diff --git a/test/univariate/discrete/poissonbinomial.jl b/test/univariate/discrete/poissonbinomial.jl index c0f7487790..974aee5cf6 100644 --- a/test/univariate/discrete/poissonbinomial.jl +++ b/test/univariate/discrete/poissonbinomial.jl @@ -7,13 +7,13 @@ using Test function naive_esf(x::AbstractVector{T}) where {T<:Real} n = length(x) S = zeros(T, n + 1) - states = hcat(reverse.(digits.(0:2^n-1, base = 2, pad = n))...)' + states = hcat(reverse.(digits.(0:(2^n-1), base = 2, pad = n))...)' r_states = vec(mapslices(sum, states, dims = 2)) for r = 0:n idx = findall(r_states .== r) - S[r+1] = sum(mapslices(x -> prod(x[x.!=0]), states[idx, :] .* x', dims = 2)) + S[r+1] = sum(mapslices(x -> prod(x[x .!= 0]), states[idx, :] .* x', dims = 2)) end return S end @@ -98,8 +98,8 @@ using Test n = n₁ + n₂ + n₃ p = zeros(n) p[1:n₁] .= p₁ - p[n₁+1:n₁+n₂] .= p₂ - p[n₁+n₂+1:end] .= p₃ + p[(n₁+1):(n₁+n₂)] .= p₂ + p[(n₁+n₂+1):end] .= p₃ d = PoissonBinomial(p) println(" testing PoissonBinomial [$(n₁) × $(p₁), $(n₂) × $(p₂), $(n₃) × $(p₃)]") b1 = Binomial(n₁, p₁) @@ -121,7 +121,7 @@ using Test m = 0.0 for i = 0:min(n₁, k) mc = 0.0 - for j = i:min(i + n₂, k) + for j = i:min(i+n₂, k) mc += (k - j <= n₃) && pmf2[j-i+1] * pmf3[k-j+1] end m += pmf1[i+1] * mc From af6c728a43ef45de657fb3becd343fa11b963e90 Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 17:02:18 +0200 Subject: [PATCH 3/8] add CI --- .formatting/format_check.jl | 18 ++++++++++++++++++ .github/workflows/CI.yml | 23 +++++++++++++++++++---- README.md | 6 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) create mode 100644 .formatting/format_check.jl diff --git a/.formatting/format_check.jl b/.formatting/format_check.jl new file mode 100644 index 0000000000..303a25d710 --- /dev/null +++ b/.formatting/format_check.jl @@ -0,0 +1,18 @@ +using JuliaFormatter + +project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..") + +println("Checking code formatting...") + +# Check if files are properly formatted +not_formatted = format(project_path; verbose = true, overwrite = false) + +if not_formatted + println("❌ Formatting check failed!") + println("Some files are not properly formatted.") + println("To fix formatting, run: julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(\".formatting/format_all.jl\")'") + exit(1) +else + println("✅ All files are properly formatted!") + exit(0) +end diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 34df70d08b..f4e3480442 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -5,7 +5,7 @@ on: push: branches: - master - tags: '*' + tags: "*" workflow_dispatch: merge_group: @@ -16,6 +16,21 @@ concurrency: cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} jobs: + format: + name: Format Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: "1" + show-versioninfo: true + - uses: julia-actions/cache@v2 + - run: | + julia --project=.formatting -e ' + using Pkg + Pkg.instantiate() + include(".formatting/format_all.jl")' test: name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} @@ -23,8 +38,8 @@ jobs: fail-fast: false matrix: version: - - 'min' - - '1' + - "min" + - "1" - pre os: - ubuntu-latest @@ -57,7 +72,7 @@ jobs: - uses: actions/checkout@v4 - 
uses: julia-actions/setup-julia@v2 with: - version: '1' + version: "1" show-versioninfo: true - run: | julia --project=docs -e ' diff --git a/README.md b/README.md index c4290bd516..9971dd5a91 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,12 @@ To format the code, run the following command: julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(".formatting/format_all.jl")' ``` +**Note:** Code formatting is automatically checked in CI. +The formatting check can be run locally with +```julia +julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(".formatting/format_check.jl")' +``` + ### Requirements Distributions is a central package which many rely on, From 67ed56078893dc58d6a1e5578ba201fe134be41f Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 18:18:36 +0200 Subject: [PATCH 4/8] switched to Runic --- .formatting/Project.toml | 2 +- .formatting/format_all.jl | 15 +++++++++------ .formatting/format_check.jl | 14 +++++++------- .github/workflows/CI.yml | 2 +- README.md | 3 ++- 5 files changed, 20 insertions(+), 16 deletions(-) diff --git a/.formatting/Project.toml b/.formatting/Project.toml index f3aab8b8bf..f4ad875906 100644 --- a/.formatting/Project.toml +++ b/.formatting/Project.toml @@ -1,2 +1,2 @@ [deps] -JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899" +Runic = "62bfec6d-59d7-401d-8490-b29ee721c001" diff --git a/.formatting/format_all.jl b/.formatting/format_all.jl index c4001aaba9..74a2e04bf7 100644 --- a/.formatting/format_all.jl +++ b/.formatting/format_all.jl @@ -1,11 +1,14 @@ -using JuliaFormatter +using Runic project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..") -not_formatted = format(project_path; verbose = true) -if not_formatted - @info "Formatting verified." +println("Formatting code with Runic...") + +# Format all files in the project +not_formatted = Runic.main(["--inplace", project_path]) +if not_formatted == 0 + @info "Formatting completed successfully." else - @warn "Formatting verification failed: Some files are not properly formatted!" + @warn "Formatting failed!" end -exit(not_formatted ?
0 : 1) +exit(not_formatted) diff --git a/.formatting/format_check.jl b/.formatting/format_check.jl index 303a25d710..3ec949e33f 100644 --- a/.formatting/format_check.jl +++ b/.formatting/format_check.jl @@ -1,18 +1,18 @@ -using JuliaFormatter +using Runic project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..") -println("Checking code formatting...") +println("Checking code formatting with Runic...") # Check if files are properly formatted (`format` returns `true` if no changes are needed) -not_formatted = !format(project_path; verbose = true, overwrite = false) +not_formatted = Runic.main(["--check", "--diff", project_path]) -if not_formatted +if not_formatted == 0 + println("✅ All files are properly formatted!") + exit(0) +else println("❌ Formatting check failed!") println("Some files are not properly formatted.") println("To fix formatting, run: julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(\".formatting/format_all.jl\")'") exit(1) -else - println("✅ All files are properly formatted!") - exit(0) end diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index f4e3480442..22ed711720 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -30,7 +30,7 @@ jobs: julia --project=.formatting -e ' using Pkg Pkg.instantiate() - include(".formatting/format_all.jl")' + include(".formatting/format_check.jl")' test: name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} diff --git a/README.md b/README.md index 9971dd5a91..405c27f81a 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ Distributions.jl [![](https://zenodo.org/badge/DOI/10.5281/zenodo.2647458.svg)](https://zenodo.org/record/2647458) [![Coverage Status](https://coveralls.io/repos/JuliaStats/Distributions.jl/badge.svg?branch=master)](https://coveralls.io/r/JuliaStats/Distributions.jl?branch=master) [![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) +[![code style: runic](https://img.shields.io/badge/code_style-%E1%9A%B1%E1%9A%A2%E1%9A%BE%E1%9B%81%E1%9A%B2-black)](https://github.com/fredrikekre/Runic.jl) [![](https://img.shields.io/badge/docs-latest-blue.svg)](https://JuliaStats.github.io/Distributions.jl/latest/) [![](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaStats.github.io/Distributions.jl/stable/) @@ -60,7 +61,7 @@ To format the code, run the following command: julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(".formatting/format_all.jl")' ``` -**Note:** Code formatting is automatically checked in CI. +**Note:** Code formatting is automatically checked in CI using Runic.
The formatting check can be run locally with ```julia julia --project=.formatting -e 'using Pkg; Pkg.instantiate(); include(".formatting/format_check.jl")' ``` From e0864bbfea8e3a766f4d49ec956dc8ea52f19e9e Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 18:18:51 +0200 Subject: [PATCH 5/8] applied formatting --- .../eachvariate.jl | 6 +- .../multivariate/dirichlet.jl | 36 +- .../univariate/discrete/negativebinomial.jl | 6 +- .../univariate/discrete/poissonbinomial.jl | 8 +- ext/DistributionsDensityInterfaceExt.jl | 6 +- ext/DistributionsTestExt.jl | 22 +- perf/mixtures.jl | 16 +- perf/samplers.jl | 2 +- src/Distributions.jl | 160 ++++---- src/censored.jl | 94 ++--- src/cholesky/lkjcholesky.jl | 78 ++-- src/common.jl | 164 ++++---- src/conversion.jl | 1 - src/convolution.jl | 6 +- src/deprecates.jl | 38 +- src/eachvariate.jl | 14 +- src/edgeworth.jl | 67 ++- src/estimators.jl | 14 +- src/functionals.jl | 22 +- src/genericfit.jl | 14 +- src/genericrand.jl | 66 +-- src/matrix/inversewishart.jl | 32 +- src/matrix/lkj.jl | 46 +-- src/matrix/matrixbeta.jl | 30 +- src/matrix/matrixfdist.jl | 38 +- src/matrix/matrixnormal.jl | 52 +-- src/matrix/matrixtdist.jl | 58 +-- src/matrix/wishart.jl | 56 +-- src/matrixvariates.jl | 25 +- src/mixtures/mixturemodel.jl | 106 +++-- src/mixtures/unigmm.jl | 22 +- src/multivariate/dirichlet.jl | 136 +++---- src/multivariate/dirichletmultinomial.jl | 54 +-- src/multivariate/jointorderstatistics.jl | 22 +- src/multivariate/multinomial.jl | 92 ++--- src/multivariate/mvlogitnormal.jl | 10 +- src/multivariate/mvlognormal.jl | 168 ++++---- src/multivariate/mvnormal.jl | 112 ++--- src/multivariate/mvnormalcanon.jl | 80 ++-- src/multivariate/mvtdist.jl | 82 ++-- src/multivariate/product.jl | 28 +- src/multivariate/vonmisesfisher.jl | 27 +- src/multivariates.jl | 46 +-- src/namedtuple/productnamedtuple.jl | 44 +- src/product.jl | 138 +++---- src/qq.jl | 6 +- src/quantilealgs.jl | 64 +-- src/reshaped.jl | 48 +-- src/samplers.jl | 26 +- src/samplers/aliastable.jl | 4 +- src/samplers/binomial.jl | 45 +-- src/samplers/categorical.jl | 14 +- src/samplers/discretenonparametric.jl | 16 +- src/samplers/exponential.jl | 4 +- src/samplers/gamma.jl | 79 ++-- src/samplers/multinomial.jl | 16 +- src/samplers/obsoleted.jl | 32 +- src/samplers/poisson.jl | 12 +- src/samplers/poissonbinomial.jl | 2 +- src/samplers/productnamedtuple.jl | 14 +- src/samplers/vonmises.jl | 5 +- src/samplers/vonmisesfisher.jl | 10 +- src/show.jl | 12 +- src/statsapi.jl | 22 +- src/test_utils.jl | 4 +- src/truncate.jl | 72 ++-- src/truncated/discrete_uniform.jl | 2 +- src/truncated/exponential.jl | 4 +- src/truncated/loguniform.jl | 2 +- src/truncated/normal.jl | 20 +- src/truncated/uniform.jl | 2 +- src/univariate/continuous/arcsine.jl | 24 +- src/univariate/continuous/beta.jl | 54 ++- src/univariate/continuous/betaprime.jl | 28 +- src/univariate/continuous/biweight.jl | 34 +- src/univariate/continuous/cauchy.jl | 36 +- src/univariate/continuous/chernoff.jl | 242 +++++------ src/univariate/continuous/chi.jl | 24 +- src/univariate/continuous/chisq.jl | 17 +- src/univariate/continuous/cosine.jl | 32 +- src/univariate/continuous/epanechnikov.jl | 40 +- src/univariate/continuous/erlang.jl | 22 +- src/univariate/continuous/exponential.jl | 18 +- src/univariate/continuous/fdist.jl | 36 +- src/univariate/continuous/frechet.jl | 28 +- src/univariate/continuous/gamma.jl | 48 +-- .../continuous/generalizedextremevalue.jl | 46 +-- .../continuous/generalizedpareto.jl | 34 +-
src/univariate/continuous/gumbel.jl | 22 +- src/univariate/continuous/inversegamma.jl | 38 +- src/univariate/continuous/inversegaussian.jl | 38 +- src/univariate/continuous/johnsonsu.jl | 18 +- src/univariate/continuous/kolmogorov.jl | 15 +- src/univariate/continuous/ksdist.jl | 16 +- src/univariate/continuous/ksonesided.jl | 4 +- src/univariate/continuous/kumaraswamy.jl | 2 +- src/univariate/continuous/laplace.jl | 28 +- src/univariate/continuous/levy.jl | 38 +- src/univariate/continuous/lindley.jl | 4 +- src/univariate/continuous/logistic.jl | 26 +- src/univariate/continuous/logitnormal.jl | 30 +- src/univariate/continuous/lognormal.jl | 24 +- src/univariate/continuous/loguniform.jl | 22 +- src/univariate/continuous/noncentralbeta.jl | 14 +- src/univariate/continuous/noncentralchisq.jl | 22 +- src/univariate/continuous/noncentralf.jl | 24 +- src/univariate/continuous/noncentralt.jl | 20 +- src/univariate/continuous/normal.jl | 90 ++--- src/univariate/continuous/normalcanon.jl | 18 +- .../continuous/normalinversegaussian.jl | 44 +- src/univariate/continuous/pareto.jl | 40 +- .../continuous/pgeneralizedgaussian.jl | 16 +- src/univariate/continuous/rayleigh.jl | 34 +- src/univariate/continuous/rician.jl | 28 +- src/univariate/continuous/semicircle.jl | 6 +- .../continuous/skewedexponentialpower.jl | 52 +-- src/univariate/continuous/skewnormal.jl | 150 ++++--- src/univariate/continuous/studentizedrange.jl | 18 +- src/univariate/continuous/symtriangular.jl | 28 +- src/univariate/continuous/tdist.jl | 38 +- src/univariate/continuous/triangular.jl | 40 +- src/univariate/continuous/triweight.jl | 40 +- src/univariate/continuous/uniform.jl | 32 +- src/univariate/continuous/vonmises.jl | 18 +- src/univariate/continuous/weibull.jl | 44 +- src/univariate/discrete/bernoulli.jl | 40 +- src/univariate/discrete/bernoullilogit.jl | 24 +- src/univariate/discrete/betabinomial.jl | 16 +- src/univariate/discrete/binomial.jl | 70 ++-- src/univariate/discrete/categorical.jl | 104 ++--- .../discrete/discretenonparametric.jl | 90 ++--- src/univariate/discrete/discreteuniform.jl | 4 +- src/univariate/discrete/geometric.jl | 46 +-- src/univariate/discrete/hypergeometric.jl | 8 +- src/univariate/discrete/negativebinomial.jl | 16 +- .../discrete/noncentralhypergeometric.jl | 78 ++-- src/univariate/discrete/poisson.jl | 30 +- src/univariate/discrete/poissonbinomial.jl | 62 +-- src/univariate/discrete/skellam.jl | 22 +- src/univariate/discrete/soliton.jl | 10 +- src/univariate/locationscale.jl | 62 +-- src/univariate/orderstatistic.jl | 16 +- src/univariates.jl | 150 +++---- src/utils.jl | 40 +- test/censored.jl | 108 ++--- test/cholesky/lkjcholesky.jl | 22 +- test/convolution.jl | 6 +- test/density_interface.jl | 6 +- test/eachvariate.jl | 2 +- test/edgecases.jl | 2 +- test/edgeworth.jl | 2 +- test/fit.jl | 4 +- test/functionals.jl | 32 +- test/matrixreshaped.jl | 20 +- test/matrixvariates.jl | 138 +++---- test/mixture.jl | 82 ++-- test/multivariate/dirichlet.jl | 16 +- test/multivariate/dirichletmultinomial.jl | 14 +- test/multivariate/jointorderstatistics.jl | 38 +- test/multivariate/multinomial.jl | 50 +-- test/multivariate/mvlogitnormal.jl | 36 +- test/multivariate/mvlognormal.jl | 95 +++-- test/multivariate/mvnormal.jl | 98 ++--- test/multivariate/mvtdist.jl | 12 +- test/multivariate/product.jl | 2 +- test/multivariate/vonmisesfisher.jl | 63 ++- test/multivariate_stats.jl | 18 +- test/namedtuple/productnamedtuple.jl | 70 ++-- test/pdfnorm.jl | 28 +- test/product.jl | 30 +- test/qq.jl | 2 +- test/reshaped.jl | 2 
+- test/samplers.jl | 80 ++-- test/statsapi.jl | 6 +- test/testutils.jl | 160 ++++---- test/truncate.jl | 381 +++++++++--------- test/truncated/exponential.jl | 2 +- test/truncated/normal.jl | 20 +- test/types.jl | 2 +- test/univariate/continuous.jl | 26 +- test/univariate/continuous/arcsine.jl | 2 +- test/univariate/continuous/chernoff.jl | 88 ++-- test/univariate/continuous/chisq.jl | 1 - test/univariate/continuous/erlang.jl | 4 +- test/univariate/continuous/exponential.jl | 12 +- test/univariate/continuous/gamma.jl | 6 +- test/univariate/continuous/gumbel.jl | 10 +- test/univariate/continuous/johnsonsu.jl | 8 +- test/univariate/continuous/kolmogorov.jl | 12 +- test/univariate/continuous/kumaraswamy.jl | 2 +- test/univariate/continuous/lindley.jl | 6 +- test/univariate/continuous/logitnormal.jl | 24 +- test/univariate/continuous/lognormal.jl | 28 +- test/univariate/continuous/loguniform.jl | 12 +- test/univariate/continuous/noncentralt.jl | 4 +- test/univariate/continuous/normal.jl | 14 +- test/univariate/continuous/normalcanon.jl | 8 +- test/univariate/continuous/pareto.jl | 10 +- .../continuous/pgeneralizedgaussian.jl | 14 +- test/univariate/continuous/rician.jl | 8 +- test/univariate/continuous/semicircle.jl | 2 +- .../continuous/skewedexponentialpower.jl | 20 +- test/univariate/continuous/triangular.jl | 6 +- test/univariate/continuous/uniform.jl | 32 +- test/univariate/continuous/weibull.jl | 2 +- test/univariate/discrete/bernoulli.jl | 4 +- test/univariate/discrete/bernoullilogit.jl | 10 +- test/univariate/discrete/betabinomial.jl | 8 +- test/univariate/discrete/binomial.jl | 26 +- test/univariate/discrete/categorical.jl | 24 +- test/univariate/discrete/dirac.jl | 4 +- .../discrete/discretenonparametric.jl | 36 +- test/univariate/discrete/discreteuniform.jl | 2 +- test/univariate/discrete/geometric.jl | 4 +- test/univariate/discrete/negativebinomial.jl | 12 +- test/univariate/discrete/poisson.jl | 2 +- test/univariate/discrete/poissonbinomial.jl | 60 +-- test/univariate/discrete/soliton.jl | 4 +- test/univariate/locationscale.jl | 52 +-- test/univariate/orderstatistic.jl | 46 +-- test/univariate_bounds.jl | 10 +- test/univariates.jl | 31 +- test/utils.jl | 6 +- test/vonmises.jl | 2 +- 224 files changed, 4012 insertions(+), 4014 deletions(-) diff --git a/ext/DistributionsChainRulesCoreExt/eachvariate.jl b/ext/DistributionsChainRulesCoreExt/eachvariate.jl index 4ac0b55956..fc539c938a 100644 --- a/ext/DistributionsChainRulesCoreExt/eachvariate.jl +++ b/ext/DistributionsChainRulesCoreExt/eachvariate.jl @@ -1,7 +1,7 @@ function ChainRulesCore.rrule( - ::Type{Distributions.EachVariate{V}}, - x::AbstractArray{<:Real}, -) where {V} + ::Type{Distributions.EachVariate{V}}, + x::AbstractArray{<:Real}, + ) where {V} y = Distributions.EachVariate{V}(x) size_x = size(x) function EachVariate_pullback(Δ) diff --git a/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl b/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl index 09ae1a5477..c5b04c2249 100644 --- a/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl +++ b/ext/DistributionsChainRulesCoreExt/multivariate/dirichlet.jl @@ -1,9 +1,9 @@ function ChainRulesCore.frule( - (_, Δalpha)::Tuple{Any,Any}, - ::Type{DT}, - alpha::AbstractVector{T}; - check_args::Bool = true, -) where {T<:Real,DT<:Union{Dirichlet{T},Dirichlet}} + (_, Δalpha)::Tuple{Any, Any}, + ::Type{DT}, + alpha::AbstractVector{T}; + check_args::Bool = true, + ) where {T <: Real, DT <: Union{Dirichlet{T}, Dirichlet}} d = DT(alpha; check_args = check_args) 
∂alpha0 = sum(Δalpha) digamma_alpha0 = SpecialFunctions.digamma(d.alpha0) @@ -19,10 +19,10 @@ function ChainRulesCore.frule( end function ChainRulesCore.rrule( - ::Type{DT}, - alpha::AbstractVector{T}; - check_args::Bool = true, -) where {T<:Real,DT<:Union{Dirichlet{T},Dirichlet}} + ::Type{DT}, + alpha::AbstractVector{T}; + check_args::Bool = true, + ) where {T <: Real, DT <: Union{Dirichlet{T}, Dirichlet}} d = DT(alpha; check_args = check_args) digamma_alpha0 = SpecialFunctions.digamma(d.alpha0) function Dirichlet_pullback(_Δd) @@ -36,11 +36,11 @@ function ChainRulesCore.rrule( end function ChainRulesCore.frule( - (_, Δd, Δx)::Tuple{Any,Any,Any}, - ::typeof(Distributions._logpdf), - d::Dirichlet, - x::AbstractVector{<:Real}, -) + (_, Δd, Δx)::Tuple{Any, Any, Any}, + ::typeof(Distributions._logpdf), + d::Dirichlet, + x::AbstractVector{<:Real}, + ) Ω = Distributions._logpdf(d, x) ∂alpha = sum( Broadcast.instantiate( @@ -58,10 +58,10 @@ function ChainRulesCore.frule( end function ChainRulesCore.rrule( - ::typeof(Distributions._logpdf), - d::T, - x::AbstractVector{<:Real}, -) where {T<:Dirichlet} + ::typeof(Distributions._logpdf), + d::T, + x::AbstractVector{<:Real}, + ) where {T <: Dirichlet} Ω = Distributions._logpdf(d, x) isfinite_Ω = isfinite(Ω) alpha = d.alpha diff --git a/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl b/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl index 80949f6e15..232441cea8 100644 --- a/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl +++ b/ext/DistributionsChainRulesCoreExt/univariate/discrete/negativebinomial.jl @@ -1,5 +1,5 @@ ## Callable struct to fix type inference issues caused by captured values -struct LogPDFNegativeBinomialPullback{D,T<:Real} +struct LogPDFNegativeBinomialPullback{D, T <: Real} ∂r::T ∂p::T end @@ -24,7 +24,7 @@ function ChainRulesCore.rrule(::typeof(logpdf), d::NegativeBinomial, k::Real) ∂r = oftype( z, log(p) - inv(k + r) - SpecialFunctions.digamma(r) + - SpecialFunctions.digamma(r + k + 1), + SpecialFunctions.digamma(r + k + 1), ) ∂p = oftype(z, r / p - k / (1 - p)) else @@ -35,7 +35,7 @@ function ChainRulesCore.rrule(::typeof(logpdf), d::NegativeBinomial, k::Real) # Define pullback logpdf_NegativeBinomial_pullback = - LogPDFNegativeBinomialPullback{typeof(d),typeof(z)}(∂r, ∂p) + LogPDFNegativeBinomialPullback{typeof(d), typeof(z)}(∂r, ∂p) return Ω, logpdf_NegativeBinomial_pullback end diff --git a/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl b/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl index 1d8449a03e..55c962ace3 100644 --- a/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl +++ b/ext/DistributionsChainRulesCoreExt/univariate/discrete/poissonbinomial.jl @@ -2,10 +2,10 @@ for f in (:poissonbinomial_pdf, :poissonbinomial_pdf_fft) pullback = Symbol(f, :_pullback) @eval begin function ChainRulesCore.frule( - (_, Δp)::Tuple{<:Any,<:AbstractVector{<:Real}}, - ::typeof(Distributions.$f), - p::AbstractVector{<:Real}, - ) + (_, Δp)::Tuple{<:Any, <:AbstractVector{<:Real}}, + ::typeof(Distributions.$f), + p::AbstractVector{<:Real}, + ) y = Distributions.$f(p) A = Distributions.poissonbinomial_pdf_partialderivatives(p) return y, A' * Δp diff --git a/ext/DistributionsDensityInterfaceExt.jl b/ext/DistributionsDensityInterfaceExt.jl index ce8d143b47..ba72a1f6c4 100644 --- a/ext/DistributionsDensityInterfaceExt.jl +++ b/ext/DistributionsDensityInterfaceExt.jl @@ -26,9 +26,9 @@ for 
(di_func, d_func) in ((:logdensityof, :logpdf), (:densityof, :pdf)) end function DensityInterface.$di_func( - d::MatrixDistribution, - x::AbstractArray{<:AbstractMatrix{<:Real}}, - ) + d::MatrixDistribution, + x::AbstractArray{<:AbstractMatrix{<:Real}}, + ) throw( ArgumentError( "$(DensityInterface.$di_func) doesn't support multiple samples as an argument", diff --git a/ext/DistributionsTestExt.jl b/ext/DistributionsTestExt.jl index ed684be633..22711709dc 100644 --- a/ext/DistributionsTestExt.jl +++ b/ext/DistributionsTestExt.jl @@ -25,10 +25,10 @@ Test that `AbstractMvNormal` implements the expected API. this function. """ function Distributions.TestUtils.test_mvnormal( - g::AbstractMvNormal, - n_tsamples::Int = 10^6, - rng::Union{AbstractRNG,Nothing} = nothing, -) + g::AbstractMvNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG, Nothing} = nothing, + ) d = length(g) μ = mean(g) Σ = cov(g) @@ -60,10 +60,10 @@ function Distributions.TestUtils.test_mvnormal( mean_atols = 8 .* sqrt.(vs ./ n_tsamples) cov_atols = 10 .* sqrt.(vs .* vs') ./ sqrt.(n_tsamples) - for i = 1:d + for i in 1:d @test isapprox(emp_mu[i], μ[i], atol = mean_atols[i]) end - for i = 1:d, j = 1:d + for i in 1:d, j in 1:d @test isapprox(emp_cov[i, j], Σ[i, j], atol = cov_atols[i, j]) end @@ -73,10 +73,10 @@ function Distributions.TestUtils.test_mvnormal( emp_mu = vec(mean(X, dims = 2)) Z = X .- emp_mu emp_cov = (Z * Z') * inv(n_tsamples) - for i = 1:d + for i in 1:d @test isapprox(emp_mu[i], μ[i], atol = mean_atols[i]) end - for i = 1:d, j = 1:d + for i in 1:d, j in 1:d @test isapprox(emp_cov[i, j], Σ[i, j], atol = cov_atols[i, j]) end @@ -84,13 +84,13 @@ function Distributions.TestUtils.test_mvnormal( # evaluation of sqmahal & logpdf U = X .- μ sqm = vec(sum(U .* (Σ \ U), dims = 1)) - for i = 1:min(100, n_tsamples) + for i in 1:min(100, n_tsamples) @test sqmahal(g, X[:, i]) ≈ sqm[i] end @test sqmahal(g, X) ≈ sqm lp = -0.5 .* sqm .- 0.5 * (d * log(2.0 * pi) + ldcov) - for i = 1:min(100, n_tsamples) + for i in 1:min(100, n_tsamples) @test logpdf(g, X[:, i]) ≈ lp[i] end @test logpdf(g, X) ≈ lp @@ -98,7 +98,7 @@ function Distributions.TestUtils.test_mvnormal( # log likelihood @test loglikelihood(g, X) ≈ sum(i -> Distributions._logpdf(g, X[:, i]), 1:n_tsamples) @test loglikelihood(g, X[:, 1]) ≈ logpdf(g, X[:, 1]) - @test loglikelihood(g, [X[:, i] for i in axes(X, 2)]) ≈ loglikelihood(g, X) + return @test loglikelihood(g, [X[:, i] for i in axes(X, 2)]) ≈ loglikelihood(g, X) end end # module diff --git a/perf/mixtures.jl b/perf/mixtures.jl index 5bc1d03b41..3d979f3195 100644 --- a/perf/mixtures.jl +++ b/perf/mixtures.jl @@ -80,13 +80,13 @@ end function indexed_sum_comp(d, x) ps = probs(d) cs = components(d) - @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0) + return @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0) end function indexed_boolprod(d, x) ps = probs(d) cs = components(d) - @inbounds sum((ps[i] > 0) * (ps[i] * pdf(cs[i], x)) for i in eachindex(ps)) + return @inbounds sum((ps[i] > 0) * (ps[i] * pdf(cs[i], x)) for i in eachindex(ps)) end function indexed_boolprod_noinbound(d, x) @@ -99,7 +99,7 @@ function sumcomp_cond(d, x) ps = probs(d) cs = components(d) s = zero(eltype(ps)) - @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0) + return @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0) end distributions = [Normal(-1.0, 0.3), Normal(0.0, 0.5), Normal(3.0, 1.0)] @@ -137,8 +137,8 @@ for x in rand(5) @info 
"===================" end -large_normals = [Normal(rand(), rand()) for _ = 1:1000] -large_probs = [rand() for _ = 1:1000] +large_normals = [Normal(rand(), rand()) for _ in 1:1000] +large_probs = [rand() for _ in 1:1000] large_probs .= large_probs ./ sum(large_probs) gmm_large = MixtureModel(large_normals, large_probs) @@ -173,11 +173,11 @@ for x in rand(5) end large_het = append!( - ContinuousUnivariateDistribution[Normal(rand(), rand()) for _ = 1:1000], - ContinuousUnivariateDistribution[LogNormal(rand(), rand()) for _ = 1:1000], + ContinuousUnivariateDistribution[Normal(rand(), rand()) for _ in 1:1000], + ContinuousUnivariateDistribution[LogNormal(rand(), rand()) for _ in 1:1000], ) -large_het_probs = [rand() for _ = 1:2000] +large_het_probs = [rand() for _ in 1:2000] large_het_probs .= large_het_probs ./ sum(large_het_probs) gmm_het = MixtureModel(large_het, large_het_probs) diff --git a/perf/samplers.jl b/perf/samplers.jl index d390e55067..7d71396752 100644 --- a/perf/samplers.jl +++ b/perf/samplers.jl @@ -48,7 +48,7 @@ if haskey(ENV, "binomial") && ENV["binomial"] != "skip" for n in nvals, p in pvals s = ST(n, p) b = @benchmark rand($mt, $s) - @info "(n,p): $((n,p)), result: $b" + @info "(n,p): $((n, p)), result: $b" end end end diff --git a/src/Distributions.jl b/src/Distributions.jl index 250a039c4a..e60926f063 100644 --- a/src/Distributions.jl +++ b/src/Distributions.jl @@ -202,94 +202,94 @@ export RealInterval, # methods - canonform, # get canonical form of a distribution - ccdf, # complementary cdf, i.e. 1 - cdf - cdf, # cumulative distribution function - censored, # censor a distribution with a lower and upper bound - cf, # characteristic function - cquantile, # complementary quantile (i.e. using prob in right hand tail) - component, # get the k-th component of a mixture model - components, # get components from a mixture model - componentwise_pdf, # component-wise pdf for mixture models - componentwise_logpdf, # component-wise logpdf for mixture models - concentration, # the concentration parameter - convolve, # convolve distributions of the same type - dof, # get the degree of freedom - entropy, # entropy of distribution in nats - failprob, # failing probability - fit, # fit a distribution to data (using default method) - fit_mle, # fit a distribution to data using MLE - insupport, # predicate, is x in the support of the distribution? - invcov, # get the inversed covariance - invlogccdf, # complementary quantile based on log probability - invlogcdf, # quantile based on log probability - isplatykurtic, # Is excess kurtosis > 0.0? - isleptokurtic, # Is excess kurtosis < 0.0? - ismesokurtic, # Is excess kurtosis = 0.0? - isprobvec, # Is a probability vector? + canonform, # get canonical form of a distribution + ccdf, # complementary cdf, i.e. 1 - cdf + cdf, # cumulative distribution function + censored, # censor a distribution with a lower and upper bound + cf, # characteristic function + cquantile, # complementary quantile (i.e. 
using prob in right hand tail) + component, # get the k-th component of a mixture model + components, # get components from a mixture model + componentwise_pdf, # component-wise pdf for mixture models + componentwise_logpdf, # component-wise logpdf for mixture models + concentration, # the concentration parameter + convolve, # convolve distributions of the same type + dof, # get the degree of freedom + entropy, # entropy of distribution in nats + failprob, # failing probability + fit, # fit a distribution to data (using default method) + fit_mle, # fit a distribution to data using MLE + insupport, # predicate, is x in the support of the distribution? + invcov, # get the inversed covariance + invlogccdf, # complementary quantile based on log probability + invlogcdf, # quantile based on log probability + isplatykurtic, # Is excess kurtosis > 0.0? + isleptokurtic, # Is excess kurtosis < 0.0? + ismesokurtic, # Is excess kurtosis = 0.0? + isprobvec, # Is a probability vector? isupperbounded, islowerbounded, isbounded, hasfinitesupport, - kldivergence, # kl divergence between distributions - kurtosis, # kurtosis of the distribution - logccdf, # ccdf returning log-probability - logcdf, # cdf returning log-probability - logdiffcdf, # log of difference between cdf at two values - logdetcov, # log-determinant of covariance - loglikelihood, # log probability of array of IID draws - logpdf, # log probability density - logpdf!, # evaluate log pdf to provided storage - invscale, # Inverse scale parameter - sqmahal, # squared Mahalanobis distance to Gaussian center - sqmahal!, # in-place evaluation of sqmahal - location, # get the location parameter - location!, # provide storage for the location parameter (used in multivariate distribution mvlognormal) - mean, # mean of distribution - meandir, # mean direction (of a spherical distribution) - meanform, # convert a normal distribution from canonical form to mean form - meanlogx, # the mean of log(x) - median, # median of distribution - mgf, # moment generating function - cgf, # cumulant generating function - mode, # the mode of a unimodal distribution - modes, # mode(s) of distribution as vector - moment, # moments of distribution - nsamples, # get the number of samples contained in an array - ncategories, # the number of categories in a Categorical distribution - ncomponents, # the number of components in a mixture model - ntrials, # the number of trials being performed in the experiment - params, # get the tuple of parameters - params!, # provide storage space to calculate the tuple of parameters for a multivariate distribution like mvlognormal - partype, # returns a type large enough to hold all of a distribution's parameters' element types - pdf, # probability density function (ContinuousDistribution) - pdfsquaredL2norm, # squared L2 norm of the probability density function - probs, # Get the vector of probabilities - probval, # The pdf/pmf value for a uniform distribution + kldivergence, # kl divergence between distributions + kurtosis, # kurtosis of the distribution + logccdf, # ccdf returning log-probability + logcdf, # cdf returning log-probability + logdiffcdf, # log of difference between cdf at two values + logdetcov, # log-determinant of covariance + loglikelihood, # log probability of array of IID draws + logpdf, # log probability density + logpdf!, # evaluate log pdf to provided storage + invscale, # Inverse scale parameter + sqmahal, # squared Mahalanobis distance to Gaussian center + sqmahal!, # in-place evaluation of sqmahal + location, # 
get the location parameter + location!, # provide storage for the location parameter (used in multivariate distribution mvlognormal) + mean, # mean of distribution + meandir, # mean direction (of a spherical distribution) + meanform, # convert a normal distribution from canonical form to mean form + meanlogx, # the mean of log(x) + median, # median of distribution + mgf, # moment generating function + cgf, # cumulant generating function + mode, # the mode of a unimodal distribution + modes, # mode(s) of distribution as vector + moment, # moments of distribution + nsamples, # get the number of samples contained in an array + ncategories, # the number of categories in a Categorical distribution + ncomponents, # the number of components in a mixture model + ntrials, # the number of trials being performed in the experiment + params, # get the tuple of parameters + params!, # provide storage space to calculate the tuple of parameters for a multivariate distribution like mvlognormal + partype, # returns a type large enough to hold all of a distribution's parameters' element types + pdf, # probability density function (ContinuousDistribution) + pdfsquaredL2norm, # squared L2 norm of the probability density function + probs, # Get the vector of probabilities + probval, # The pdf/pmf value for a uniform distribution product_distribution, # product of univariate distributions - quantile, # inverse of cdf (defined for p in (0,1)) - qqbuild, # build a paired quantiles data structure for qqplots - rate, # get the rate parameter - sampler, # create a Sampler object for efficient samples - scale, # get the scale parameter - scale!, # provide storage for the scale parameter (used in multivariate distribution mvlognormal) - shape, # get the shape parameter - skewness, # skewness of the distribution - span, # the span of the support, e.g. maximum(d) - minimum(d) - std, # standard deviation of distribution - stdlogx, # standard deviation of log(x) - suffstats, # compute sufficient statistics - succprob, # the success probability - support, # the support of a distribution (or a distribution type) - truncated, # truncate a distribution with a lower and upper bound - var, # variance of distribution - varlogx, # variance of log(x) - expected_logdet, # expected logarithm of random matrix determinant - gradlogpdf, # gradient (or derivative) of logpdf(d,x) wrt x + quantile, # inverse of cdf (defined for p in (0,1)) + qqbuild, # build a paired quantiles data structure for qqplots + rate, # get the rate parameter + sampler, # create a Sampler object for efficient samples + scale, # get the scale parameter + scale!, # provide storage for the scale parameter (used in multivariate distribution mvlognormal) + shape, # get the shape parameter + skewness, # skewness of the distribution + span, # the span of the support, e.g. maximum(d) - minimum(d) + std, # standard deviation of distribution + stdlogx, # standard deviation of log(x) + suffstats, # compute sufficient statistics + succprob, # the success probability + support, # the support of a distribution (or a distribution type) + truncated, # truncate a distribution with a lower and upper bound + var, # variance of distribution + varlogx, # variance of log(x) + expected_logdet, # expected logarithm of random matrix determinant + gradlogpdf, # gradient (or derivative) of logpdf(d,x) wrt x # reexport from StatsBase sample, - sample!, # sample from a source array + sample!, # sample from a source array wsample, wsample! 
# weighted sampling from a source array diff --git a/src/censored.jl b/src/censored.jl index 0db920f8b9..5565b7ce97 100644 --- a/src/censored.jl +++ b/src/censored.jl @@ -40,7 +40,7 @@ should be implemented: - `censored(d0::D, l::Real, ::Nothing)` """ function censored end -function censored(d0::UnivariateDistribution, l::T, u::T) where {T<:Real} +function censored(d0::UnivariateDistribution, l::T, u::T) where {T <: Real} return Censored(d0, l, u) end function censored(d0::UnivariateDistribution, ::Nothing, u::Real) @@ -52,10 +52,10 @@ end censored(d0::UnivariateDistribution, l::Real, u::Real) = censored(d0, promote(l, u)...) censored(d0::UnivariateDistribution, ::Nothing, ::Nothing) = d0 function censored( - d0::UnivariateDistribution; - lower::Union{Real,Nothing} = nothing, - upper::Union{Real,Nothing} = nothing, -) + d0::UnivariateDistribution; + lower::Union{Real, Nothing} = nothing, + upper::Union{Real, Nothing} = nothing, + ) return censored(d0, lower, upper) end @@ -65,48 +65,48 @@ end Generic wrapper for a [`censored`](@ref) distribution. """ struct Censored{ - D<:UnivariateDistribution, - S<:ValueSupport, - T<:Real, - TL<:Union{T,Nothing}, - TU<:Union{T,Nothing}, -} <: UnivariateDistribution{S} + D <: UnivariateDistribution, + S <: ValueSupport, + T <: Real, + TL <: Union{T, Nothing}, + TU <: Union{T, Nothing}, + } <: UnivariateDistribution{S} uncensored::D # the original distribution (uncensored) lower::TL # lower bound upper::TU # upper bound function Censored( - d0::UnivariateDistribution, - lower::T, - upper::T; - check_args::Bool = true, - ) where {T<:Real} + d0::UnivariateDistribution, + lower::T, + upper::T; + check_args::Bool = true, + ) where {T <: Real} @check_args(Censored, lower ≤ upper) - new{typeof(d0),value_support(typeof(d0)),T,T,T}(d0, lower, upper) + return new{typeof(d0), value_support(typeof(d0)), T, T, T}(d0, lower, upper) end function Censored( - d0::UnivariateDistribution, - l::Nothing, - u::Real; - check_args::Bool = true, - ) - new{typeof(d0),value_support(typeof(d0)),typeof(u),Nothing,typeof(u)}(d0, l, u) + d0::UnivariateDistribution, + l::Nothing, + u::Real; + check_args::Bool = true, + ) + return new{typeof(d0), value_support(typeof(d0)), typeof(u), Nothing, typeof(u)}(d0, l, u) end function Censored( - d0::UnivariateDistribution, - l::Real, - u::Nothing; - check_args::Bool = true, - ) - new{typeof(d0),value_support(typeof(d0)),typeof(l),typeof(l),Nothing}(d0, l, u) + d0::UnivariateDistribution, + l::Real, + u::Nothing; + check_args::Bool = true, + ) + return new{typeof(d0), value_support(typeof(d0)), typeof(l), typeof(l), Nothing}(d0, l, u) end end -const LeftCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = - Censored{D,S,T,T,Nothing} -const RightCensored{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = - Censored{D,S,T,Nothing,T} +const LeftCensored{D <: UnivariateDistribution, S <: ValueSupport, T <: Real} = + Censored{D, S, T, T, Nothing} +const RightCensored{D <: UnivariateDistribution, S <: ValueSupport, T <: Real} = + Censored{D, S, T, Nothing, T} -function censored(d::Censored, l::T, u::T) where {T<:Real} +function censored(d::Censored, l::T, u::T) where {T <: Real} return censored( d.uncensored, d.lower === nothing ? 
l : max(l, d.lower), @@ -125,11 +125,11 @@ function params(d::Censored) return (d0params..., d.lower, d.upper) end -function partype(d::Censored{<:UnivariateDistribution,<:ValueSupport,T}) where {T} +function partype(d::Censored{<:UnivariateDistribution, <:ValueSupport, T}) where {T} return promote_type(partype(d.uncensored), T) end -Base.eltype(::Type{<:Censored{D,S,T}}) where {D,S,T} = promote_type(T, eltype(D)) +Base.eltype(::Type{<:Censored{D, S, T}}) where {D, S, T} = promote_type(T, eltype(D)) #### Range and Support @@ -152,8 +152,8 @@ function insupport(d::Censored, x::Real) upper = d.upper return ( (_in_open_interval(x, lower, upper) && insupport(d0, x)) || - (x == lower && cdf(d0, lower) > 0) || - (x == upper && _ccdf_inclusive(d0, upper) > 0) + (x == lower && cdf(d0, lower) > 0) || + (x == upper && _ccdf_inclusive(d0, upper) > 0) ) end @@ -164,7 +164,7 @@ function show(io::IO, ::MIME"text/plain", d::Censored) d0 = d.uncensored uml, namevals = _use_multline_show(d0) uml ? show_multline(io, d0, namevals; newline = false) : show_oneline(io, d0, namevals) - if d.lower === nothing + return if d.lower === nothing print(io, "; upper=$(d.upper))") elseif d.upper === nothing print(io, "; lower=$(d.lower))") @@ -211,8 +211,8 @@ function mean(d::Censored) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = ( xexpy(lower, log_prob_lower) + - xexpy(upper, log_prob_upper) + - xexpy(mean(_to_truncated(d)), log_prob_interval) + xexpy(upper, log_prob_upper) + + xexpy(mean(_to_truncated(d)), log_prob_interval) ) return μ end @@ -250,21 +250,21 @@ function var(d::Censored) μ_interval = mean(dtrunc) μ = ( xexpy(lower, log_prob_lower) + - xexpy(upper, log_prob_upper) + - xexpy(μ_interval, log_prob_interval) + xexpy(upper, log_prob_upper) + + xexpy(μ_interval, log_prob_interval) ) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = ( xexpy(abs2(lower - μ), log_prob_lower) + - xexpy(abs2(upper - μ), log_prob_upper) + - xexpy(v_interval, log_prob_interval) + xexpy(abs2(upper - μ), log_prob_upper) + + xexpy(v_interval, log_prob_interval) ) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) -# + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + +# + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. 
@@ -434,7 +434,7 @@ function ccdf(d::Censored, x::Real) end end -function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} +function logccdf(d::Censored{<:Any, <:Any, T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) diff --git a/src/cholesky/lkjcholesky.jl b/src/cholesky/lkjcholesky.jl index bf6c478964..69a596965d 100644 --- a/src/cholesky/lkjcholesky.jl +++ b/src/cholesky/lkjcholesky.jl @@ -29,7 +29,7 @@ External links Journal of Multivariate Analysis (2009), 100(9): 1989-2001 doi: [10.1016/j.jmva.2009.04.008](https://doi.org/10.1016/j.jmva.2009.04.008) """ -struct LKJCholesky{T<:Real} <: Distribution{CholeskyVariate,Continuous} +struct LKJCholesky{T <: Real} <: Distribution{CholeskyVariate, Continuous} d::Int η::T uplo::Char @@ -41,11 +41,11 @@ end # ----------------------------------------------------------------------------- function LKJCholesky( - d::Int, - η::Real, - _uplo::Union{Char,Symbol} = 'L'; - check_args::Bool = true, -) + d::Int, + η::Real, + _uplo::Union{Char, Symbol} = 'L'; + check_args::Bool = true, + ) @check_args( LKJCholesky, (d, d > 0, "matrix dimension must be positive"), @@ -58,7 +58,7 @@ function LKJCholesky( end # adapted from LinearAlgebra.char_uplo -function _char_uplo(uplo::Union{Symbol,Char}) +function _char_uplo(uplo::Union{Symbol, Char}) uplo ∈ (:U, 'U') && return 'U' uplo ∈ (:L, 'L') && return 'L' throw(ArgumentError("uplo argument must be either 'U' (upper) or 'L' (lower)")) @@ -74,18 +74,18 @@ Base.show(io::IO, d::LKJCholesky) = show(io, d, (:d, :η, :uplo)) # Conversion # ----------------------------------------------------------------------------- -function Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky) where {T<:Real} +function Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky) where {T <: Real} return LKJCholesky{T}(d.d, T(d.η), d.uplo, T(d.logc0)) end -Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky{T}) where {T<:Real} = d +Base.convert(::Type{LKJCholesky{T}}, d::LKJCholesky{T}) where {T <: Real} = d function convert( - ::Type{LKJCholesky{T}}, - d::Integer, - η::Real, - uplo::Char, - logc0::Real, -) where {T<:Real} + ::Type{LKJCholesky{T}}, + d::Integer, + η::Real, + uplo::Char, + logc0::Real, + ) where {T <: Real} return LKJCholesky{T}(Int(d), T(η), uplo, T(logc0)) end @@ -132,7 +132,7 @@ end StatsBase.params(d::LKJCholesky) = (d.d, d.η, d.uplo) -@inline partype(::LKJCholesky{T}) where {T<:Real} = T +@inline partype(::LKJCholesky{T}) where {T <: Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -179,7 +179,7 @@ function Base.rand(rng::AbstractRNG, d::LKJCholesky, dims::Dims) uplo = d.uplo T = eltype(d) TM = Matrix{T} - Rs = Array{LinearAlgebra.Cholesky{T,TM}}(undef, dims) + Rs = Array{LinearAlgebra.Cholesky{T, TM}}(undef, dims) for i in eachindex(Rs) factors = TM(undef, p, p) Rs[i] = R = LinearAlgebra.Cholesky(factors, uplo, 0) @@ -194,11 +194,11 @@ function Random.rand!(rng::AbstractRNG, d::LKJCholesky, R::LinearAlgebra.Cholesk end function Random.rand!( - rng::AbstractRNG, - d::LKJCholesky, - Rs::AbstractArray{<:LinearAlgebra.Cholesky{T,TM}}, - allocate::Bool, -) where {T,TM} + rng::AbstractRNG, + d::LKJCholesky, + Rs::AbstractArray{<:LinearAlgebra.Cholesky{T, TM}}, + allocate::Bool, + ) where {T, TM} p = d.d uplo = d.uplo if allocate @@ -217,10 +217,10 @@ function Random.rand!( return Rs end function Random.rand!( - rng::AbstractRNG, - d::LKJCholesky, - Rs::AbstractArray{<:LinearAlgebra.Cholesky{<:Real}}, -) + rng::AbstractRNG, + 
d::LKJCholesky, + Rs::AbstractArray{<:LinearAlgebra.Cholesky{<:Real}}, + ) allocate = any(!isassigned(Rs, i) for i in eachindex(Rs)) || any(R -> size(R, 1) != d.d, Rs) return Random.rand!(rng, d, Rs, allocate) end @@ -231,10 +231,10 @@ end # function _lkj_cholesky_onion_sampler!( - rng::AbstractRNG, - d::LKJCholesky, - R::LinearAlgebra.Cholesky, -) + rng::AbstractRNG, + d::LKJCholesky, + R::LinearAlgebra.Cholesky, + ) if R.uplo === 'U' _lkj_cholesky_onion_tri!(rng, R.factors, d.d, d.η, Val(:U)) else @@ -244,12 +244,12 @@ function _lkj_cholesky_onion_sampler!( end function _lkj_cholesky_onion_tri!( - rng::AbstractRNG, - A::AbstractMatrix, - d::Int, - η::Real, - ::Val{uplo}, -) where {uplo} + rng::AbstractRNG, + A::AbstractMatrix, + d::Int, + η::Real, + ::Val{uplo}, + ) where {uplo} # Section 3.2 in LKJ (2009 JMA) # reformulated to incrementally construct Cholesky factor as mentioned in Section 5 # equivalent steps in algorithm in reference are marked. @@ -266,18 +266,18 @@ function _lkj_cholesky_onion_tri!( end @inbounds A[2, 2] = sqrt(1 - w0^2) # 2. Loop, each iteration k adds row/column k+1 - for k = 2:(d-1) + for k in 2:(d - 1) # (a) β -= 1 // 2 # (b) y = rand(rng, Beta(k // 2, β)) # (c)-(e) # w is directionally uniform vector of length √y - @inbounds w = @views uplo === :L ? A[k+1, 1:k] : A[1:k, k+1] + @inbounds w = @views uplo === :L ? A[k + 1, 1:k] : A[1:k, k + 1] Random.randn!(rng, w) rmul!(w, sqrt(y) / norm(w)) # normalize so new row/column has unit norm - @inbounds A[k+1, k+1] = sqrt(1 - y) + @inbounds A[k + 1, k + 1] = sqrt(1 - y) end # 3. return A diff --git a/src/common.jl b/src/common.jl index 4921a595dc..271aa317a4 100644 --- a/src/common.jl +++ b/src/common.jl @@ -75,10 +75,10 @@ Parametrized by a `VariateForm` defining the dimension of samples and a `ValueSupport` defining the domain of possibly sampled values. Any `Sampleable` implements the `Base.rand` method. """ -abstract type Sampleable{F<:VariateForm,S<:ValueSupport} end +abstract type Sampleable{F <: VariateForm, S <: ValueSupport} end variate_form(::Type{<:Sampleable{VF}}) where {VF} = VF -value_support(::Type{<:Sampleable{<:VariateForm,VS}}) where {VS} = VS +value_support(::Type{<:Sampleable{<:VariateForm, VS}}) where {VS} = VS """ length(s::Sampleable) @@ -106,8 +106,8 @@ The default element type of a sample. This is the type of elements of the samples generated by the `rand` method. However, one can provide an array of different element types to store the samples using `rand!`. """ -Base.eltype(::Type{<:Sampleable{F,Discrete}}) where {F} = Int -Base.eltype(::Type{<:Sampleable{F,Continuous}}) where {F} = Float64 +Base.eltype(::Type{<:Sampleable{F, Discrete}}) where {F} = Int +Base.eltype(::Type{<:Sampleable{F, Continuous}}) where {F} = Float64 """ nsamples(s::Sampleable) @@ -116,16 +116,16 @@ The number of values contained in one sample of `s`. Multiple samples are often organized into an array, depending on the variate form.
""" nsamples(t::Type{Sampleable}, x::Any) -nsamples(::Type{D}, x::Number) where {D<:Sampleable{Univariate}} = 1 -nsamples(::Type{D}, x::AbstractArray) where {D<:Sampleable{Univariate}} = length(x) -nsamples(::Type{D}, x::AbstractVector) where {D<:Sampleable{Multivariate}} = 1 -nsamples(::Type{D}, x::AbstractMatrix) where {D<:Sampleable{Multivariate}} = size(x, 2) -nsamples(::Type{D}, x::Number) where {D<:Sampleable{Matrixvariate}} = 1 -nsamples(::Type{D}, x::Array{Matrix{T}}) where {D<:Sampleable{Matrixvariate},T<:Number} = +nsamples(::Type{D}, x::Number) where {D <: Sampleable{Univariate}} = 1 +nsamples(::Type{D}, x::AbstractArray) where {D <: Sampleable{Univariate}} = length(x) +nsamples(::Type{D}, x::AbstractVector) where {D <: Sampleable{Multivariate}} = 1 +nsamples(::Type{D}, x::AbstractMatrix) where {D <: Sampleable{Multivariate}} = size(x, 2) +nsamples(::Type{D}, x::Number) where {D <: Sampleable{Matrixvariate}} = 1 +nsamples(::Type{D}, x::Array{Matrix{T}}) where {D <: Sampleable{Matrixvariate}, T <: Number} = length(x) for func in (:(==), :isequal, :isapprox) - @eval function Base.$func(s1::A, s2::B; kwargs...) where {A<:Sampleable,B<:Sampleable} + @eval function Base.$func(s1::A, s2::B; kwargs...) where {A <: Sampleable, B <: Sampleable} nameof(A) === nameof(B) || return false fields = fieldnames(A) fields === fieldnames(B) || return false @@ -143,7 +143,7 @@ for func in (:(==), :isequal, :isapprox) end end -function Base.hash(s::S, h::UInt) where {S<:Sampleable} +function Base.hash(s::S, h::UInt) where {S <: Sampleable} hashed = hash(Sampleable, h) hashed = hash(nameof(S), hashed) @@ -162,22 +162,22 @@ distribution. Distributions define a Probability Distribution Function (PDF) to implement with `pdf` and a Cumulative Distribution Function (CDF) to implement with `cdf`. 
""" -abstract type Distribution{F<:VariateForm,S<:ValueSupport} <: Sampleable{F,S} end +abstract type Distribution{F <: VariateForm, S <: ValueSupport} <: Sampleable{F, S} end -const UnivariateDistribution{S<:ValueSupport} = Distribution{Univariate,S} -const MultivariateDistribution{S<:ValueSupport} = Distribution{Multivariate,S} -const MatrixDistribution{S<:ValueSupport} = Distribution{Matrixvariate,S} -const NonMatrixDistribution = Union{UnivariateDistribution,MultivariateDistribution} +const UnivariateDistribution{S <: ValueSupport} = Distribution{Univariate, S} +const MultivariateDistribution{S <: ValueSupport} = Distribution{Multivariate, S} +const MatrixDistribution{S <: ValueSupport} = Distribution{Matrixvariate, S} +const NonMatrixDistribution = Union{UnivariateDistribution, MultivariateDistribution} -const DiscreteDistribution{F<:VariateForm} = Distribution{F,Discrete} -const ContinuousDistribution{F<:VariateForm} = Distribution{F,Continuous} +const DiscreteDistribution{F <: VariateForm} = Distribution{F, Discrete} +const ContinuousDistribution{F <: VariateForm} = Distribution{F, Continuous} -const DiscreteUnivariateDistribution = Distribution{Univariate,Discrete} -const ContinuousUnivariateDistribution = Distribution{Univariate,Continuous} -const DiscreteMultivariateDistribution = Distribution{Multivariate,Discrete} -const ContinuousMultivariateDistribution = Distribution{Multivariate,Continuous} -const DiscreteMatrixDistribution = Distribution{Matrixvariate,Discrete} -const ContinuousMatrixDistribution = Distribution{Matrixvariate,Continuous} +const DiscreteUnivariateDistribution = Distribution{Univariate, Discrete} +const ContinuousUnivariateDistribution = Distribution{Univariate, Continuous} +const DiscreteMultivariateDistribution = Distribution{Multivariate, Discrete} +const ContinuousMultivariateDistribution = Distribution{Multivariate, Continuous} +const DiscreteMatrixDistribution = Distribution{Matrixvariate, Discrete} +const ContinuousMatrixDistribution = Distribution{Matrixvariate, Continuous} # allow broadcasting over distribution objects # to be decided: how to handle multivariate/matrixvariate distributions? @@ -221,9 +221,9 @@ usually it is sufficient to implement `logpdf`. See also: [`logpdf`](@ref). """ @inline function pdf( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} if M == N @boundscheck begin size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) @@ -243,7 +243,7 @@ See also: [`logpdf`](@ref). end end -function _pdf(d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where {N} +function _pdf(d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real, N}) where {N} return exp(@inbounds logpdf(d, x)) end @@ -263,9 +263,9 @@ size of `x`. See also: [`pdf`](@ref), [`gradlogpdf`](@ref). """ @inline function logpdf( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} if M == N @boundscheck begin size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) @@ -313,9 +313,9 @@ Here, `x` can be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. 
""" Base.@propagate_inbounds function pdf( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, -) where {N} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + ) where {N} return map(Base.Fix1(pdf, d), x) end @@ -333,9 +333,9 @@ Here, `x` can be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds function logpdf( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, -) where {N} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + ) where {N} return map(Base.Fix1(logpdf, d), x) end @@ -362,26 +362,26 @@ back to `logpdf!` usually it is sufficient to implement `logpdf!`. See also: [`logpdf!`](@ref). """ Base.@propagate_inbounds function pdf!( - out::AbstractArray{<:Real}, - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N},M}, -) where {N,M} + out::AbstractArray{<:Real}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}, M}, + ) where {N, M} return map!(Base.Fix1(pdf, d), out, x) end Base.@propagate_inbounds function logpdf!( - out::AbstractArray{<:Real}, - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N},M}, -) where {N,M} + out::AbstractArray{<:Real}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}, M}, + ) where {N, M} return map!(Base.Fix1(logpdf, d), out, x) end @inline function pdf!( - out::AbstractArray{<:Real}, - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + out::AbstractArray{<:Real}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} @boundscheck begin M > N || throw( DimensionMismatch( @@ -390,17 +390,17 @@ end ) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) - length(out) == prod(i -> size(x, i), (N+1):M) || + length(out) == prod(i -> size(x, i), (N + 1):M) || throw(DimensionMismatch("inconsistent array dimensions")) end return _pdf!(out, d, x) end function _pdf!( - out::AbstractArray{<:Real}, - d::Distribution{<:ArrayLikeVariate}, - x::AbstractArray{<:Real}, -) + out::AbstractArray{<:Real}, + d::Distribution{<:ArrayLikeVariate}, + x::AbstractArray{<:Real}, + ) @inbounds logpdf!(out, d, x) map!(exp, out, out) return out @@ -428,10 +428,10 @@ the size of `out` and `x`. See also: [`pdf!`](@ref). """ @inline function logpdf!( - out::AbstractArray{<:Real}, - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + out::AbstractArray{<:Real}, + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} @boundscheck begin M > N || throw( DimensionMismatch( @@ -440,7 +440,7 @@ See also: [`pdf!`](@ref). 
) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) - length(out) == prod(i -> size(x, i), (N+1):M) || + length(out) == prod(i -> size(x, i), (N + 1):M) || throw(DimensionMismatch("inconsistent array dimensions")) end return _logpdf!(out, d, x) @@ -448,10 +448,10 @@ end # default definition function _logpdf!( - out::AbstractArray{<:Real}, - d::Distribution{<:ArrayLikeVariate}, - x::AbstractArray{<:Real}, -) + out::AbstractArray{<:Real}, + d::Distribution{<:ArrayLikeVariate}, + x::AbstractArray{<:Real}, + ) @inbounds map!(Base.Fix1(logpdf, d), out, eachvariate(x, variate_form(typeof(d)))) return out end @@ -468,9 +468,9 @@ be - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds @inline function loglikelihood( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} if M == N return logpdf(d, x) else @@ -487,9 +487,9 @@ Base.@propagate_inbounds @inline function loglikelihood( end end Base.@propagate_inbounds function loglikelihood( - d::Distribution{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, -) where {N} + d::Distribution{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + ) where {N} return sum(Base.Fix1(logpdf, d), x) end @@ -497,8 +497,8 @@ end abstract type SufficientStats end abstract type IncompleteDistribution end -const DistributionType{D<:Distribution} = Type{D} -const IncompleteFormulation = Union{DistributionType,IncompleteDistribution} +const DistributionType{D <: Distribution} = Type{D} +const IncompleteFormulation = Union{DistributionType, IncompleteDistribution} """ succprob(d::DiscreteUnivariateDistribution) @@ -526,15 +526,17 @@ RFunction. Calls using another random number generator still work, but rely on a quantile function to operate. 
""" macro rand_rdist(D) - esc(quote - function rand(d::$D, n::Int) - [rand(d) for i in Base.OneTo(n)] - end - function rand!(d::$D, X::AbstractArray) - for i in eachindex(X) - X[i] = rand(d) + return esc( + quote + function rand(d::$D, n::Int) + return [rand(d) for i in Base.OneTo(n)] + end + function rand!(d::$D, X::AbstractArray) + for i in eachindex(X) + X[i] = rand(d) + end + return X end - return X end - end) + ) end diff --git a/src/conversion.jl b/src/conversion.jl index ce4bf5fce5..eac0e35b5e 100644 --- a/src/conversion.jl +++ b/src/conversion.jl @@ -1,4 +1,3 @@ - # Discrete univariate convert(::Type{Binomial}, d::Bernoulli) = Binomial(1, d.p) diff --git a/src/convolution.jl b/src/convolution.jl index a083c33669..b0dce22b5c 100644 --- a/src/convolution.jl +++ b/src/convolution.jl @@ -58,7 +58,7 @@ function convolve(d1::DiscreteNonParametric, d2::DiscreteNonParametric) idx = searchsortedfirst(support_conv, s1 + s2) p_conv[idx] += p1 * p2 end - DiscreteNonParametric(support_conv, p_conv, check_args = false) + return DiscreteNonParametric(support_conv, p_conv, check_args = false) end # continuous univariate @@ -83,7 +83,7 @@ function convolve(d1::MvNormal, d2::MvNormal) end function _check_convolution_args(p1, p2) - p1 ≈ p2 || throw( + return p1 ≈ p2 || throw( ArgumentError( "$(p1) !≈ $(p2): distribution parameters must be approximately equal", ), @@ -91,5 +91,5 @@ function _check_convolution_args(p1, p2) end function _check_convolution_shape(d1, d2) - length(d1) == length(d2) || throw(ArgumentError("$d1 and $d2 are not the same size")) + return length(d1) == length(d2) || throw(ArgumentError("$d1 and $d2 are not the same size")) end diff --git a/src/deprecates.jl b/src/deprecates.jl index 75df5fe5e0..a718f3af95 100644 --- a/src/deprecates.jl +++ b/src/deprecates.jl @@ -12,7 +12,7 @@ function Binomial(n::Real, p::Real) "Binomial(n::Real, p) is deprecated. Please use Binomial(n::Integer, p) instead.", :Binomial, ) - Binomial(Int(n), p) + return Binomial(Int(n), p) end function Binomial(n::Real) @@ -20,7 +20,7 @@ function Binomial(n::Real) "Binomial(n::Real) is deprecated. Please use Binomial(n::Integer) instead.", :Binomial, ) - Binomial(Int(n)) + return Binomial(Int(n)) end function BetaBinomial(n::Real, α::Real, β::Real) @@ -28,23 +28,23 @@ function BetaBinomial(n::Real, α::Real, β::Real) "BetaBinomial(n::Real, α, β) is deprecated. Please use BetaBinomial(n::Integer, α, β) instead.", :BetaBinomial, ) - BetaBinomial(Int(n), α, β) + return BetaBinomial(Int(n), α, β) end # vectorized versions for fun in [ - :pdf, - :logpdf, - :cdf, - :logcdf, - :ccdf, - :logccdf, - :invlogcdf, - :invlogccdf, - :quantile, - :cquantile, -] + :pdf, + :logpdf, + :cdf, + :logcdf, + :ccdf, + :logccdf, + :invlogcdf, + :invlogccdf, + :quantile, + :cquantile, + ] _fun! = Symbol('_', fun, '!') fun! = Symbol(fun, '!') @@ -85,7 +85,7 @@ end distr, ) false @deprecate expectation( - distr::Union{UnivariateDistribution,MultivariateDistribution}, + distr::Union{UnivariateDistribution, MultivariateDistribution}, g::Function; kwargs..., ) expectation(g, distr; kwargs...) 
false @@ -94,15 +94,15 @@ end # This is very similar to `Base.@deprecate_binding MatrixReshaped{...} ReshapedDistribution{...}` # However, `Base.@deprecate_binding` does not support type parameters export MatrixReshaped -const MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = - ReshapedDistribution{2,S,D} +const MatrixReshaped{S <: ValueSupport, D <: MultivariateDistribution{S}} = + ReshapedDistribution{2, S, D} Base.deprecate(@__MODULE__, :MatrixReshaped) # This is very similar to `Base.@deprecate MatrixReshaped(...) reshape(...)` # We use another (unexported!) alias here to not throw a deprecation warning/error # Unexported aliases do not affect the type printing # In Julia >= 1.6, instead of a new alias we could have defined a method for (ReshapedDistribution{2,S,D} where {S<:ValueSupport,D<:MultivariateDistribution{S}}) -const _MatrixReshaped{S<:ValueSupport,D<:MultivariateDistribution{S}} = - ReshapedDistribution{2,S,D} +const _MatrixReshaped{S <: ValueSupport, D <: MultivariateDistribution{S}} = + ReshapedDistribution{2, S, D} function _MatrixReshaped(d::MultivariateDistribution, n::Integer, p::Integer = n) Base.depwarn( "`MatrixReshaped(d, n, p)` is deprecated, use `reshape(d, (n, p))` instead.", diff --git a/src/eachvariate.jl b/src/eachvariate.jl index 37f7f35bf2..5783ea172e 100644 --- a/src/eachvariate.jl +++ b/src/eachvariate.jl @@ -1,21 +1,21 @@ ## AbstractArray wrapper for collection of variates ## Similar to https://github.com/JuliaLang/julia/pull/32310 - replace with EachSlice? -struct EachVariate{V,P,A,T,N} <: AbstractArray{T,N} +struct EachVariate{V, P, A, T, N} <: AbstractArray{T, N} parent::P axes::A end -function EachVariate{V}(x::AbstractArray{<:Real,M}) where {V,M} +function EachVariate{V}(x::AbstractArray{<:Real, M}) where {V, M} ax = ntuple(i -> axes(x, i + V), Val(M - V)) T = Base.promote_op( view, typeof(x), ntuple(i -> i <= V ? Colon : eltype(axes(x, i)), Val(M))..., ) - return EachVariate{V,typeof(x),typeof(ax),T,M - V}(x, ax) + return EachVariate{V, typeof(x), typeof(ax), T, M - V}(x, ax) end -Base.IteratorSize(::Type{EachVariate{V,P,A,T,N}}) where {V,P,A,T,N} = Base.HasShape{N}() +Base.IteratorSize(::Type{EachVariate{V, P, A, T, N}}) where {V, P, A, T, N} = Base.HasShape{N}() Base.axes(x::EachVariate) = x.axes @@ -24,9 +24,9 @@ Base.size(x::EachVariate, d::Int) = 1 <= ndims(x) ? length(axes(x)[d]) : 1 # We don't need `setindex!` (currently), therefore only `getindex` is implemented Base.@propagate_inbounds function Base.getindex( - x::EachVariate{V,P,A,T,N}, - I::Vararg{Int,N}, -) where {V,P,A,T,N} + x::EachVariate{V, P, A, T, N}, + I::Vararg{Int, N}, + ) where {V, P, A, T, N} return view(x.parent, ntuple(_ -> Colon(), Val(V))..., I...) 
end diff --git a/src/edgeworth.jl b/src/edgeworth.jl index fa41fbb3cf..05daeafd05 100644 --- a/src/edgeworth.jl +++ b/src/edgeworth.jl @@ -9,17 +9,17 @@ abstract type EdgeworthAbstract <: ContinuousUnivariateDistribution end skewness(d::EdgeworthAbstract) = skewness(d.dist) / sqrt(d.n) kurtosis(d::EdgeworthAbstract) = kurtosis(d.dist) / d.n -struct EdgeworthZ{D<:UnivariateDistribution} <: EdgeworthAbstract +struct EdgeworthZ{D <: UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 function EdgeworthZ{D}( - d::UnivariateDistribution, - n::Real; - check_args::Bool = true, - ) where {D<:UnivariateDistribution} + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D <: UnivariateDistribution} @check_args EdgeworthZ (n, n > zero(n)) - new{D}(d, n) + return new{D}(d, n) end end function EdgeworthZ(d::UnivariateDistribution, n::Real; check_args::Bool = true) @@ -34,33 +34,33 @@ function pdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) x2 = x * x - pdf(Normal(0, 1), x) * ( + return pdf(Normal(0, 1), x) * ( 1 + - s * x * (x2 - 3.0) / 6.0 + - k * (x2 * (x2 - 6.0) + 3.0) / 24.0 + - s * s * (x2 * (x2 * (x2 - 15.0) + 45.0) - 15.0) / 72.0 + s * x * (x2 - 3.0) / 6.0 + + k * (x2 * (x2 - 6.0) + 3.0) / 24.0 + + s * s * (x2 * (x2 * (x2 - 15.0) + 45.0) - 15.0) / 72.0 ) end function cdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) x2 = x * x - cdf(Normal(0, 1), x) - - pdf(Normal(0, 1), x) * ( + return cdf(Normal(0, 1), x) - + pdf(Normal(0, 1), x) * ( s * (x2 - 1.0) / 6.0 + - k * x * (x2 - 3.0) / 24.0 + - s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 + k * x * (x2 - 3.0) / 24.0 + + s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 ) end function ccdf(d::EdgeworthZ, x::Float64) s = skewness(d) k = kurtosis(d) x2 = x * x - ccdf(Normal(0, 1), x) + - pdf(Normal(0, 1), x) * ( + return ccdf(Normal(0, 1), x) + + pdf(Normal(0, 1), x) * ( s * (x2 - 1.0) / 6.0 + - k * x * (x2 - 3.0) / 24.0 + - s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 + k * x * (x2 - 3.0) / 24.0 + + s * s * x * (x2 * (x2 - 10.0) + 15.0) / 72.0 ) end @@ -71,7 +71,7 @@ function quantile(d::EdgeworthZ, p::Float64) k = kurtosis(d) z = quantile(Normal(0, 1), p) z2 = z * z - z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) + return z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) end function cquantile(d::EdgeworthZ, p::Float64) @@ -79,22 +79,21 @@ function cquantile(d::EdgeworthZ, p::Float64) k = kurtosis(d) z = cquantile(Normal(0, 1), p) z2 = z * z - z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) + return z + s * (z2 - 1) / 6.0 + k * z * (z2 - 3) / 24.0 - s * s / 36.0 * z * (2.0 * z2 - 5.0) end - # Edgeworth approximation of the sum -struct EdgeworthSum{D<:UnivariateDistribution} <: EdgeworthAbstract +struct EdgeworthSum{D <: UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 function EdgeworthSum{D}( - d::UnivariateDistribution, - n::Real; - check_args::Bool = true, - ) where {D<:UnivariateDistribution} + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D <: UnivariateDistribution} @check_args EdgeworthSum (n, n > zero(n)) - new{D}(d, n) + return new{D}(d, n) end end function EdgeworthSum(d::UnivariateDistribution, n::Real; check_args::Bool = true) @@ -105,17 +104,17 @@ mean(d::EdgeworthSum) = d.n * mean(d.dist) var(d::EdgeworthSum) = d.n * var(d.dist) # Edgeworth approximation of the mean -struct EdgeworthMean{D<:UnivariateDistribution} <: 
EdgeworthAbstract +struct EdgeworthMean{D <: UnivariateDistribution} <: EdgeworthAbstract dist::D n::Float64 function EdgeworthMean{D}( - d::UnivariateDistribution, - n::Real; - check_args::Bool = true, - ) where {D<:UnivariateDistribution} + d::UnivariateDistribution, + n::Real; + check_args::Bool = true, + ) where {D <: UnivariateDistribution} # although n would usually be an integer, no methods require this @check_args EdgeworthMean (n, n > zero(n), "n must be positive") - new{D}(d, Float64(n)) + return new{D}(d, Float64(n)) end end function EdgeworthMean(d::UnivariateDistribution, n::Real; check_args::Bool = true) @@ -127,7 +126,7 @@ var(d::EdgeworthMean) = var(d.dist) / d.n function pdf(d::EdgeworthAbstract, x::Float64) m, s = mean(d), std(d) - pdf(EdgeworthZ(d.dist, d.n), (x - m) / s) / s + return pdf(EdgeworthZ(d.dist, d.n), (x - m) / s) / s end cdf(d::EdgeworthAbstract, x::Float64) = cdf(EdgeworthZ(d.dist, d.n), (x - mean(d)) / std(d)) diff --git a/src/estimators.jl b/src/estimators.jl index d07d9d1fc4..397aec1788 100644 --- a/src/estimators.jl +++ b/src/estimators.jl @@ -3,13 +3,13 @@ export Estimator, MLEstimator export nsamples, estimate -abstract type Estimator{D<:Distribution} end -nsamples(e::Estimator{D}, x::Array) where {D<:UnivariateDistribution} = length(x) -nsamples(e::Estimator{D}, x::Matrix) where {D<:MultivariateDistribution} = size(x, 2) +abstract type Estimator{D <: Distribution} end +nsamples(e::Estimator{D}, x::Array) where {D <: UnivariateDistribution} = length(x) +nsamples(e::Estimator{D}, x::Matrix) where {D <: MultivariateDistribution} = size(x, 2) -mutable struct MLEstimator{D<:Distribution} <: Estimator{D} end -MLEstimator(::Type{D}) where {D<:Distribution} = MLEstimator{D}() +mutable struct MLEstimator{D <: Distribution} <: Estimator{D} end +MLEstimator(::Type{D}) where {D <: Distribution} = MLEstimator{D}() -estimate(e::MLEstimator{D}, x) where {D<:Distribution} = fit_mle(D, x) -estimate(e::MLEstimator{D}, x, w) where {D<:Distribution} = fit_mle(D, x, w) +estimate(e::MLEstimator{D}, x) where {D <: Distribution} = fit_mle(D, x) +estimate(e::MLEstimator{D}, x, w) where {D <: Distribution} = fit_mle(D, x, w) diff --git a/src/functionals.jl b/src/functionals.jl index 37e1250e61..bc98e68ad2 100644 --- a/src/functionals.jl +++ b/src/functionals.jl @@ -3,7 +3,7 @@ function expectation(g, distr::ContinuousUnivariateDistribution; kwargs...) end ## Assuming that discrete distributions only take integer values. -function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real = 1e-10) +function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real = 1.0e-10) mindist, maxdist = extrema(distr) # We want to avoid taking values up to infinity minval = isfinite(mindist) ?
mindist : quantile(distr, epsilon) @@ -12,17 +12,17 @@ function expectation(g, distr::DiscreteUnivariateDistribution; epsilon::Real = 1 end function expectation( - g, - distr::MultivariateDistribution; - nsamples::Int = 100, - rng::AbstractRNG = default_rng(), -) + g, + distr::MultivariateDistribution; + nsamples::Int = 100, + rng::AbstractRNG = default_rng(), + ) nsamples > 0 || throw(ArgumentError("number of samples should be > 0")) # We use a function barrier to work around type instability of `sampler(dist)` return mcexpectation(rng, g, sampler(distr), nsamples) end -mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ = 1:n) / n +mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ in 1:n) / n ## Leave undefined until we've implemented a numerical integration procedure # function entropy(distr::UnivariateDistribution) @@ -32,10 +32,10 @@ mcexpectation(rng, f, sampler, n) = sum(f, rand(rng, sampler) for _ = 1:n) / n # end function kldivergence( - p::Distribution{V}, - q::Distribution{V}; - kwargs..., -) where {V<:VariateForm} + p::Distribution{V}, + q::Distribution{V}; + kwargs..., + ) where {V <: VariateForm} return expectation(p; kwargs...) do x logp = logpdf(p, x) return (logp > oftype(logp, -Inf)) * (logp - logpdf(q, x)) diff --git a/src/genericfit.jl b/src/genericfit.jl index 42ac0901b5..6057f81541 100644 --- a/src/genericfit.jl +++ b/src/genericfit.jl @@ -1,6 +1,6 @@ # generic functions for distribution fitting -function suffstats(dt::Type{D}, xs...) where {D<:Distribution} +function suffstats(dt::Type{D}, xs...) where {D <: Distribution} argtypes = tuple(D, map(typeof, xs)...) error("suffstats is not implemented for $argtypes.") end @@ -24,18 +24,18 @@ Here, `w` should be an array with length `n`, where `n` is the number of samples """ fit_mle(D, x, w) -fit_mle(dt::Type{D}, x::AbstractArray) where {D<:UnivariateDistribution} = +fit_mle(dt::Type{D}, x::AbstractArray) where {D <: UnivariateDistribution} = fit_mle(D, suffstats(D, x)) -fit_mle(dt::Type{D}, x::AbstractArray, w::AbstractArray) where {D<:UnivariateDistribution} = +fit_mle(dt::Type{D}, x::AbstractArray, w::AbstractArray) where {D <: UnivariateDistribution} = fit_mle(D, suffstats(D, x, w)) -fit_mle(dt::Type{D}, x::AbstractMatrix) where {D<:MultivariateDistribution} = +fit_mle(dt::Type{D}, x::AbstractMatrix) where {D <: MultivariateDistribution} = fit_mle(D, suffstats(D, x)) fit_mle( dt::Type{D}, x::AbstractMatrix, w::AbstractArray, -) where {D<:MultivariateDistribution} = fit_mle(D, suffstats(D, x, w)) +) where {D <: MultivariateDistribution} = fit_mle(D, suffstats(D, x, w)) """ fit(D, args...) @@ -50,5 +50,5 @@ change; for a function that will behave consistently across versions, see By default, the fallback is [`fit_mle(D, args...)`](@ref); developers can change this default for a specific distribution type `D <: Distribution` by defining a `fit(::Type{D}, args...)` method. """ -fit(dt::Type{D}, x) where {D<:Distribution} = fit_mle(D, x) -fit(dt::Type{D}, args...) where {D<:Distribution} = fit_mle(D, args...) +fit(dt::Type{D}, x) where {D <: Distribution} = fit_mle(D, x) +fit(dt::Type{D}, args...) where {D <: Distribution} = fit_mle(D, args...) 
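To make the `fit`/`fit_mle` contract above concrete, here is a hedged usage sketch (`Normal` is chosen only because it has a closed-form MLE; any distribution with a `suffstats` method works the same way):

```julia
using Distributions

x = rand(Normal(2.0, 3.0), 10_000)  # synthetic sample
fit_mle(Normal, x)                  # MLE computed from sufficient statistics
w = rand(length(x))                 # one non-negative weight per observation
fit_mle(Normal, x, w)               # weighted MLE
fit(Normal, x)                      # `fit` falls back to `fit_mle` by default
```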
diff --git a/src/genericrand.jl b/src/genericrand.jl index 25db1a7d69..058e7bb678 100644 --- a/src/genericrand.jl +++ b/src/genericrand.jl @@ -42,14 +42,14 @@ function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims) end # these are workarounds for sampleables that incorrectly base `eltype` on the parameters -function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}) +function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate, Continuous}) return @inbounds rand!(rng, sampler(s), Array{float(eltype(s))}(undef, size(s))) end -function rand(rng::AbstractRNG, s::Sampleable{Univariate,Continuous}, dims::Dims) +function rand(rng::AbstractRNG, s::Sampleable{Univariate, Continuous}, dims::Dims) out = Array{float(eltype(s))}(undef, dims) return @inbounds rand!(rng, sampler(s), out) end -function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims) +function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate, Continuous}, dims::Dims) sz = size(s) ax = map(Base.OneTo, dims) out = [Array{float(eltype(s))}(undef, sz) for _ in Iterators.product(ax...)] @@ -77,10 +77,10 @@ end # default definitions for arraylike variates @inline function rand!( - rng::AbstractRNG, - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,N}, -) where {N} + rng::AbstractRNG, + s::Sampleable{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, N}, + ) where {N} @boundscheck begin size(x) == size(s) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -88,10 +88,10 @@ end end @inline function rand!( - rng::AbstractRNG, - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:Real,M}, -) where {N,M} + rng::AbstractRNG, + s::Sampleable{ArrayLikeVariate{N}}, + x::AbstractArray{<:Real, M}, + ) where {N, M} @boundscheck begin M > N || throw( DimensionMismatch( @@ -106,10 +106,10 @@ end end function _rand!( - rng::AbstractRNG, - s::Sampleable{<:ArrayLikeVariate}, - x::AbstractArray{<:Real}, -) + rng::AbstractRNG, + s::Sampleable{<:ArrayLikeVariate}, + x::AbstractArray{<:Real}, + ) @inbounds for xi in eachvariate(x, variate_form(typeof(s))) rand!(rng, s, xi) end @@ -117,28 +117,28 @@ function _rand!( end Base.@propagate_inbounds function rand!( - rng::AbstractRNG, - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, -) where {N} + rng::AbstractRNG, + s::Sampleable{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + ) where {N} sz = size(s) allocate = !all(isassigned(x, i) && size(@inbounds x[i]) == sz for i in eachindex(x)) return rand!(rng, s, x, allocate) end Base.@propagate_inbounds function rand!( - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, - allocate::Bool, -) where {N} + s::Sampleable{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + allocate::Bool, + ) where {N} return rand!(default_rng(), s, x, allocate) end @inline function rand!( - rng::AbstractRNG, - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, - allocate::Bool, -) where {N} + rng::AbstractRNG, + s::Sampleable{ArrayLikeVariate{N}}, + x::AbstractArray{<:AbstractArray{<:Real, N}}, + allocate::Bool, + ) where {N} @boundscheck begin if !allocate sz = size(s) @@ -151,11 +151,11 @@ end end function _rand!( - rng::AbstractRNG, - s::Sampleable{ArrayLikeVariate{N}}, - x::AbstractArray{<:AbstractArray{<:Real,N}}, - allocate::Bool, -) where {N} + rng::AbstractRNG, + s::Sampleable{ArrayLikeVariate{N}}, + 
x::AbstractArray{<:AbstractArray{<:Real, N}}, + allocate::Bool, + ) where {N} if allocate @inbounds for i in eachindex(x) x[i] = rand(rng, s) diff --git a/src/matrix/inversewishart.jl b/src/matrix/inversewishart.jl index 9794beadab..46d5d61600 100644 --- a/src/matrix/inversewishart.jl +++ b/src/matrix/inversewishart.jl @@ -18,7 +18,7 @@ f(\\boldsymbol{\\Sigma}; \\nu,\\boldsymbol{\\Psi}) = ``\\mathbf{H}\\sim \\textrm{W}_p(\\nu, \\mathbf{S})`` if and only if ``\\mathbf{H}^{-1}\\sim \\textrm{IW}_p(\\nu, \\mathbf{S}^{-1})``. """ -struct InverseWishart{T<:Real,ST<:AbstractPDMat} <: ContinuousMatrixDistribution +struct InverseWishart{T <: Real, ST <: AbstractPDMat} <: ContinuousMatrixDistribution df::T # degree of freedom Ψ::ST # scale matrix logc0::T # log of normalizing constant @@ -28,18 +28,18 @@ end # Constructors # ----------------------------------------------------------------------------- -function InverseWishart(df::T, Ψ::AbstractPDMat{T}) where {T<:Real} +function InverseWishart(df::T, Ψ::AbstractPDMat{T}) where {T <: Real} p = size(Ψ, 1) df > p - 1 || throw(ArgumentError("df should be greater than dim - 1.")) logc0 = invwishart_logc0(df, Ψ) R = Base.promote_eltype(T, logc0) prom_Ψ = convert(AbstractArray{R}, Ψ) - InverseWishart{R,typeof(prom_Ψ)}(R(df), prom_Ψ, R(logc0)) + return InverseWishart{R, typeof(prom_Ψ)}(R(df), prom_Ψ, R(logc0)) end function InverseWishart(df::Real, Ψ::AbstractPDMat) T = Base.promote_eltype(df, Ψ) - InverseWishart(T(df), convert(AbstractArray{T}, Ψ)) + return InverseWishart(T(df), convert(AbstractArray{T}, Ψ)) end InverseWishart(df::Real, Ψ::Matrix) = InverseWishart(df, PDMat(Ψ)) @@ -56,15 +56,15 @@ show(io::IO, d::InverseWishart) = show_multline(io, d, [(:df, d.df), (:Ψ, Matri # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{InverseWishart{T}}, d::InverseWishart) where {T<:Real} +function convert(::Type{InverseWishart{T}}, d::InverseWishart) where {T <: Real} P = convert(AbstractArray{T}, d.Ψ) - InverseWishart{T,typeof(P)}(T(d.df), P, T(d.logc0)) + return InverseWishart{T, typeof(P)}(T(d.df), P, T(d.logc0)) end -Base.convert(::Type{InverseWishart{T}}, d::InverseWishart{T}) where {T<:Real} = d +Base.convert(::Type{InverseWishart{T}}, d::InverseWishart{T}) where {T <: Real} = d -function convert(::Type{InverseWishart{T}}, df, Ψ::AbstractPDMat, logc0) where {T<:Real} +function convert(::Type{InverseWishart{T}}, df, Ψ::AbstractPDMat, logc0) where {T <: Real} P = convert(AbstractArray{T}, Ψ) - InverseWishart{T,typeof(P)}(T(df), P, T(logc0)) + return InverseWishart{T, typeof(P)}(T(df), P, T(logc0)) end # ----------------------------------------------------------------------------- @@ -78,7 +78,7 @@ size(d::InverseWishart) = size(d.Ψ) rank(d::InverseWishart) = rank(d.Ψ) params(d::InverseWishart) = (d.df, d.Ψ) -@inline partype(d::InverseWishart{T}) where {T<:Real} = T +@inline partype(d::InverseWishart{T}) where {T <: Real} = T function mean(d::InverseWishart) df = d.df @@ -94,15 +94,15 @@ mode(d::InverseWishart) = d.Ψ * inv(d.df + size(d, 1) + 1.0) function cov(d::InverseWishart, i::Integer, j::Integer, k::Integer, l::Integer) p, ν, Ψ = (size(d, 1), d.df, Matrix(d.Ψ)) ν > p + 3 || throw(ArgumentError("cov only defined for df > dim + 3")) - inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * - (2Ψ[i, j] * Ψ[k, l] + (ν - p - 1) * (Ψ[i, k] * Ψ[j, l] + Ψ[i, l] * Ψ[k, j])) + return inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * + (2Ψ[i, j] * Ψ[k, l] + (ν - p - 1) * (Ψ[i, k] * Ψ[j, l] + Ψ[i, l] * Ψ[k, j])) end 
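The element-wise `cov` above and `var` below can be called directly (a minimal sketch, assuming `df > dim + 3` so that the moment formulas apply):

```julia
using Distributions, LinearAlgebra

d = InverseWishart(10.0, Matrix(1.0I, 3, 3))  # df = 10 > dim + 3 = 6
var(d, 1, 2)        # variance of the (1, 2) entry
cov(d, 1, 2, 2, 1)  # covariance of the (1, 2) and (2, 1) entries
```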
function var(d::InverseWishart, i::Integer, j::Integer) p, ν, Ψ = (size(d, 1), d.df, Matrix(d.Ψ)) ν > p + 3 || throw(ArgumentError("var only defined for df > dim + 3")) - inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * - ((ν - p + 1) * Ψ[i, j]^2 + (ν - p - 1) * Ψ[i, i] * Ψ[j, j]) + return inv((ν - p) * (ν - p - 3) * (ν - p - 1)^2) * + ((ν - p + 1) * Ψ[i, j]^2 + (ν - p - 1) * Ψ[i, i] * Ψ[j, j]) end # ----------------------------------------------------------------------------- @@ -113,7 +113,7 @@ function invwishart_logc0(df::Real, Ψ::AbstractPDMat) p = size(Ψ, 1) logdet_Ψ, _df = promote(logdet(Ψ), df) h_df = _df / 2 - -h_df * (p * oftype(logdet_Ψ, logtwo) - logdet_Ψ) - logmvgamma(p, h_df) + return -h_df * (p * oftype(logdet_Ψ, logtwo) - logdet_Ψ) - logmvgamma(p, h_df) end function logkernel(d::InverseWishart, X::AbstractMatrix) @@ -122,7 +122,7 @@ function logkernel(d::InverseWishart, X::AbstractMatrix) Xcf = cholesky(X) # we use the fact: tr(Ψ * inv(X)) = tr(inv(X) * Ψ) = tr(X \ Ψ) Ψ = Matrix(d.Ψ) - -0.5 * ((df + p + 1) * logdet(Xcf) + tr(Xcf \ Ψ)) + return -0.5 * ((df + p + 1) * logdet(Xcf) + tr(Xcf \ Ψ)) end # ----------------------------------------------------------------------------- diff --git a/src/matrix/lkj.jl b/src/matrix/lkj.jl index 3ff40d1d76..433f611822 100644 --- a/src/matrix/lkj.jl +++ b/src/matrix/lkj.jl @@ -22,7 +22,7 @@ If ``\\eta = 1``, then the LKJ distribution is uniform over if a Cholesky factor of the correlation matrix is desired, it is more efficient to use [`LKJCholesky`](@ref), which avoids factorizing the matrix. """ -struct LKJ{T<:Real,D<:Integer} <: ContinuousMatrixDistribution +struct LKJ{T <: Real, D <: Integer} <: ContinuousMatrixDistribution d::D η::T logc0::T @@ -40,7 +40,7 @@ function LKJ(d::Integer, η::Real; check_args::Bool = true) ) logc0 = lkj_logc0(d, η) T = Base.promote_eltype(η, logc0) - LKJ{T,typeof(d)}(d, T(η), T(logc0)) + return LKJ{T, typeof(d)}(d, T(η), T(logc0)) end # ----------------------------------------------------------------------------- @@ -53,13 +53,13 @@ show(io::IO, d::LKJ) = show_multline(io, d, [(:d, d.d), (:η, d.η)]) # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{LKJ{T}}, d::LKJ) where {T<:Real} - LKJ{T,typeof(d.d)}(d.d, T(d.η), T(d.logc0)) +function convert(::Type{LKJ{T}}, d::LKJ) where {T <: Real} + return LKJ{T, typeof(d.d)}(d.d, T(d.η), T(d.logc0)) end -Base.convert(::Type{LKJ{T}}, d::LKJ{T}) where {T<:Real} = d +Base.convert(::Type{LKJ{T}}, d::LKJ{T}) where {T <: Real} = d -function convert(::Type{LKJ{T}}, d::Integer, η, logc0) where {T<:Real} - LKJ{T,typeof(d)}(d, T(η), T(logc0)) +function convert(::Type{LKJ{T}}, d::Integer, η, logc0) where {T <: Real} + return LKJ{T, typeof(d)}(d, T(η), T(logc0)) end # ----------------------------------------------------------------------------- @@ -89,12 +89,12 @@ function var(lkj::LKJ) d = lkj.d d > 1 || return zeros(d, d) σ² = var(_marginal(lkj)) - σ² * (ones(partype(lkj), d, d) - I) + return σ² * (ones(partype(lkj), d, d) - I) end params(d::LKJ) = (d.d, d.η) -@inline partype(d::LKJ{T}) where {T<:Real} = T +@inline partype(d::LKJ{T}) where {T <: Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -122,7 +122,7 @@ logkernel(d::LKJ, R::AbstractMatrix) = (d.η - 1) * logdet(R) # ----------------------------------------------------------------------------- function _rand!(rng::AbstractRNG, d::LKJ, R::AbstractMatrix) - R .= _lkj_onion_sampler(d.d, d.η, rng) + return R 
.= _lkj_onion_sampler(d.d, d.η, rng) end function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.default_rng()) @@ -135,7 +135,7 @@ function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.defa R[1, 2] = 2u - 1 R[2, 1] = R[1, 2] # 2. - for k = 2:(d-1) + for k in 2:(d - 1) # (a) β -= 0.5 # (b) @@ -148,8 +148,8 @@ function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.defa A = cholesky(R[1:k, 1:k]).L z = A * w # (e) - R[1:k, k+1] = z - R[k+1, 1:k] = z' + R[1:k, k + 1] = z + R[k + 1, 1:k] = z' end # 3. return R @@ -163,7 +163,7 @@ function _marginal(lkj::LKJ) d = lkj.d η = lkj.η α = η + 0.5d - 1 - AffineDistribution(-1, 2, Beta(α, α)) + return AffineDistribution(-1, 2, Beta(α, α)) end # ----------------------------------------------------------------------------- @@ -196,31 +196,31 @@ function lkj_onion_loginvconst(d::Integer, η::Real) loginvconst = (2 * η + d - 3) * T(logtwo) + (T(logπ) / 4) * (d * (d - 1) - 2) + logbeta(α, α) - (d - 2) * loggamma(η + h * (d - 1)) - for k = 2:(d-1) + for k in 2:(d - 1) loginvconst += loggamma(η + h * (d - 1 - k)) end return loginvconst end -function lkj_onion_loginvconst_uniform_odd(d::Integer, ::Type{T}) where {T<:Real} +function lkj_onion_loginvconst_uniform_odd(d::Integer, ::Type{T}) where {T <: Real} # Theorem 5 in LKJ (2009 JMA) h = T(1 // 2) loginvconst = (d - 1) * ((d + 1) * (T(logπ) / 4) - (d - 1) * (T(logtwo) / 4) - loggamma(h * (d + 1))) - for k = 2:2:(d-1) + for k in 2:2:(d - 1) loginvconst += loggamma(T(k)) end return loginvconst end -function lkj_onion_loginvconst_uniform_even(d::Integer, ::Type{T}) where {T<:Real} +function lkj_onion_loginvconst_uniform_even(d::Integer, ::Type{T}) where {T <: Real} # Theorem 5 in LKJ (2009 JMA) h = T(1 // 2) loginvconst = d * ((d - 2) * (T(logπ) / 4) + (3 * d - 4) * (T(logtwo) / 4) + loggamma(h * d)) - (d - 1) * loggamma(T(d)) - for k = 2:2:(d-2) + for k in 2:2:(d - 2) loginvconst += loggamma(k) end return loginvconst @@ -230,7 +230,7 @@ function lkj_vine_loginvconst(d::Integer, η::Real) # Equation (16) in LKJ (2009 JMA) expsum = zero(η) betasum = zero(η) - for k = 1:(d-1) + for k in 1:(d - 1) α = η + 0.5(d - k - 1) expsum += (2η - 2 + d - k) * (d - k) betasum += (d - k) * logbeta(α, α) @@ -243,7 +243,7 @@ function lkj_vine_loginvconst_uniform(d::Integer) # Equation after (16) in LKJ (2009 JMA) expsum = 0.0 betasum = 0.0 - for k = 1:(d-1) + for k in 1:(d - 1) α = (k + 1) / 2 expsum += k^2 betasum += k * logbeta(α, α) @@ -255,7 +255,7 @@ end function lkj_loginvconst_alt(d::Integer, η::Real) # Third line in first proof of Section 3.3 in LKJ (2009 JMA) loginvconst = zero(η) - for k = 1:(d-1) + for k in 1:(d - 1) loginvconst += 0.5k * logπ + loggamma(η + 0.5(d - 1 - k)) - loggamma(η + 0.5(d - 1)) end return loginvconst @@ -264,7 +264,7 @@ end function corr_logvolume(n::Integer) # https://doi.org/10.4169/amer.math.monthly.123.9.909 logvol = 0.0 - for k = 1:(n-1) + for k in 1:(n - 1) logvol += 0.5k * logπ + k * loggamma((k + 1) / 2) - k * loggamma((k + 2) / 2) end return logvol diff --git a/src/matrix/matrixbeta.jl b/src/matrix/matrixbeta.jl index 1c694c1be9..1a0c4e9d7f 100644 --- a/src/matrix/matrixbeta.jl +++ b/src/matrix/matrixbeta.jl @@ -25,7 +25,7 @@ are independent, and we use ``\\mathcal{L}(\\cdot)`` to denote the lower Cholesk has ``\\mathbf{U}\\sim \\textrm{MB}_p(n_1/2, n_2/2)``. 
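A short hedged construction sketch (parameter values are arbitrary; they only need to satisfy `n1, n2 > p - 1`):

```julia
d = MatrixBeta(3, 10, 12)  # p = 3, n1 = 10, n2 = 12: draws are 3×3
U = rand(d)                # both U and I - U are positive definite
logpdf(d, U)
```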
""" -struct MatrixBeta{T<:Real,TW} <: ContinuousMatrixDistribution +struct MatrixBeta{T <: Real, TW} <: ContinuousMatrixDistribution W1::TW W2::TW logc0::T @@ -42,7 +42,7 @@ function MatrixBeta(p::Int, n1::Real, n2::Real) Ip = ScalMat(p, one(T)) W1 = Wishart(T(n1), Ip) W2 = Wishart(T(n2), Ip) - MatrixBeta{T,typeof(W1)}(W1, W2, T(logc0)) + return MatrixBeta{T, typeof(W1)}(W1, W2, T(logc0)) end # ----------------------------------------------------------------------------- @@ -55,17 +55,17 @@ show(io::IO, d::MatrixBeta) = show_multline(io, d, [(:n1, d.W1.df), (:n2, d.W2.d # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixBeta{T}}, d::MatrixBeta) where {T<:Real} +function convert(::Type{MatrixBeta{T}}, d::MatrixBeta) where {T <: Real} W1 = convert(Wishart{T}, d.W1) W2 = convert(Wishart{T}, d.W2) - MatrixBeta{T,typeof(W1)}(W1, W2, T(d.logc0)) + return MatrixBeta{T, typeof(W1)}(W1, W2, T(d.logc0)) end -Base.convert(::Type{MatrixBeta{T}}, d::MatrixBeta{T}) where {T<:Real} = d +Base.convert(::Type{MatrixBeta{T}}, d::MatrixBeta{T}) where {T <: Real} = d -function convert(::Type{MatrixBeta{T}}, W1::Wishart, W2::Wishart, logc0) where {T<:Real} +function convert(::Type{MatrixBeta{T}}, W1::Wishart, W2::Wishart, logc0) where {T <: Real} WW1 = convert(Wishart{T}, W1) WW2 = convert(Wishart{T}, W2) - MatrixBeta{T,typeof(WW1)}(WW1, WW2, T(logc0)) + return MatrixBeta{T, typeof(WW1)}(WW1, WW2, T(logc0)) end # ----------------------------------------------------------------------------- @@ -83,24 +83,24 @@ params(d::MatrixBeta) = (size(d, 1), d.W1.df, d.W2.df) mean(d::MatrixBeta) = ((p, n1, n2) = params(d); Matrix((n1 / (n1 + n2)) * I, p, p)) -@inline partype(d::MatrixBeta{T}) where {T<:Real} = T +@inline partype(d::MatrixBeta{T}) where {T <: Real} = T # Konno (1988 JJSS) Corollary 3.3.i function cov(d::MatrixBeta, i::Integer, j::Integer, k::Integer, l::Integer) p, n1, n2 = params(d) n = n1 + n2 Ω = Matrix{partype(d)}(I, p, p) - n1 * - n2 * - inv(n * (n - 1) * (n + 2)) * - (-(2 / n) * Ω[i, j] * Ω[k, l] + Ω[j, l] * Ω[i, k] + Ω[i, l] * Ω[k, j]) + return n1 * + n2 * + inv(n * (n - 1) * (n + 2)) * + (-(2 / n) * Ω[i, j] * Ω[k, l] + Ω[j, l] * Ω[i, k] + Ω[i, l] * Ω[k, j]) end function var(d::MatrixBeta, i::Integer, j::Integer) p, n1, n2 = params(d) n = n1 + n2 Ω = Matrix{partype(d)}(I, p, p) - n1 * n2 * inv(n * (n - 1) * (n + 2)) * ((1 - (2 / n)) * Ω[i, j]^2 + Ω[j, j] * Ω[i, i]) + return n1 * n2 * inv(n * (n - 1) * (n + 2)) * ((1 - (2 / n)) * Ω[i, j]^2 + Ω[j, j] * Ω[i, i]) end # ----------------------------------------------------------------------------- @@ -114,7 +114,7 @@ end function logkernel(d::MatrixBeta, U::AbstractMatrix) p, n1, n2 = params(d) - ((n1 - p - 1) / 2) * logdet(U) + ((n2 - p - 1) / 2) * logdet(I - U) + return ((n1 - p - 1) / 2) * logdet(U) + ((n2 - p - 1) / 2) * logdet(I - U) end # ----------------------------------------------------------------------------- @@ -128,7 +128,7 @@ function _rand!(rng::AbstractRNG, d::MatrixBeta, A::AbstractMatrix) S2 = PDMat(rand(rng, d.W2)) S = S1 + S2 invL = Matrix(inv(S.chol.L)) - A .= X_A_Xt(S1, invL) + return A .= X_A_Xt(S1, invL) end # ----------------------------------------------------------------------------- diff --git a/src/matrix/matrixfdist.jl b/src/matrix/matrixfdist.jl index 6fa9f64616..9eb8bc1df4 100644 --- a/src/matrix/matrixfdist.jl +++ b/src/matrix/matrixfdist.jl @@ -30,7 +30,7 @@ is given by then the marginal distribution of ``\\boldsymbol{\\Sigma}`` is 
``\\textrm{MF}_{p}(n_1/2,n_2/2,\\mathbf{B})``. """ -struct MatrixFDist{T<:Real,TW<:Wishart} <: ContinuousMatrixDistribution +struct MatrixFDist{T <: Real, TW <: Wishart} <: ContinuousMatrixDistribution W::TW n2::T logc0::T @@ -50,10 +50,10 @@ function MatrixFDist(n1::Real, n2::Real, B::AbstractPDMat) T = Base.promote_eltype(n1, n2, logc0, B) prom_B = convert(AbstractArray{T}, B) W = Wishart(T(n1), prom_B) - MatrixFDist{T,typeof(W)}(W, T(n2), T(logc0)) + return MatrixFDist{T, typeof(W)}(W, T(n2), T(logc0)) end -MatrixFDist(n1::Real, n2::Real, B::Union{AbstractMatrix,LinearAlgebra.Cholesky}) = +MatrixFDist(n1::Real, n2::Real, B::Union{AbstractMatrix, LinearAlgebra.Cholesky}) = MatrixFDist(n1, n2, PDMat(B)) # ----------------------------------------------------------------------------- @@ -67,15 +67,15 @@ show(io::IO, d::MatrixFDist) = # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixFDist{T}}, d::MatrixFDist) where {T<:Real} +function convert(::Type{MatrixFDist{T}}, d::MatrixFDist) where {T <: Real} W = convert(Wishart{T}, d.W) - MatrixFDist{T,typeof(W)}(W, T(d.n2), T(d.logc0)) + return MatrixFDist{T, typeof(W)}(W, T(d.n2), T(d.logc0)) end -Base.convert(::Type{MatrixFDist{T}}, d::MatrixFDist{T}) where {T<:Real} = d +Base.convert(::Type{MatrixFDist{T}}, d::MatrixFDist{T}) where {T <: Real} = d -function convert(::Type{MatrixFDist{T}}, W::Wishart, n2, logc0) where {T<:Real} +function convert(::Type{MatrixFDist{T}}, W::Wishart, n2, logc0) where {T <: Real} WW = convert(Wishart{T}, W) - MatrixFDist{T,typeof(WW)}(WW, T(n2), T(logc0)) + return MatrixFDist{T, typeof(WW)}(WW, T(n2), T(logc0)) end # ----------------------------------------------------------------------------- @@ -98,7 +98,7 @@ function mean(d::MatrixFDist) return (n1 / (n2 - p - 1)) * Matrix(B) end -@inline partype(d::MatrixFDist{T}) where {T<:Real} = T +@inline partype(d::MatrixFDist{T}) where {T <: Real} = T # Konno (1988 JJSS) Corollary 2.4.i function cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) @@ -107,10 +107,10 @@ function cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) n2 > p + 3 || throw(ArgumentError("cov only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) - n1 * - (n - p - 1) * - inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * - (2inv(n2 - p - 1) * B[i, j] * B[k, l] + B[j, l] * B[i, k] + B[i, l] * B[k, j]) + return n1 * + (n - p - 1) * + inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * + (2inv(n2 - p - 1) * B[i, j] * B[k, l] + B[j, l] * B[i, k] + B[i, l] * B[k, j]) end function var(d::MatrixFDist, i::Integer, j::Integer) @@ -119,10 +119,10 @@ function var(d::MatrixFDist, i::Integer, j::Integer) n2 > p + 3 || throw(ArgumentError("var only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) - n1 * - (n - p - 1) * - inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * - ((2inv(n2 - p - 1) + 1) * B[i, j]^2 + B[j, j] * B[i, i]) + return n1 * + (n - p - 1) * + inv((n2 - p) * (n2 - p - 1) * (n2 - p - 3)) * + ((2inv(n2 - p - 1) + 1) * B[i, j]^2 + B[j, j] * B[i, i]) end # ----------------------------------------------------------------------------- @@ -140,7 +140,7 @@ end function logkernel(d::MatrixFDist, Σ::AbstractMatrix) p = size(d, 1) n1, n2, B = params(d) - ((n1 - p - 1) / 2) * logdet(Σ) - ((n1 + n2) / 2) * logdet(pdadd(Σ, B)) + return ((n1 - p - 1) / 2) * logdet(Σ) - ((n1 + n2) / 2) * logdet(pdadd(Σ, B)) end # ----------------------------------------------------------------------------- @@ -149,7 +149,7 @@ 
end function _rand!(rng::AbstractRNG, d::MatrixFDist, A::AbstractMatrix) Ψ = rand(rng, d.W) - A .= rand(rng, InverseWishart(d.n2, Ψ)) + return A .= rand(rng, InverseWishart(d.n2, Ψ)) end # ----------------------------------------------------------------------------- diff --git a/src/matrix/matrixnormal.jl b/src/matrix/matrixnormal.jl index 38e9c421d0..32393ded1b 100644 --- a/src/matrix/matrixnormal.jl +++ b/src/matrix/matrixnormal.jl @@ -17,8 +17,8 @@ f(\\mathbf{X};\\mathbf{M}, \\mathbf{U}, \\mathbf{V}) = \\frac{\\exp\\left( -\\fr ``\\mathbf{X}\\sim \\textrm{MN}_{n,p}(\\mathbf{M},\\mathbf{U},\\mathbf{V})`` if and only if ``\\text{vec}(\\mathbf{X})\\sim \\textrm{N}(\\text{vec}(\\mathbf{M}),\\mathbf{V}\\otimes\\mathbf{U})``. """ -struct MatrixNormal{T<:Real,TM<:AbstractMatrix,TU<:AbstractPDMat,TV<:AbstractPDMat} <: - ContinuousMatrixDistribution +struct MatrixNormal{T <: Real, TM <: AbstractMatrix, TU <: AbstractPDMat, TV <: AbstractPDMat} <: + ContinuousMatrixDistribution M::TM U::TU V::TV @@ -30,10 +30,10 @@ end # ----------------------------------------------------------------------------- function MatrixNormal( - M::AbstractMatrix{T}, - U::AbstractPDMat{T}, - V::AbstractPDMat{T}, -) where {T<:Real} + M::AbstractMatrix{T}, + U::AbstractPDMat{T}, + V::AbstractPDMat{T}, + ) where {T <: Real} n, p = size(M) n == size(U, 1) || throw(ArgumentError("Number of rows of M must equal dim of U.")) p == size(V, 1) || throw(ArgumentError("Number of columns of M must equal dim of V.")) @@ -42,7 +42,7 @@ function MatrixNormal( prom_M = convert(AbstractArray{R}, M) prom_U = convert(AbstractArray{R}, U) prom_V = convert(AbstractArray{R}, V) - MatrixNormal{R,typeof(prom_M),typeof(prom_U),typeof(prom_V)}( + return MatrixNormal{R, typeof(prom_M), typeof(prom_U), typeof(prom_V)}( prom_M, prom_U, prom_V, @@ -52,7 +52,7 @@ end function MatrixNormal(M::AbstractMatrix, U::AbstractPDMat, V::AbstractPDMat) T = Base.promote_eltype(M, U, V) - MatrixNormal( + return MatrixNormal( convert(AbstractArray{T}, M), convert(AbstractArray{T}, U), convert(AbstractArray{T}, V), @@ -61,18 +61,18 @@ end MatrixNormal( M::AbstractMatrix, - U::Union{AbstractMatrix,LinearAlgebra.Cholesky}, - V::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + U::Union{AbstractMatrix, LinearAlgebra.Cholesky}, + V::Union{AbstractMatrix, LinearAlgebra.Cholesky}, ) = MatrixNormal(M, PDMat(U), PDMat(V)) MatrixNormal( M::AbstractMatrix, - U::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + U::Union{AbstractMatrix, LinearAlgebra.Cholesky}, V::AbstractPDMat, ) = MatrixNormal(M, PDMat(U), V) MatrixNormal( M::AbstractMatrix, U::AbstractPDMat, - V::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + V::Union{AbstractMatrix, LinearAlgebra.Cholesky}, ) = MatrixNormal(M, U, PDMat(V)) MatrixNormal(m::Int, n::Int) = @@ -89,25 +89,25 @@ show(io::IO, d::MatrixNormal) = # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixNormal{T}}, d::MatrixNormal) where {T<:Real} +function convert(::Type{MatrixNormal{T}}, d::MatrixNormal) where {T <: Real} MM = convert(AbstractArray{T}, d.M) UU = convert(AbstractArray{T}, d.U) VV = convert(AbstractArray{T}, d.V) - MatrixNormal{T,typeof(MM),typeof(UU),typeof(VV)}(MM, UU, VV, T(d.logc0)) + return MatrixNormal{T, typeof(MM), typeof(UU), typeof(VV)}(MM, UU, VV, T(d.logc0)) end -Base.convert(::Type{MatrixNormal{T}}, d::MatrixNormal{T}) where {T<:Real} = d +Base.convert(::Type{MatrixNormal{T}}, d::MatrixNormal{T}) where {T <: Real} = d function convert( - 
::Type{MatrixNormal{T}}, - M::AbstractMatrix, - U::AbstractPDMat, - V::AbstractPDMat, - logc0, -) where {T<:Real} + ::Type{MatrixNormal{T}}, + M::AbstractMatrix, + U::AbstractPDMat, + V::AbstractPDMat, + logc0, + ) where {T <: Real} MM = convert(AbstractArray{T}, M) UU = convert(AbstractArray{T}, U) VV = convert(AbstractArray{T}, V) - MatrixNormal{T,typeof(MM),typeof(UU),typeof(VV)}(MM, UU, VV, T(logc0)) + return MatrixNormal{T, typeof(MM), typeof(UU), typeof(VV)}(MM, UU, VV, T(logc0)) end # ----------------------------------------------------------------------------- @@ -132,7 +132,7 @@ var(d::MatrixNormal) = reshape(diag(cov(d)), size(d)) params(d::MatrixNormal) = (d.M, d.U, d.V) -@inline partype(d::MatrixNormal{T}) where {T<:Real} = T +@inline partype(d::MatrixNormal{T}) where {T <: Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -141,12 +141,12 @@ params(d::MatrixNormal) = (d.M, d.U, d.V) function matrixnormal_logc0(U::AbstractPDMat, V::AbstractPDMat) n = size(U, 1) p = size(V, 1) - -(n * p / 2) * (logtwo + logπ) - (n / 2) * logdet(V) - (p / 2) * logdet(U) + return -(n * p / 2) * (logtwo + logπ) - (n / 2) * logdet(V) - (p / 2) * logdet(U) end function logkernel(d::MatrixNormal, X::AbstractMatrix) A = X - d.M - -0.5 * tr((d.V \ A') * (d.U \ A)) + return -0.5 * tr((d.V \ A') * (d.U \ A)) end # ----------------------------------------------------------------------------- @@ -160,7 +160,7 @@ function _rand!(rng::AbstractRNG, d::MatrixNormal, Y::AbstractMatrix) X = randn(rng, n, p) A = cholesky(d.U).L B = cholesky(d.V).U - Y .= d.M .+ A * X * B + return Y .= d.M .+ A * X * B end # ----------------------------------------------------------------------------- diff --git a/src/matrix/matrixtdist.jl b/src/matrix/matrixtdist.jl index 85beb7fc1b..bdb4736bfa 100644 --- a/src/matrix/matrixtdist.jl +++ b/src/matrix/matrixtdist.jl @@ -35,8 +35,8 @@ is given by then the marginal distribution of ``\\mathbf{X}`` is ``\\textrm{MT}_{n,p}(\\nu,\\mathbf{M},\\boldsymbol{\\Sigma},\\boldsymbol{\\Omega})``. 
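A hedged construction sketch (identity scale matrices are used purely for brevity):

```julia
using Distributions, LinearAlgebra

d = MatrixTDist(5.0, zeros(2, 3), Matrix(1.0I, 2, 2), Matrix(1.0I, 3, 3))
X = rand(d)  # a 2×3 draw
logpdf(d, X)
```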
""" -struct MatrixTDist{T<:Real,TM<:AbstractMatrix,TΣ<:AbstractPDMat,TΩ<:AbstractPDMat} <: - ContinuousMatrixDistribution +struct MatrixTDist{T <: Real, TM <: AbstractMatrix, TΣ <: AbstractPDMat, TΩ <: AbstractPDMat} <: + ContinuousMatrixDistribution ν::T M::TM Σ::TΣ @@ -49,11 +49,11 @@ end # ----------------------------------------------------------------------------- function MatrixTDist( - ν::T, - M::AbstractMatrix{T}, - Σ::AbstractPDMat{T}, - Ω::AbstractPDMat{T}, -) where {T<:Real} + ν::T, + M::AbstractMatrix{T}, + Σ::AbstractPDMat{T}, + Ω::AbstractPDMat{T}, + ) where {T <: Real} n, p = size(M) 0 < ν < Inf || throw(ArgumentError("degrees of freedom must be positive and finite.")) n == size(Σ, 1) || throw(ArgumentError("Number of rows of M must equal dim of Σ.")) @@ -63,7 +63,7 @@ function MatrixTDist( prom_M = convert(AbstractArray{R}, M) prom_Σ = convert(AbstractArray{R}, Σ) prom_Ω = convert(AbstractArray{R}, Ω) - MatrixTDist{R,typeof(prom_M),typeof(prom_Σ),typeof(prom_Ω)}( + return MatrixTDist{R, typeof(prom_M), typeof(prom_Σ), typeof(prom_Ω)}( R(ν), prom_M, prom_Σ, @@ -74,7 +74,7 @@ end function MatrixTDist(ν::Real, M::AbstractMatrix, Σ::AbstractPDMat, Ω::AbstractPDMat) T = Base.promote_eltype(ν, M, Σ, Ω) - MatrixTDist( + return MatrixTDist( convert(T, ν), convert(AbstractArray{T}, M), convert(AbstractArray{T}, Σ), @@ -85,19 +85,19 @@ end MatrixTDist( ν::Real, M::AbstractMatrix, - Σ::Union{AbstractMatrix,LinearAlgebra.Cholesky}, - Ω::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + Σ::Union{AbstractMatrix, LinearAlgebra.Cholesky}, + Ω::Union{AbstractMatrix, LinearAlgebra.Cholesky}, ) = MatrixTDist(ν, M, PDMat(Σ), PDMat(Ω)) MatrixTDist( ν::Real, M::AbstractMatrix, Σ::AbstractPDMat, - Ω::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + Ω::Union{AbstractMatrix, LinearAlgebra.Cholesky}, ) = MatrixTDist(ν, M, Σ, PDMat(Ω)) MatrixTDist( ν::Real, M::AbstractMatrix, - Σ::Union{AbstractMatrix,LinearAlgebra.Cholesky}, + Σ::Union{AbstractMatrix, LinearAlgebra.Cholesky}, Ω::AbstractPDMat, ) = MatrixTDist(ν, M, PDMat(Σ), Ω) @@ -112,26 +112,26 @@ show(io::IO, d::MatrixTDist) = # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{MatrixTDist{T}}, d::MatrixTDist) where {T<:Real} +function convert(::Type{MatrixTDist{T}}, d::MatrixTDist) where {T <: Real} MM = convert(AbstractArray{T}, d.M) ΣΣ = convert(AbstractArray{T}, d.Σ) ΩΩ = convert(AbstractArray{T}, d.Ω) - MatrixTDist{T,typeof(MM),typeof(ΣΣ),typeof(ΩΩ)}(T(d.ν), MM, ΣΣ, ΩΩ, T(d.logc0)) + return MatrixTDist{T, typeof(MM), typeof(ΣΣ), typeof(ΩΩ)}(T(d.ν), MM, ΣΣ, ΩΩ, T(d.logc0)) end -Base.convert(::Type{MatrixTDist{T}}, d::MatrixTDist{T}) where {T<:Real} = d +Base.convert(::Type{MatrixTDist{T}}, d::MatrixTDist{T}) where {T <: Real} = d function convert( - ::Type{MatrixTDist{T}}, - ν, - M::AbstractMatrix, - Σ::AbstractPDMat, - Ω::AbstractPDMat, - logc0, -) where {T<:Real} + ::Type{MatrixTDist{T}}, + ν, + M::AbstractMatrix, + Σ::AbstractPDMat, + Ω::AbstractPDMat, + logc0, + ) where {T <: Real} MM = convert(AbstractArray{T}, M) ΣΣ = convert(AbstractArray{T}, Σ) ΩΩ = convert(AbstractArray{T}, Ω) - MatrixTDist{T,typeof(MM),typeof(ΣΣ),typeof(ΩΩ)}(T(ν), MM, ΣΣ, ΩΩ, T(logc0)) + return MatrixTDist{T, typeof(MM), typeof(ΣΣ), typeof(ΩΩ)}(T(ν), MM, ΣΣ, ΩΩ, T(logc0)) end # ----------------------------------------------------------------------------- @@ -164,7 +164,7 @@ var(d::MatrixTDist) = params(d::MatrixTDist) = (d.ν, d.M, d.Σ, d.Ω) -@inline partype(d::MatrixTDist{T}) where {T<:Real} = T +@inline 
partype(d::MatrixTDist{T}) where {T <: Real} = T # ----------------------------------------------------------------------------- # Evaluation @@ -179,13 +179,13 @@ function matrixtdist_logc0(Σ::AbstractPDMat, Ω::AbstractPDMat, ν::Real) term3 = -logmvgamma(p, (ν + p - 1) / 2) term4 = (-n / 2) * logdet(Ω) term5 = (-p / 2) * logdet(Σ) - term1 + term2 + term3 + term4 + term5 + return term1 + term2 + term3 + term4 + term5 end function logkernel(d::MatrixTDist, X::AbstractMatrix) n, p = size(d) A = X - d.M - (-(d.ν + n + p - 1) / 2) * logdet(I + (d.Σ \ A) * (d.Ω \ A')) + return (-(d.ν + n + p - 1) / 2) * logdet(I + (d.Σ \ A) * (d.Ω \ A')) end # ----------------------------------------------------------------------------- @@ -197,7 +197,7 @@ end function _rand!(rng::AbstractRNG, d::MatrixTDist, A::AbstractMatrix) n, p = size(d) S = rand(rng, InverseWishart(d.ν + n - 1, d.Σ)) - A .= rand(rng, MatrixNormal(d.M, S, d.Ω)) + return A .= rand(rng, MatrixNormal(d.M, S, d.Ω)) end # ----------------------------------------------------------------------------- @@ -213,7 +213,7 @@ function MvTDist(MT::MatrixTDist) ArgumentError("Row or col dim of `MatrixTDist` must be 1 to coerce to `MvTDist`"), ) ν, M, Σ, Ω = params(MT) - MvTDist(ν, vec(M), (1 / ν) * kron(Σ, Ω)) + return MvTDist(ν, vec(M), (1 / ν) * kron(Σ, Ω)) end # ----------------------------------------------------------------------------- diff --git a/src/matrix/wishart.jl b/src/matrix/wishart.jl index 68c1127802..6071ffcd5f 100644 --- a/src/matrix/wishart.jl +++ b/src/matrix/wishart.jl @@ -30,7 +30,7 @@ has ``\\mathbf{H}\\sim \\textrm{W}_p(\\nu, \\mathbf{S})``. For non-integer ``\\nu``, Wishart matrices can be generated via the [Bartlett decomposition](https://en.wikipedia.org/wiki/Wishart_distribution#Bartlett_decomposition). """ -struct Wishart{T<:Real,ST<:AbstractPDMat,R<:Integer} <: ContinuousMatrixDistribution +struct Wishart{T <: Real, ST <: AbstractPDMat, R <: Integer} <: ContinuousMatrixDistribution df::T # degree of freedom S::ST # the scale matrix logc0::T # the logarithm of normalizing constant in pdf @@ -42,7 +42,7 @@ end # Constructors # ----------------------------------------------------------------------------- -function Wishart(df::T, S::AbstractPDMat{T}) where {T<:Real} +function Wishart(df::T, S::AbstractPDMat{T}) where {T <: Real} df > 0 || throw(ArgumentError("df must be positive. 
got $(df).")) p = size(S, 1) singular = df <= p - 1 @@ -56,12 +56,12 @@ function Wishart(df::T, S::AbstractPDMat{T}) where {T<:Real} rnk::Integer = ifelse(singular, df, p) logc0 = wishart_logc0(df, S, rnk) _df, _logc0 = promote(df, logc0) - Wishart{typeof(_df),typeof(S),typeof(rnk)}(_df, S, _logc0, rnk, singular) + return Wishart{typeof(_df), typeof(S), typeof(rnk)}(_df, S, _logc0, rnk, singular) end function Wishart(df::Real, S::AbstractPDMat) T = Base.promote_eltype(df, S) - Wishart(T(df), convert(AbstractArray{T}, S)) + return Wishart(T(df), convert(AbstractArray{T}, S)) end Wishart(df::Real, S::Matrix) = Wishart(df, PDMat(S)) @@ -77,22 +77,22 @@ show(io::IO, d::Wishart) = show_multline(io, d, [(:df, d.df), (:S, d.S)]) # Conversion # ----------------------------------------------------------------------------- -function convert(::Type{Wishart{T}}, d::Wishart) where {T<:Real} +function convert(::Type{Wishart{T}}, d::Wishart) where {T <: Real} P = convert(AbstractArray{T}, d.S) - Wishart{T,typeof(P),typeof(d.rank)}(T(d.df), P, T(d.logc0), d.rank, d.singular) + return Wishart{T, typeof(P), typeof(d.rank)}(T(d.df), P, T(d.logc0), d.rank, d.singular) end -Base.convert(::Type{Wishart{T}}, d::Wishart{T}) where {T<:Real} = d +Base.convert(::Type{Wishart{T}}, d::Wishart{T}) where {T <: Real} = d function convert( - ::Type{Wishart{T}}, - df, - S::AbstractPDMat, - logc0, - rnk, - singular, -) where {T<:Real} + ::Type{Wishart{T}}, + df, + S::AbstractPDMat, + logc0, + rnk, + singular, + ) where {T <: Real} P = convert(AbstractArray{T}, S) - Wishart{T,typeof(P),typeof(rnk)}(T(df), P, T(logc0), rnk, singular) + return Wishart{T, typeof(P), typeof(rnk)}(T(df), P, T(logc0), rnk, singular) end # ----------------------------------------------------------------------------- @@ -113,7 +113,7 @@ size(d::Wishart) = size(d.S) rank(d::Wishart) = d.rank params(d::Wishart) = (d.df, d.S) -@inline partype(d::Wishart{T}) where {T<:Real} = T +@inline partype(d::Wishart{T}) where {T <: Real} = T mean(d::Wishart) = d.df * Matrix(d.S) @@ -128,7 +128,7 @@ function meanlogdet(d::Wishart) p = size(d, 1) v = logdet_S + p * oftype(logdet_S, logtwo) df = oftype(logdet_S, d.df) - for i = 0:(p-1) + for i in 0:(p - 1) v += digamma((df - i) / 2) end return d.singular ? 
oftype(v, -Inf) : v @@ -156,7 +156,7 @@ end # Evaluation # ----------------------------------------------------------------------------- -function wishart_logc0(df::T, S::AbstractPDMat{T}, rnk::Integer) where {T<:Real} +function wishart_logc0(df::T, S::AbstractPDMat{T}, rnk::Integer) where {T <: Real} p = size(S, 1) if df <= p - 1 return singular_wishart_logc0(p, df, S, rnk) @@ -175,26 +175,26 @@ end # Singular Wishart pdf: Theorem 6 in Uhlig (1994 AoS) function singular_wishart_logc0( - p::Integer, - df::T, - S::AbstractPDMat{T}, - rnk::Integer, -) where {T<:Real} + p::Integer, + df::T, + S::AbstractPDMat{T}, + rnk::Integer, + ) where {T <: Real} logdet_S = logdet(S) h_df = oftype(logdet_S, df) / 2 return -h_df * (logdet_S + p * oftype(logdet_S, logtwo)) - logmvgamma(rnk, h_df) + - ((rnk * (rnk - p)) // 2) * oftype(logdet_S, logπ) + ((rnk * (rnk - p)) // 2) * oftype(logdet_S, logπ) end function singular_wishart_logkernel(d::Wishart, X::AbstractMatrix) p = size(d, 1) r = rank(d) - L = eigvals(Hermitian(X), (p-r+1):p) + L = eigvals(Hermitian(X), (p - r + 1):p) return ((d.df - (p + 1)) * sum(log, L) - tr(d.S \ X)) / 2 end # Nonsingular Wishart pdf -function nonsingular_wishart_logc0(p::Integer, df::T, S::AbstractPDMat{T}) where {T<:Real} +function nonsingular_wishart_logc0(p::Integer, df::T, S::AbstractPDMat{T}) where {T <: Real} logdet_S = logdet(S) h_df = oftype(logdet_S, df) / 2 return -h_df * (logdet_S + p * oftype(logdet_S, logtwo)) - logmvgamma(p, h_df) @@ -213,12 +213,12 @@ function _rand!(rng::AbstractRNG, d::Wishart, A::AbstractMatrix) axes2 = axes(A, 2) r = rank(d) randn!(rng, view(A, :, axes2[1:r])) - fill!(view(A, :, axes2[(r+1):end]), zero(eltype(A))) + fill!(view(A, :, axes2[(r + 1):end]), zero(eltype(A))) else _wishart_genA!(rng, A, d.df) end unwhiten!(d.S, A) - A .= A * A' + return A .= A * A' end function _wishart_genA!(rng::AbstractRNG, A::AbstractMatrix, df::Real) diff --git a/src/matrixvariates.jl b/src/matrixvariates.jl index e17d96fab0..8db1c788b3 100644 --- a/src/matrixvariates.jl +++ b/src/matrixvariates.jl @@ -1,4 +1,3 @@ - ##### Generic methods ##### """ @@ -44,7 +43,7 @@ mean(d::MatrixDistribution) Compute the matrix of element-wise variances for distribution `d`. 
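For instance (a hedged sketch; any matrix distribution that implements the element-wise `var(d, i, j)` methods behaves the same):

```julia
using Distributions, LinearAlgebra

d = Wishart(7.0, Matrix(1.0I, 3, 3))
V = var(d)  # 3×3 matrix with V[i, j] == var(d, i, j)
```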
""" -var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i = 1:n, j = 1:p]) +var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i in 1:n, j in 1:p]) """ cov(d::MatrixDistribution) @@ -55,8 +54,8 @@ function cov(d::MatrixDistribution) M = length(d) V = zeros(partype(d), M, M) iter = CartesianIndices(size(d)) - for el1 = 1:M - for el2 = 1:el1 + for el1 in 1:M + for el2 in 1:el1 i, j = Tuple(iter[el1]) k, l = Tuple(iter[el2]) V[el1, el2] = cov(d, i, j, k, l) @@ -73,7 +72,7 @@ Compute the 4-dimensional array whose `(i, j, k, l)` element is `cov(X[i,j], X[k """ function cov(d::MatrixDistribution, ::Val{false}) n, p = size(d) - [cov(d, i, j, k, l) for i = 1:n, j = 1:p, k = 1:n, l = 1:p] + return [cov(d, i, j, k, l) for i in 1:n, j in 1:p, k in 1:n, l in 1:p] end # pdf & logpdf @@ -89,13 +88,13 @@ check_univariate(d::MatrixDistribution) = ##### Specific distributions ##### for fname in [ - "wishart.jl", - "inversewishart.jl", - "matrixnormal.jl", - "matrixtdist.jl", - "matrixbeta.jl", - "matrixfdist.jl", - "lkj.jl", -] + "wishart.jl", + "inversewishart.jl", + "matrixnormal.jl", + "matrixtdist.jl", + "matrixbeta.jl", + "matrixfdist.jl", + "lkj.jl", + ] include(joinpath("matrix", fname)) end diff --git a/src/mixtures/mixturemodel.jl b/src/mixtures/mixturemodel.jl index fb610782fb..86492ebf5d 100644 --- a/src/mixtures/mixturemodel.jl +++ b/src/mixtures/mixturemodel.jl @@ -11,8 +11,8 @@ All subtypes of `AbstractMixtureModel` should implement the following methods: - `probs(d)`: return a vector of prior probabilities over components. """ -abstract type AbstractMixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution} <: - Distribution{VF,VS} end +abstract type AbstractMixtureModel{VF <: VariateForm, VS <: ValueSupport, C <: Distribution} <: +Distribution{VF, VS} end """ MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Real} @@ -22,24 +22,24 @@ A mixture of distributions, parametrized on: * `C` distribution family of the mixture * `CT` the type for probabilities of the prior """ -struct MixtureModel{VF<:VariateForm,VS<:ValueSupport,C<:Distribution,CT<:Categorical} <: - AbstractMixtureModel{VF,VS,C} +struct MixtureModel{VF <: VariateForm, VS <: ValueSupport, C <: Distribution, CT <: Categorical} <: + AbstractMixtureModel{VF, VS, C} components::Vector{C} prior::CT - function MixtureModel{VF,VS,C}(cs::Vector{C}, pri::CT) where {VF,VS,C,CT} + function MixtureModel{VF, VS, C}(cs::Vector{C}, pri::CT) where {VF, VS, C, CT} length(cs) == ncategories(pri) || error("The number of components does not match the length of prior.") - new{VF,VS,C,CT}(cs, pri) + return new{VF, VS, C, CT}(cs, pri) end end -const UnivariateMixture{S<:ValueSupport,C<:Distribution} = - AbstractMixtureModel{Univariate,S,C} -const MultivariateMixture{S<:ValueSupport,C<:Distribution} = - AbstractMixtureModel{Multivariate,S,C} -const MatrixvariateMixture{S<:ValueSupport,C<:Distribution} = - AbstractMixtureModel{Matrixvariate,S,C} +const UnivariateMixture{S <: ValueSupport, C <: Distribution} = + AbstractMixtureModel{Univariate, S, C} +const MultivariateMixture{S <: ValueSupport, C <: Distribution} = + AbstractMixtureModel{Multivariate, S, C} +const MatrixvariateMixture{S <: ValueSupport, C <: Distribution} = + AbstractMixtureModel{Matrixvariate, S, C} # Interface @@ -48,14 +48,14 @@ const MatrixvariateMixture{S<:ValueSupport,C<:Distribution} = The type of the components of `d`. 
""" -component_type(d::AbstractMixtureModel{VF,VS,C}) where {VF,VS,C} = C +component_type(d::AbstractMixtureModel{VF, VS, C}) where {VF, VS, C} = C """ components(d::AbstractMixtureModel) Get a list of components of the mixture model `d`. """ -components(d::AbstractMixtureModel) = [component(d, k) for k = 1:ncomponents(d)] +components(d::AbstractMixtureModel) = [component(d, k) for k in 1:ncomponents(d)] """ probs(d::AbstractMixtureModel) @@ -121,7 +121,7 @@ rand!(d::AbstractMixtureModel, r::AbstractArray) Construct a mixture model with a vector of `components` and a `prior` probability vector. If no `prior` is provided then all components will have the same prior probabilities. """ -MixtureModel(components::Vector{C}) where {C<:Distribution} = +MixtureModel(components::Vector{C}) where {C <: Distribution} = MixtureModel(components, Categorical(length(components))) """ @@ -131,38 +131,36 @@ Construct a mixture model with component type ``C``, a vector of parameters for the components given by ``params``, and a prior probability vector. If no `prior` is provided then all components will have the same prior probabilities. """ -function MixtureModel(::Type{C}, params::AbstractArray) where {C<:Distribution} +function MixtureModel(::Type{C}, params::AbstractArray) where {C <: Distribution} components = C[_construct_component(C, a) for a in params] - MixtureModel(components) + return MixtureModel(components) end -function MixtureModel(components::Vector{C}, prior::Categorical) where {C<:Distribution} +function MixtureModel(components::Vector{C}, prior::Categorical) where {C <: Distribution} VF = variate_form(C) VS = value_support(C) - MixtureModel{VF,VS,C}(components, prior) + return MixtureModel{VF, VS, C}(components, prior) end MixtureModel( components::Vector{C}, p::VT, -) where {C<:Distribution,VT<:AbstractVector{<:Real}} = +) where {C <: Distribution, VT <: AbstractVector{<:Real}} = MixtureModel(components, Categorical(p)) -_construct_component(::Type{C}, arg) where {C<:Distribution} = C(arg) -_construct_component(::Type{C}, args::Tuple) where {C<:Distribution} = C(args...) +_construct_component(::Type{C}, arg) where {C <: Distribution} = C(arg) +_construct_component(::Type{C}, args::Tuple) where {C <: Distribution} = C(args...) 
function MixtureModel( - ::Type{C}, - params::AbstractArray, - p::Vector{T}, -) where {C<:Distribution,T<:Real} + ::Type{C}, + params::AbstractArray, + p::Vector{T}, + ) where {C <: Distribution, T <: Real} components = C[_construct_component(C, a) for a in params] - MixtureModel(components, p) + return MixtureModel(components, p) end - - #### Basic properties """ @@ -194,7 +192,7 @@ function mean(d::MultivariateMixture) K = ncomponents(d) p = probs(d) m = zeros(length(d)) - for i = 1:K + for i in 1:K pi = p[i] if pi > 0.0 c = component(d, i) @@ -215,7 +213,7 @@ function var(d::UnivariateMixture) means = Vector{Float64}(undef, K) m = 0.0 v = 0.0 - for i = 1:K + for i in 1:K pi = p[i] if pi > 0.0 ci = component(d, i) @@ -224,7 +222,7 @@ function var(d::UnivariateMixture) v += pi * var(ci) end end - for i = 1:K + for i in 1:K pi = p[i] if pi > 0.0 v += pi * abs2(means[i] - m) @@ -244,7 +242,7 @@ function cov(d::MultivariateMixture) md = zeros(length(d)) V = zeros(length(d), length(d)) - for i = 1:K + for i in 1:K pi = p[i] if pi > 0.0 c = component(d, i) @@ -252,7 +250,7 @@ function cov(d::MultivariateMixture) axpy!(pi, cov(c), V) end end - for i = 1:K + for i in 1:K pi = p[i] if pi > 0.0 c = component(d, i) @@ -265,7 +263,6 @@ function cov(d::MultivariateMixture) end - #### show function show(io::IO, d::MixtureModel) @@ -273,17 +270,16 @@ function show(io::IO, d::MixtureModel) pr = probs(d) println(io, "MixtureModel{$(component_type(d))}(K = $K)") Ks = min(K, 8) - for i = 1:Ks + for i in 1:Ks @printf(io, "components[%d] (prior = %.4f): ", i, pr[i]) println(io, component(d, i)) end - if Ks < K + return if Ks < K println(io, "The rest are omitted ...") end end - #### Evaluation function insupport(d::AbstractMixtureModel, x::AbstractVector) @@ -350,7 +346,7 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x) # in the mean time, add log(prior) to lp and # update the maximum for each sample - for j = 1:n + for j in 1:n lp_i[j] += lpri if lp_i[j] > m[j] m[j] = lp_i[j] @@ -360,16 +356,16 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x) end fill!(r, 0.0) - @inbounds for i = 1:K + @inbounds for i in 1:K if p[i] > 0.0 lp_i = view(Lp, :, i) - for j = 1:n + for j in 1:n r[j] += exp(lp_i[j] - m[j]) end end end - @inbounds for j = 1:n + @inbounds for j in 1:n r[j] = log(r[j]) + m[j] end return r @@ -398,47 +394,47 @@ _logpdf!(r::AbstractArray{<:Real}, d::MultivariateMixture, x::AbstractMatrix{<:R function _cwise_pdf1!(r::AbstractVector, d::AbstractMixtureModel, x) K = ncomponents(d) length(r) == K || error("The length of r should match the number of components.") - for i = 1:K + for i in 1:K r[i] = pdf(component(d, i), x) end - r + return r end function _cwise_logpdf1!(r::AbstractVector, d::AbstractMixtureModel, x) K = ncomponents(d) length(r) == K || error("The length of r should match the number of components.") - for i = 1:K + for i in 1:K r[i] = logpdf(component(d, i), x) end - r + return r end function _cwise_pdf!(r::AbstractMatrix, d::AbstractMixtureModel, X) K = ncomponents(d) n = size(X, ndims(X)) size(r) == (n, K) || error("The size of r is incorrect.") - for i = 1:K + for i in 1:K if d isa UnivariateMixture view(r, :, i) .= Base.Fix1(pdf, component(d, i)).(X) else pdf!(view(r, :, i), component(d, i), X) end end - r + return r end function _cwise_logpdf!(r::AbstractMatrix, d::AbstractMixtureModel, X) K = ncomponents(d) n = size(X, ndims(X)) size(r) == (n, K) || error("The size of r is incorrect.") - for i = 1:K + for i in 1:K if d isa UnivariateMixture 
view(r, :, i) .= Base.Fix1(logpdf, component(d, i)).(X) else logpdf!(view(r, :, i), component(d, i), X) end end - r + return r end componentwise_pdf!(r::AbstractVector, d::UnivariateMixture, x::Real) = _cwise_pdf1!(r, d, x) @@ -481,7 +477,7 @@ function quantile(d::UnivariateMixture{Continuous}, p::Real) ps = probs(d) min_q, max_q = extrema(quantile(component(d, i), p) for (i, pi) in enumerate(ps) if pi > 0) - quantile_bisect(d, p, min_q, max_q) + return quantile_bisect(d, p, min_q, max_q) end # we also implement `median` since `median` is implemented more efficiently than @@ -489,20 +485,20 @@ end function median(d::UnivariateMixture{Continuous}) ps = probs(d) min_q, max_q = extrema(median(component(d, i)) for (i, pi) in enumerate(ps) if pi > 0) - quantile_bisect(d, 1 // 2, min_q, max_q) + return quantile_bisect(d, 1 // 2, min_q, max_q) end ## Sampling -struct MixtureSampler{VF,VS,Sampler} <: Sampleable{VF,VS} +struct MixtureSampler{VF, VS, Sampler} <: Sampleable{VF, VS} csamplers::Vector{Sampler} psampler::AliasTable end -function MixtureSampler(d::MixtureModel{VF,VS}) where {VF,VS} +function MixtureSampler(d::MixtureModel{VF, VS}) where {VF, VS} csamplers = map(sampler, d.components) psampler = sampler(d.prior) - MixtureSampler{VF,VS,eltype(csamplers)}(csamplers, psampler) + return MixtureSampler{VF, VS, eltype(csamplers)}(csamplers, psampler) end Base.length(s::MixtureSampler) = length(first(s.csamplers)) diff --git a/src/mixtures/unigmm.jl b/src/mixtures/unigmm.jl index 355f6f9cae..56099a824a 100644 --- a/src/mixtures/unigmm.jl +++ b/src/mixtures/unigmm.jl @@ -1,26 +1,26 @@ # Univariate Gaussian Mixture Models struct UnivariateGMM{ - VT1<:AbstractVector{<:Real}, - VT2<:AbstractVector{<:Real}, - C<:Categorical, -} <: UnivariateMixture{Continuous,Normal} + VT1 <: AbstractVector{<:Real}, + VT2 <: AbstractVector{<:Real}, + C <: Categorical, + } <: UnivariateMixture{Continuous, Normal} K::Int means::VT1 stds::VT2 prior::C function UnivariateGMM( - ms::VT1, - ss::VT2, - pri::C, - ) where {VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real},C<:Categorical} + ms::VT1, + ss::VT2, + pri::C, + ) where {VT1 <: AbstractVector{<:Real}, VT2 <: AbstractVector{<:Real}, C <: Categorical} K = length(ms) length(ss) == K || throw(DimensionMismatch()) ncategories(pri) == K || error( "The number of categories in pri should be equal to the number of components.", ) - new{VT1,VT2,C}(K, ms, ss, pri) + return new{VT1, VT2, C}(K, ms, ss, pri) end end @@ -41,8 +41,8 @@ rand(rng::AbstractRNG, d::UnivariateGMM) = params(d::UnivariateGMM) = (d.means, d.stds, d.prior) -struct UnivariateGMMSampler{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real}} <: - Sampleable{Univariate,Continuous} +struct UnivariateGMMSampler{VT1 <: AbstractVector{<:Real}, VT2 <: AbstractVector{<:Real}} <: + Sampleable{Univariate, Continuous} means::VT1 stds::VT2 psampler::AliasTable diff --git a/src/multivariate/dirichlet.jl b/src/multivariate/dirichlet.jl index 7bb749fd1f..f9f7258c74 100644 --- a/src/multivariate/dirichlet.jl +++ b/src/multivariate/dirichlet.jl @@ -20,8 +20,8 @@ Dirichlet(alpha) # Dirichlet distribution with parameter vector alpha Dirichlet(k, a) # Dirichlet distribution with parameter a * ones(k) ``` """ -struct Dirichlet{T<:Real,Ts<:AbstractVector{T},S<:Real} <: - ContinuousMultivariateDistribution +struct Dirichlet{T <: Real, Ts <: AbstractVector{T}, S <: Real} <: + ContinuousMultivariateDistribution alpha::Ts alpha0::T lmnB::S @@ -33,19 +33,19 @@ struct Dirichlet{T<:Real,Ts<:AbstractVector{T},S<:Real} <: ) alpha0 = 
sum(alpha) lmnB = sum(loggamma, alpha) - loggamma(alpha0) - new{T,typeof(alpha),typeof(lmnB)}(alpha, alpha0, lmnB) + return new{T, typeof(alpha), typeof(lmnB)}(alpha, alpha0, lmnB) end end -function Dirichlet(alpha::AbstractVector{T}; check_args::Bool = true) where {T<:Real} - Dirichlet{T}(alpha; check_args = check_args) +function Dirichlet(alpha::AbstractVector{T}; check_args::Bool = true) where {T <: Real} + return Dirichlet{T}(alpha; check_args = check_args) end function Dirichlet(d::Integer, alpha::Real; check_args::Bool = true) @check_args Dirichlet (d, d > zero(d)) (alpha, alpha > zero(alpha)) return Dirichlet{typeof(alpha)}(Fill(alpha, d); check_args = false) end -struct DirichletCanon{T<:Real,Ts<:AbstractVector{T}} +struct DirichletCanon{T <: Real, Ts <: AbstractVector{T}} alpha::Ts end @@ -54,16 +54,16 @@ length(d::DirichletCanon) = length(d.alpha) Base.eltype(::Type{<:Dirichlet{T}}) where {T} = T #### Conversions -convert(::Type{Dirichlet{T}}, cf::DirichletCanon) where {T<:Real} = +convert(::Type{Dirichlet{T}}, cf::DirichletCanon) where {T <: Real} = Dirichlet(convert(AbstractVector{T}, cf.alpha)) -convert(::Type{Dirichlet{T}}, alpha::AbstractVector{<:Real}) where {T<:Real} = +convert(::Type{Dirichlet{T}}, alpha::AbstractVector{<:Real}) where {T <: Real} = Dirichlet(convert(AbstractVector{T}, alpha)) -convert(::Type{Dirichlet{T}}, d::Dirichlet{<:Real}) where {T<:Real} = +convert(::Type{Dirichlet{T}}, d::Dirichlet{<:Real}) where {T <: Real} = Dirichlet(convert(AbstractVector{T}, d.alpha)) -convert(::Type{Dirichlet{T}}, cf::DirichletCanon{T}) where {T<:Real} = Dirichlet(cf.alpha) -convert(::Type{Dirichlet{T}}, alpha::AbstractVector{T}) where {T<:Real} = Dirichlet(alpha) -convert(::Type{Dirichlet{T}}, d::Dirichlet{T}) where {T<:Real} = d +convert(::Type{Dirichlet{T}}, cf::DirichletCanon{T}) where {T <: Real} = Dirichlet(cf.alpha) +convert(::Type{Dirichlet{T}}, alpha::AbstractVector{T}) where {T <: Real} = Dirichlet(alpha) +convert(::Type{Dirichlet{T}}, d::Dirichlet{T}) where {T <: Real} = d Base.show(io::IO, d::Dirichlet) = show(io, d, (:alpha,)) @@ -72,7 +72,7 @@ Base.show(io::IO, d::Dirichlet) = show(io, d, (:alpha,)) length(d::Dirichlet) = length(d.alpha) mean(d::Dirichlet) = d.alpha .* inv(d.alpha0) params(d::Dirichlet) = (d.alpha,) -@inline partype(::Dirichlet{T}) where {T<:Real} = T +@inline partype(::Dirichlet{T}) where {T <: Real} = T function var(d::Dirichlet) α0 = d.alpha0 @@ -91,14 +91,14 @@ function cov(d::Dirichlet) T = typeof(zero(eltype(α))^2 * c) k = length(α) C = Matrix{T}(undef, k, k) - for j = 1:k + for j in 1:k αj = α[j] αjc = αj * c - for i = 1:(j-1) + for i in 1:(j - 1) @inbounds C[i, j] = C[j, i] end @inbounds C[j, j] = (α0 - αj) * αjc - for i = (j+1):k + for i in (j + 1):k @inbounds C[i, j] = -α[i] * αjc end end @@ -155,23 +155,23 @@ end # sampling function _rand!( - rng::AbstractRNG, - d::Union{Dirichlet,DirichletCanon}, - x::AbstractVector{<:Real}, -) + rng::AbstractRNG, + d::Union{Dirichlet, DirichletCanon}, + x::AbstractVector{<:Real}, + ) for (i, αi) in zip(eachindex(x), d.alpha) @inbounds x[i] = rand(rng, Gamma(αi)) end - lmul!(inv(sum(x)), x) # this returns x + return lmul!(inv(sum(x)), x) # this returns x end function _rand!( - rng::AbstractRNG, - d::Dirichlet{T,<:FillArrays.AbstractFill{T}}, - x::AbstractVector{<:Real}, -) where {T<:Real} + rng::AbstractRNG, + d::Dirichlet{T, <:FillArrays.AbstractFill{T}}, + x::AbstractVector{<:Real}, + ) where {T <: Real} rand!(rng, Gamma(FillArrays.getindex_value(d.alpha)), x) - lmul!(inv(sum(x)), x) # this returns 
x + return lmul!(inv(sum(x)), x) # this returns x end ####################################### @@ -195,19 +195,19 @@ function suffstats(::Type{<:Dirichlet}, P::AbstractMatrix{Float64}) K = size(P, 1) n = size(P, 2) slogp = zeros(K) - for i = 1:n - for k = 1:K + for i in 1:n + for k in 1:K @inbounds slogp[k] += log(P[k, i]) end end - DirichletStats(slogp, n) + return DirichletStats(slogp, n) end function suffstats( - ::Type{<:Dirichlet}, - P::AbstractMatrix{Float64}, - w::AbstractArray{Float64}, -) + ::Type{<:Dirichlet}, + P::AbstractMatrix{Float64}, + w::AbstractArray{Float64}, + ) K = size(P, 1) n = size(P, 2) if length(w) != n @@ -217,14 +217,14 @@ function suffstats( tw = 0.0 slogp = zeros(K) - for i = 1:n + for i in 1:n @inbounds wi = w[i] tw += wi - for k = 1:K + for k in 1:K @inbounds slogp[k] += log(P[k, i]) * wi end end - DirichletStats(slogp, tw) + return DirichletStats(slogp, tw) end # fit_mle methods @@ -235,7 +235,7 @@ function _dirichlet_mle_init2(μ::Vector{Float64}, γ::Vector{Float64}) K = length(μ) α0 = 0.0 - for k = 1:K + for k in 1:K @inbounds μk = μ[k] @inbounds γk = γ[k] ak = (μk - γk) / (γk - μk * μk) @@ -243,7 +243,7 @@ function _dirichlet_mle_init2(μ::Vector{Float64}, γ::Vector{Float64}) end α0 /= K - lmul!(α0, μ) + return lmul!(α0, μ) end function dirichlet_mle_init(P::AbstractMatrix{Float64}) @@ -257,7 +257,7 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64}) μ .*= c γ .*= c - _dirichlet_mle_init2(μ, γ) + return _dirichlet_mle_init2(μ, γ) end function dirichlet_mle_init(P::AbstractMatrix{Float64}, w::AbstractArray{Float64}) @@ -268,10 +268,10 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64}, w::AbstractArray{Float64 γ = zeros(K) # E[p^2] tw = 0.0 - for i = 1:n + for i in 1:n @inbounds wi = w[i] tw += wi - for k = 1:K + for k in 1:K pk = P[k, i] @inbounds μ[k] += pk * wi @inbounds γ[k] += pk * pk * wi @@ -282,18 +282,18 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64}, w::AbstractArray{Float64 μ .*= c γ .*= c - _dirichlet_mle_init2(μ, γ) + return _dirichlet_mle_init2(μ, γ) end ## Newton-Ralphson algorithm function fit_dirichlet!( - elogp::Vector{Float64}, - α::Vector{Float64}; - maxiter::Int = 25, - tol::Float64 = 1.0e-12, - debug::Bool = false, -) + elogp::Vector{Float64}, + α::Vector{Float64}; + maxiter::Int = 25, + tol::Float64 = 1.0e-12, + debug::Bool = false, + ) # This function directly overrides α K = length(elogp) @@ -321,7 +321,7 @@ function fit_dirichlet!( b = 0.0 iqs = 0.0 - for k = 1:K + for k in 1:K @inbounds ak = α[k] @inbounds g[k] = gk = digam_α0 - digamma(ak) + elogp[k] @inbounds iq[k] = -1.0 / trigamma(ak) @@ -338,7 +338,7 @@ function fit_dirichlet!( # update α - for k = 1:K + for k in 1:K @inbounds α[k] -= (g[k] - b) * iq[k] @inbounds if α[k] < 1.0e-12 α[k] = 1.0e-12 @@ -367,38 +367,38 @@ function fit_dirichlet!( throw(ErrorException("No convergence after $maxiter (maxiter) iterations.")) end - Dirichlet(α) + return Dirichlet(α) end function fit_mle( - ::Type{T}, - P::AbstractMatrix{Float64}; - init::Vector{Float64} = Float64[], - maxiter::Int = 25, - tol::Float64 = 1.0e-12, - debug::Bool = false, -) where {T<:Dirichlet} + ::Type{T}, + P::AbstractMatrix{Float64}; + init::Vector{Float64} = Float64[], + maxiter::Int = 25, + tol::Float64 = 1.0e-12, + debug::Bool = false, + ) where {T <: Dirichlet} α = isempty(init) ? 
dirichlet_mle_init(P) : init elogp = mean_logp(suffstats(T, P)) - fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug) + return fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug) end function fit_mle( - ::Type{<:Dirichlet}, - P::AbstractMatrix{Float64}, - w::AbstractArray{Float64}; - init::Vector{Float64} = Float64[], - maxiter::Int = 25, - tol::Float64 = 1.0e-12, - debug::Bool = false, -) + ::Type{<:Dirichlet}, + P::AbstractMatrix{Float64}, + w::AbstractArray{Float64}; + init::Vector{Float64} = Float64[], + maxiter::Int = 25, + tol::Float64 = 1.0e-12, + debug::Bool = false, + ) n = size(P, 2) length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions.")) α = isempty(init) ? dirichlet_mle_init(P, w) : init elogp = mean_logp(suffstats(Dirichlet, P, w)) - fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug) + return fit_dirichlet!(elogp, α; maxiter = maxiter, tol = tol, debug = debug) end diff --git a/src/multivariate/dirichletmultinomial.jl b/src/multivariate/dirichletmultinomial.jl index 73c1c08370..54d54cae15 100644 --- a/src/multivariate/dirichletmultinomial.jl +++ b/src/multivariate/dirichletmultinomial.jl @@ -30,7 +30,7 @@ DirichletMultinomial(n, k) # Dirichlet-multinomial distribution with n trials an vector of length k of ones. ``` """ -struct DirichletMultinomial{T<:Real} <: DiscreteMultivariateDistribution +struct DirichletMultinomial{T <: Real} <: DiscreteMultivariateDistribution n::Int α::Vector{T} α0::T @@ -39,12 +39,12 @@ struct DirichletMultinomial{T<:Real} <: DiscreteMultivariateDistribution α0 = sum(abs, α) sum(α) == α0 || throw(ArgumentError("alpha must be a positive vector.")) n > 0 || throw(ArgumentError("n must be a positive integer.")) - new{T}(Int(n), α, α0) + return new{T}(Int(n), α, α0) end end -DirichletMultinomial(n::Integer, α::Vector{T}) where {T<:Real} = +DirichletMultinomial(n::Integer, α::Vector{T}) where {T <: Real} = DirichletMultinomial{T}(n, α) -DirichletMultinomial(n::Integer, α::Vector{T}) where {T<:Integer} = +DirichletMultinomial(n::Integer, α::Vector{T}) where {T <: Integer} = DirichletMultinomial(n, float(α)) DirichletMultinomial(n::Integer, k::Integer) = DirichletMultinomial(n, ones(k)) @@ -60,13 +60,13 @@ params(d::DirichletMultinomial) = (d.n, d.α) # Statistics mean(d::DirichletMultinomial) = d.α .* (d.n / d.α0) -function var(d::DirichletMultinomial{T}) where {T<:Real} +function var(d::DirichletMultinomial{T}) where {T <: Real} v = fill(d.n * (d.n + d.α0) / (1 + d.α0), length(d)) p = d.α / d.α0 for i in eachindex(v) @inbounds v[i] *= p[i] * (1 - p[i]) end - v + return v end function cov(d::DirichletMultinomial{<:Real}) v = var(d) @@ -75,12 +75,12 @@ function cov(d::DirichletMultinomial{<:Real}) for (i, vi) in zip(diagind(c), v) @inbounds c[i] = vi end - c + return c end # Evaluation -function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where {T<:Real} +function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where {T <: Real} k = length(d) length(x) == k || return false for xi in x @@ -88,13 +88,13 @@ function insupport(d::DirichletMultinomial, x::AbstractVector{T}) where {T<:Real end return sum(x) == ntrials(d) end -function _logpdf(d::DirichletMultinomial{S}, x::AbstractVector{T}) where {T<:Real,S<:Real} +function _logpdf(d::DirichletMultinomial{S}, x::AbstractVector{T}) where {T <: Real, S <: Real} c = loggamma(S(d.n + 1)) + loggamma(d.α0) - loggamma(d.n + d.α0) for j in eachindex(x) @inbounds xj, αj = x[j], d.α[j] c += loggamma(xj + αj) - loggamma(xj + 
1) - loggamma(αj) end - c + return c end @@ -110,47 +110,47 @@ struct DirichletMultinomialStats <: SufficientStats tw::Float64 DirichletMultinomialStats(n::Int, s::Matrix{Float64}, tw::Real) = new(n, s, Float64(tw)) end -function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}) where {T<:Real} +function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}) where {T <: Real} ns = sum(x, dims = 1) # get ntrials for each observation n = ns[1] # use ntrails from first ob., then check all equal all(ns .== n) || error("Each sample in X should sum to the same value.") d, m = size(x) s = zeros(d, n) - @inbounds for k = 1:n, i = 1:m, j = 1:d + @inbounds for k in 1:n, i in 1:m, j in 1:d if x[j, i] >= k s[j, k] += 1.0 end end - DirichletMultinomialStats(n, s, m) + return DirichletMultinomialStats(n, s, m) end function suffstats( - ::Type{<:DirichletMultinomial}, - x::Matrix{T}, - w::Array{Float64}, -) where {T<:Real} + ::Type{<:DirichletMultinomial}, + x::Matrix{T}, + w::Array{Float64}, + ) where {T <: Real} length(w) == size(x, 2) || throw(DimensionMismatch("Inconsistent argument dimensions.")) ns = sum(x, dims = 1) n = ns[1] all(ns .== n) || error("Each sample in X should sum to the same value.") d, m = size(x) s = zeros(d, n) - @inbounds for k = 1:n, i = 1:m, j = 1:d + @inbounds for k in 1:n, i in 1:m, j in 1:d if x[j, i] >= k s[j, k] += w[i] end end - DirichletMultinomialStats(n, s, sum(w)) + return DirichletMultinomialStats(n, s, sum(w)) end function fit_mle( - ::Type{<:DirichletMultinomial}, - ss::DirichletMultinomialStats; - tol::Float64 = 1e-8, - maxiter::Int = 1000, -) + ::Type{<:DirichletMultinomial}, + ss::DirichletMultinomialStats; + tol::Float64 = 1.0e-8, + maxiter::Int = 1000, + ) k = size(ss.s, 2) α = ones(size(ss.s, 1)) - rng = 0.0:(k-1) - @inbounds for iter = 1:maxiter + rng = 0.0:(k - 1) + @inbounds for iter in 1:maxiter α_old = copy(α) αsum = sum(α) denom = ss.tw * sum(inv, αsum .+ rng) @@ -161,5 +161,5 @@ function fit_mle( end maximum(abs, α_old - α) < tol && break end - DirichletMultinomial(ss.n, α) + return DirichletMultinomial(ss.n, α) end diff --git a/src/multivariate/jointorderstatistics.jl b/src/multivariate/jointorderstatistics.jl index 25780e589e..167c53bb51 100644 --- a/src/multivariate/jointorderstatistics.jl +++ b/src/multivariate/jointorderstatistics.jl @@ -35,18 +35,18 @@ JointOrderStatistics(Cauchy(), 10, (1, 10)) # joint distribution of only the ex ``` """ struct JointOrderStatistics{ - D<:ContinuousUnivariateDistribution, - R<:Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}}, -} <: ContinuousMultivariateDistribution + D <: ContinuousUnivariateDistribution, + R <: Union{AbstractVector{Int}, Tuple{Int, Vararg{Int}}}, + } <: ContinuousMultivariateDistribution dist::D n::Int ranks::R function JointOrderStatistics( - dist::ContinuousUnivariateDistribution, - n::Int, - ranks::Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}} = Base.OneTo(n); - check_args::Bool = true, - ) + dist::ContinuousUnivariateDistribution, + n::Int, + ranks::Union{AbstractVector{Int}, Tuple{Int, Vararg{Int}}} = Base.OneTo(n); + check_args::Bool = true, + ) @check_args( JointOrderStatistics, (n, n ≥ 1, "`n` must be a positive integer."), @@ -56,7 +56,7 @@ struct JointOrderStatistics{ "`ranks` must be a sorted vector or tuple of unique integers between 1 and `n`.", ), ) - return new{typeof(dist),typeof(ranks)}(dist, n, ranks) + return new{typeof(dist), typeof(ranks)}(dist, n, ranks) end end @@ -65,13 +65,13 @@ _islesseq(x, y) = isless(x, y) || isequal(x, y) function _are_ranks_valid(ranks, 
n) # this is equivalent to but faster than # issorted(ranks) && allunique(ranks) - !isempty(ranks) && + return !isempty(ranks) && first(ranks) ≥ 1 && last(ranks) ≤ n && issorted(ranks; lt = _islesseq) end function _are_ranks_valid(ranks::AbstractRange, n) - !isempty(ranks) && first(ranks) ≥ 1 && last(ranks) ≤ n && step(ranks) > 0 + return !isempty(ranks) && first(ranks) ≥ 1 && last(ranks) ≤ n && step(ranks) > 0 end length(d::JointOrderStatistics) = length(d.ranks) diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl index cb89192736..355b2ea2ee 100644 --- a/src/multivariate/multinomial.jl +++ b/src/multivariate/multinomial.jl @@ -19,28 +19,28 @@ Multinomial(n, k) # Multinomial distribution for n trials with equal probabili # over 1:k ``` """ -struct Multinomial{T<:Real,TV<:AbstractVector{T}} <: DiscreteMultivariateDistribution +struct Multinomial{T <: Real, TV <: AbstractVector{T}} <: DiscreteMultivariateDistribution n::Int p::TV - Multinomial{T,TV}(n::Int, p::TV) where {T<:Real,TV<:AbstractVector{T}} = new{T,TV}(n, p) + Multinomial{T, TV}(n::Int, p::TV) where {T <: Real, TV <: AbstractVector{T}} = new{T, TV}(n, p) end function Multinomial( - n::Integer, - p::AbstractVector{T}; - check_args::Bool = true, -) where {T<:Real} + n::Integer, + p::AbstractVector{T}; + check_args::Bool = true, + ) where {T <: Real} @check_args( Multinomial, (n, n >= 0), (p, isprobvec(p), "p is not a probability vector."), ) - return Multinomial{T,typeof(p)}(n, p) + return Multinomial{T, typeof(p)}(n, p) end function Multinomial(n::Integer, k::Integer; check_args::Bool = true) @check_args Multinomial (n, n >= 0) (k, k >= 1) - return Multinomial{Float64,Vector{Float64}}(round(Int, n), fill(1.0 / k, k)) + return Multinomial{Float64, Vector{Float64}}(round(Int, n), fill(1.0 / k, k)) end # Parameters @@ -51,80 +51,80 @@ probs(d::Multinomial) = d.p ntrials(d::Multinomial) = d.n params(d::Multinomial) = (d.n, d.p) -@inline partype(d::Multinomial{T}) where {T<:Real} = T +@inline partype(d::Multinomial{T}) where {T <: Real} = T ### Conversions -convert(::Type{Multinomial{T,TV}}, d::Multinomial) where {T<:Real,TV<:AbstractVector{T}} = +convert(::Type{Multinomial{T, TV}}, d::Multinomial) where {T <: Real, TV <: AbstractVector{T}} = Multinomial(d.n, TV(d.p)) convert( - ::Type{Multinomial{T,TV}}, - d::Multinomial{T,TV}, -) where {T<:Real,TV<:AbstractVector{T}} = d + ::Type{Multinomial{T, TV}}, + d::Multinomial{T, TV}, +) where {T <: Real, TV <: AbstractVector{T}} = d convert( - ::Type{Multinomial{T,TV}}, + ::Type{Multinomial{T, TV}}, n, p::AbstractVector, -) where {T<:Real,TV<:AbstractVector} = Multinomial(n, TV(p)) -convert(::Type{Multinomial{T}}, d::Multinomial) where {T<:Real} = Multinomial(d.n, T.(d.p)) -convert(::Type{Multinomial{T}}, d::Multinomial{T}) where {T<:Real} = d -convert(::Type{Multinomial{T}}, n, p::AbstractVector) where {T<:Real} = +) where {T <: Real, TV <: AbstractVector} = Multinomial(n, TV(p)) +convert(::Type{Multinomial{T}}, d::Multinomial) where {T <: Real} = Multinomial(d.n, T.(d.p)) +convert(::Type{Multinomial{T}}, d::Multinomial{T}) where {T <: Real} = d +convert(::Type{Multinomial{T}}, n, p::AbstractVector) where {T <: Real} = Multinomial(n, T.(p)) # Statistics mean(d::Multinomial) = d.n .* d.p -function var(d::Multinomial{T}) where {T<:Real} +function var(d::Multinomial{T}) where {T <: Real} p = probs(d) k = length(p) n = ntrials(d) v = Vector{T}(undef, k) - for i = 1:k + for i in 1:k @inbounds p_i = p[i] v[i] = n * p_i * (1 - p_i) end - v + return v end -function 
cov(d::Multinomial{T}) where {T<:Real} +function cov(d::Multinomial{T}) where {T <: Real} p = probs(d) k = length(p) n = ntrials(d) C = Matrix{T}(undef, k, k) - for j = 1:k + for j in 1:k pj = p[j] - for i = 1:(j-1) + for i in 1:(j - 1) @inbounds C[i, j] = -n * p[i] * pj end @inbounds C[j, j] = n * pj * (1 - pj) end - for j = 1:(k-1) - for i = (j+1):k + for j in 1:(k - 1) + for i in (j + 1):k @inbounds C[i, j] = C[j, i] end end - C + return C end -function mgf(d::Multinomial{T}, t::AbstractVector) where {T<:Real} +function mgf(d::Multinomial{T}, t::AbstractVector) where {T <: Real} p = probs(d) n = ntrials(p) s = zero(T) - for i = 1:length(p) + for i in 1:length(p) s += p[i] * exp(t[i]) end return s^n end -function cf(d::Multinomial{T}, t::AbstractVector) where {T<:Real} +function cf(d::Multinomial{T}, t::AbstractVector) where {T <: Real} p = probs(d) n = ntrials(d) s = zero(Complex{T}) - for i = 1:length(p) + for i in 1:length(p) s += p[i] * exp(im * t[i]) end return s^n @@ -135,7 +135,7 @@ function entropy(d::Multinomial) s = -loggamma(n + 1) + n * entropy(p) for pr in p b = Binomial(n, pr) - for x = 0:n + for x in 0:n s += pdf(b, x) * loggamma(x + 1) end end @@ -145,11 +145,11 @@ end # Evaluation -function insupport(d::Multinomial, x::AbstractVector{T}) where {T<:Real} +function insupport(d::Multinomial, x::AbstractVector{T}) where {T <: Real} k = length(d) length(x) == k || return false s = 0.0 - for i = 1:k + for i in 1:k @inbounds xi = x[i] if !(isinteger(xi) && xi >= 0) return false @@ -159,14 +159,14 @@ function insupport(d::Multinomial, x::AbstractVector{T}) where {T<:Real} return s == ntrials(d) # integer computation would not yield truncation errors end -function _logpdf(d::Multinomial, x::AbstractVector{T}) where {T<:Real} +function _logpdf(d::Multinomial, x::AbstractVector{T}) where {T <: Real} p = probs(d) n = ntrials(d) S = eltype(p) R = promote_type(T, S) insupport(d, x) || return -R(Inf) s = R(loggamma(n + 1)) - for i = 1:length(p) + for i in 1:length(p) @inbounds xi = x[i] @inbounds p_i = p[i] s -= R(loggamma(R(xi) + 1)) @@ -194,14 +194,14 @@ struct MultinomialStats <: SufficientStats MultinomialStats(n::Int, scnts::Vector{Float64}, tw::Real) = new(n, scnts, Float64(tw)) end -function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where {T<:Real} +function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where {T <: Real} K = size(x, 1) n::T = zero(T) scnts = zeros(K) - for j = 1:size(x, 2) + for j in 1:size(x, 2) nj = zero(T) - for i = 1:K + for i in 1:K @inbounds xi = x[i, j] @inbounds scnts[i] += xi nj += xi @@ -213,10 +213,10 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where {T<:Real} error("Each sample in X should sum to the same value.") end end - MultinomialStats(n, scnts, size(x, 2)) + return MultinomialStats(n, scnts, size(x, 2)) end -function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where {T<:Real} +function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where {T <: Real} length(w) == size(x, 2) || throw(DimensionMismatch("Inconsistent argument dimensions.")) K = size(x, 1) @@ -224,11 +224,11 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where scnts = zeros(K) tw = 0.0 - for j = 1:size(x, 2) + for j in 1:size(x, 2) nj = zero(T) @inbounds wj = w[j] tw += wj - for i = 1:K + for i in 1:K @inbounds xi = x[i, j] @inbounds scnts[i] += xi * wj nj += xi @@ -240,7 +240,7 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where error("Each sample in X should 
sum to the same value.") end end - MultinomialStats(n, scnts, tw) + return MultinomialStats(n, scnts, tw) end fit_mle(::Type{<:Multinomial}, ss::MultinomialStats) = @@ -248,10 +248,10 @@ fit_mle(::Type{<:Multinomial}, ss::MultinomialStats) = function fit_mle(::Type{<:Multinomial}, x::Matrix{<:Real}) ss = suffstats(Multinomial, x) - Multinomial(ss.n, lmul!(inv(ss.tw * ss.n), ss.scnts)) + return Multinomial(ss.n, lmul!(inv(ss.tw * ss.n), ss.scnts)) end function fit_mle(::Type{<:Multinomial}, x::Matrix{<:Real}, w::Array{Float64}) ss = suffstats(Multinomial, x, w) - Multinomial(ss.n, lmul!(inv(ss.tw * ss.n), ss.scnts)) + return Multinomial(ss.n, lmul!(inv(ss.tw * ss.n), ss.scnts)) end diff --git a/src/multivariate/mvlogitnormal.jl b/src/multivariate/mvlogitnormal.jl index f13bf35fca..747c0a02d3 100644 --- a/src/multivariate/mvlogitnormal.jl +++ b/src/multivariate/mvlogitnormal.jl @@ -22,9 +22,9 @@ MvLogitNormal(MvNormalCanon(μ, J)) # MvLogitNormal with y ~ MvNormalCanon(μ, - `normal::AbstractMvNormal`: contains the ``d-1``-dimensional distribution of ``y`` """ -struct MvLogitNormal{D<:AbstractMvNormal} <: ContinuousMultivariateDistribution +struct MvLogitNormal{D <: AbstractMvNormal} <: ContinuousMultivariateDistribution normal::D - MvLogitNormal{D}(normal::D) where {D<:AbstractMvNormal} = new{D}(normal) + MvLogitNormal{D}(normal::D) where {D <: AbstractMvNormal} = new{D}(normal) end MvLogitNormal(d::AbstractMvNormal) = MvLogitNormal{typeof(d)}(d) MvLogitNormal(args...) = MvLogitNormal(MvNormal(args...)) @@ -36,7 +36,7 @@ function Base.show(io::IO, d::MvLogitNormal; indent::String = " ") normstr = replace(normstr, "\n" => "\n$indent") print(io, indent) println(io, normstr) - println(io, ")") + return println(io, ")") end # Conversions @@ -121,8 +121,8 @@ function _softmax1!(x::AbstractMatrix, y::AbstractMatrix) return x end -_drop1(x::AbstractVector) = @views x[firstindex(x, 1):(end-1)] -_drop1(x::AbstractMatrix) = @views x[firstindex(x, 1):(end-1), :] +_drop1(x::AbstractVector) = @views x[firstindex(x, 1):(end - 1)] +_drop1(x::AbstractMatrix) = @views x[firstindex(x, 1):(end - 1), :] _last1(x::AbstractVector) = x[end] _last1(x::AbstractMatrix) = @views x[end, :] diff --git a/src/multivariate/mvlognormal.jl b/src/multivariate/mvlognormal.jl index 198a759456..28434ef33c 100644 --- a/src/multivariate/mvlognormal.jl +++ b/src/multivariate/mvlognormal.jl @@ -25,83 +25,83 @@ abstract type AbstractMvLogNormal <: ContinuousMultivariateDistribution end -function insupport(::Type{D}, x::AbstractVector{T}) where {T<:Real,D<:AbstractMvLogNormal} - for i = 1:length(x) +function insupport(::Type{D}, x::AbstractVector{T}) where {T <: Real, D <: AbstractMvLogNormal} + for i in 1:length(x) @inbounds 0.0 < x[i] < Inf ? 
continue : (return false) end - true + return true end -insupport(l::AbstractMvLogNormal, x::AbstractVector{T}) where {T<:Real} = +insupport(l::AbstractMvLogNormal, x::AbstractVector{T}) where {T <: Real} = insupport(typeof(l), x) -assertinsupport(::Type{D}, m::AbstractVector) where {D<:AbstractMvLogNormal} = +assertinsupport(::Type{D}, m::AbstractVector) where {D <: AbstractMvLogNormal} = @assert insupport(D, m) "Mean of LogNormal distribution should be strictly positive" ###Internal functions to calculate scale and location for a desired average and covariance function _location!( - ::Type{D}, - ::Type{Val{:meancov}}, - mn::AbstractVector, - S::AbstractMatrix, - μ::AbstractVector, -) where {D<:AbstractMvLogNormal} - @simd for i = 1:length(mn) + ::Type{D}, + ::Type{Val{:meancov}}, + mn::AbstractVector, + S::AbstractMatrix, + μ::AbstractVector, + ) where {D <: AbstractMvLogNormal} + @simd for i in 1:length(mn) @inbounds μ[i] = log(mn[i] / sqrt(1 + S[i, i] / mn[i] / mn[i])) end - μ + return μ end function _scale!( - ::Type{D}, - ::Type{Val{:meancov}}, - mn::AbstractVector, - S::AbstractMatrix, - Σ::AbstractMatrix, -) where {D<:AbstractMvLogNormal} - for j = 1:length(mn) - @simd for i = j:length(mn) + ::Type{D}, + ::Type{Val{:meancov}}, + mn::AbstractVector, + S::AbstractMatrix, + Σ::AbstractMatrix, + ) where {D <: AbstractMvLogNormal} + for j in 1:length(mn) + @simd for i in j:length(mn) @inbounds Σ[i, j] = Σ[j, i] = log(1 + S[j, i] / mn[i] / mn[j]) end end - Σ + return Σ end function _location!( - ::Type{D}, - ::Type{Val{:mean}}, - mn::AbstractVector, - S::AbstractMatrix, - μ::AbstractVector, -) where {D<:AbstractMvLogNormal} - @simd for i = 1:length(mn) + ::Type{D}, + ::Type{Val{:mean}}, + mn::AbstractVector, + S::AbstractMatrix, + μ::AbstractVector, + ) where {D <: AbstractMvLogNormal} + @simd for i in 1:length(mn) @inbounds μ[i] = log(mn[i]) - S[i, i] / 2 end - μ + return μ end function _location!( - ::Type{D}, - ::Type{Val{:median}}, - md::AbstractVector, - S::AbstractMatrix, - μ::AbstractVector, -) where {D<:AbstractMvLogNormal} - @simd for i = 1:length(md) + ::Type{D}, + ::Type{Val{:median}}, + md::AbstractVector, + S::AbstractMatrix, + μ::AbstractVector, + ) where {D <: AbstractMvLogNormal} + @simd for i in 1:length(md) @inbounds μ[i] = log(md[i]) end - μ + return μ end function _location!( - ::Type{D}, - ::Type{Val{:mode}}, - mo::AbstractVector, - S::AbstractMatrix, - μ::AbstractVector, -) where {D<:AbstractMvLogNormal} - @simd for i = 1:length(mo) + ::Type{D}, + ::Type{Val{:mode}}, + mo::AbstractVector, + S::AbstractMatrix, + μ::AbstractVector, + ) where {D <: AbstractMvLogNormal} + @simd for i in 1:length(mo) @inbounds μ[i] = log(mo[i]) + S[i, i] end - μ + return μ end ###Functions to calculate location and scale for a distribution with desired :mean, :median or :mode and covariance @@ -111,15 +111,15 @@ end Calculate the location vector (as above) and store the result in ``μ`` """ function location!( - ::Type{D}, - s::Symbol, - m::AbstractVector, - S::AbstractMatrix, - μ::AbstractVector, -) where {D<:AbstractMvLogNormal} + ::Type{D}, + s::Symbol, + m::AbstractVector, + S::AbstractMatrix, + μ::AbstractVector, + ) where {D <: AbstractMvLogNormal} @assert size(S) == (length(m), length(m)) && length(m) == length(μ) assertinsupport(D, m) - _location!(D, Val{s}, m, S, μ) + return _location!(D, Val{s}, m, S, μ) end """ @@ -137,14 +137,14 @@ It is not possible to analytically calculate the location vector from e.g., medi or from mode + covariance. 
""" function location( - ::Type{D}, - s::Symbol, - m::AbstractVector, - S::AbstractMatrix, -) where {D<:AbstractMvLogNormal} + ::Type{D}, + s::Symbol, + m::AbstractVector, + S::AbstractMatrix, + ) where {D <: AbstractMvLogNormal} @assert size(S) == (length(m), length(m)) assertinsupport(D, m) - _location!(D, Val{s}, m, S, similar(m)) + return _location!(D, Val{s}, m, S, similar(m)) end """ @@ -153,15 +153,15 @@ end Calculate the scale parameter, as defined for the location parameter above and store the result in `Σ`. """ function scale!( - ::Type{D}, - s::Symbol, - m::AbstractVector, - S::AbstractMatrix, - Σ::AbstractMatrix, -) where {D<:AbstractMvLogNormal} + ::Type{D}, + s::Symbol, + m::AbstractVector, + S::AbstractMatrix, + Σ::AbstractMatrix, + ) where {D <: AbstractMvLogNormal} @assert size(S) == size(Σ) == (length(m), length(m)) assertinsupport(D, m) - _scale!(D, Val{s}, m, S, Σ) + return _scale!(D, Val{s}, m, S, Σ) end """ @@ -170,14 +170,14 @@ end Calculate the scale parameter, as defined for the location parameter above. """ function scale( - ::Type{D}, - s::Symbol, - m::AbstractVector, - S::AbstractMatrix, -) where {D<:AbstractMvLogNormal} + ::Type{D}, + s::Symbol, + m::AbstractVector, + S::AbstractMatrix, + ) where {D <: AbstractMvLogNormal} @assert size(S) == (length(m), length(m)) assertinsupport(D, m) - _scale!(D, Val{s}, m, S, similar(S)) + return _scale!(D, Val{s}, m, S, similar(S)) end """ @@ -191,7 +191,7 @@ params!( S::AbstractMatrix, μ::AbstractVector, Σ::AbstractMatrix, -) where {D<:AbstractMvLogNormal} = +) where {D <: AbstractMvLogNormal} = location!(D, :meancov, m, S, μ), scale!(D, :meancov, m, S, Σ) """ @@ -199,7 +199,7 @@ params!( Return (scale,location) for a given mean and covariance """ -params(::Type{D}, m::AbstractVector, S::AbstractMatrix) where {D<:AbstractMvLogNormal} = +params(::Type{D}, m::AbstractVector, S::AbstractMatrix) where {D <: AbstractMvLogNormal} = params!(D, m, S, similar(m), similar(S)) ######################################################### @@ -223,8 +223,8 @@ Mean vector ``\\boldsymbol{\\mu}`` and covariance matrix ``\\boldsymbol{\\Sigma} underlying normal distribution are known as the *location* and *scale* parameters of the corresponding lognormal distribution. """ -struct MvLogNormal{T<:Real,Cov<:AbstractPDMat,Mean<:AbstractVector} <: AbstractMvLogNormal - normal::MvNormal{T,Cov,Mean} +struct MvLogNormal{T <: Real, Cov <: AbstractPDMat, Mean <: AbstractVector} <: AbstractMvLogNormal + normal::MvNormal{T, Cov, Mean} end # Constructors mirror the ones for MvNormmal @@ -243,18 +243,18 @@ MvLogNormal(d::Int, s::Real) = MvLogNormal(MvNormal(d, s)) Base.eltype(::Type{<:MvLogNormal{T}}) where {T} = T ### Conversion -function convert(::Type{MvLogNormal{T}}, d::MvLogNormal) where {T<:Real} - MvLogNormal(convert(MvNormal{T}, d.normal)) +function convert(::Type{MvLogNormal{T}}, d::MvLogNormal) where {T <: Real} + return MvLogNormal(convert(MvNormal{T}, d.normal)) end -Base.convert(::Type{MvLogNormal{T}}, d::MvLogNormal{T}) where {T<:Real} = d +Base.convert(::Type{MvLogNormal{T}}, d::MvLogNormal{T}) where {T <: Real} = d -function convert(::Type{MvLogNormal{T}}, pars...) where {T<:Real} - MvLogNormal(convert(MvNormal{T}, MvNormal(pars...))) +function convert(::Type{MvLogNormal{T}}, pars...) 
where {T <: Real} + return MvLogNormal(convert(MvNormal{T}, MvNormal(pars...))) end length(d::MvLogNormal) = length(d.normal) params(d::MvLogNormal) = params(d.normal) -@inline partype(d::MvLogNormal{T}) where {T<:Real} = T +@inline partype(d::MvLogNormal{T}) where {T <: Real} = T """ location(d::MvLogNormal) @@ -303,9 +303,9 @@ function _rand!(rng::AbstractRNG, d::MvLogNormal, x::AbstractVecOrMat{<:Real}) return x end -_logpdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T<:Real} = +_logpdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T <: Real} = insupport(d, x) ? (_logpdf(d.normal, log.(x)) - sum(log.(x))) : -Inf -_pdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T<:Real} = +_pdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T <: Real} = insupport(d, x) ? _pdf(d.normal, log.(x)) / prod(x) : 0.0 Base.show(io::IO, d::MvLogNormal) = diff --git a/src/multivariate/mvnormal.jl b/src/multivariate/mvnormal.jl index abc5dc9b95..f1be847750 100644 --- a/src/multivariate/mvnormal.jl +++ b/src/multivariate/mvnormal.jl @@ -147,10 +147,10 @@ _logpdf(d::AbstractMvNormal, x::AbstractVector) = mvnormal_c0(d) - sqmahal(d, x) function _logpdf!(r::AbstractArray{<:Real}, d::AbstractMvNormal, x::AbstractMatrix{<:Real}) sqmahal!(r, d, x) c0 = mvnormal_c0(d) - for i = 1:size(x, 2) + for i in 1:size(x, 2) @inbounds r[i] = c0 - r[i] / 2 end - r + return r end ########################################################### @@ -168,33 +168,33 @@ Generally, users don't have to worry about these internal details. We provide a common constructor `MvNormal`, which will construct a distribution of appropriate type depending on the input arguments. """ -struct MvNormal{T<:Real,Cov<:AbstractPDMat,Mean<:AbstractVector} <: AbstractMvNormal +struct MvNormal{T <: Real, Cov <: AbstractPDMat, Mean <: AbstractVector} <: AbstractMvNormal μ::Mean Σ::Cov end const MultivariateNormal = MvNormal # for the purpose of backward compatibility -const IsoNormal = MvNormal{Float64,ScalMat{Float64},Vector{Float64}} -const DiagNormal = MvNormal{Float64,PDiagMat{Float64,Vector{Float64}},Vector{Float64}} -const FullNormal = MvNormal{Float64,PDMat{Float64,Matrix{Float64}},Vector{Float64}} +const IsoNormal = MvNormal{Float64, ScalMat{Float64}, Vector{Float64}} +const DiagNormal = MvNormal{Float64, PDiagMat{Float64, Vector{Float64}}, Vector{Float64}} +const FullNormal = MvNormal{Float64, PDMat{Float64, Matrix{Float64}}, Vector{Float64}} -const ZeroMeanIsoNormal{Axes} = MvNormal{Float64,ScalMat{Float64},Zeros{Float64,1,Axes}} +const ZeroMeanIsoNormal{Axes} = MvNormal{Float64, ScalMat{Float64}, Zeros{Float64, 1, Axes}} const ZeroMeanDiagNormal{Axes} = - MvNormal{Float64,PDiagMat{Float64,Vector{Float64}},Zeros{Float64,1,Axes}} + MvNormal{Float64, PDiagMat{Float64, Vector{Float64}}, Zeros{Float64, 1, Axes}} const ZeroMeanFullNormal{Axes} = - MvNormal{Float64,PDMat{Float64,Matrix{Float64}},Zeros{Float64,1,Axes}} + MvNormal{Float64, PDMat{Float64, Matrix{Float64}}, Zeros{Float64, 1, Axes}} ### Construction -function MvNormal(μ::AbstractVector{T}, Σ::AbstractPDMat{T}) where {T<:Real} +function MvNormal(μ::AbstractVector{T}, Σ::AbstractPDMat{T}) where {T <: Real} size(Σ, 1) == length(μ) || throw(DimensionMismatch("The dimensions of mu and Sigma are inconsistent.")) - MvNormal{T,typeof(Σ),typeof(μ)}(μ, Σ) + return MvNormal{T, typeof(Σ), typeof(μ)}(μ, Σ) end function MvNormal(μ::AbstractVector{<:Real}, Σ::AbstractPDMat{<:Real}) R = Base.promote_eltype(μ, Σ) - MvNormal(convert(AbstractArray{R}, μ), convert(AbstractArray{R}, Σ)) + return 
MvNormal(convert(AbstractArray{R}, μ), convert(AbstractArray{R}, Σ)) end # constructor with general covariance matrix @@ -207,14 +207,14 @@ MvNormal(μ::AbstractVector{<:Real}, Σ::AbstractMatrix{<:Real}) = MvNormal(μ, MvNormal(μ::AbstractVector{<:Real}, Σ::Diagonal{<:Real}) = MvNormal(μ, PDiagMat(Σ.diag)) MvNormal( μ::AbstractVector{<:Real}, - Σ::Union{Symmetric{<:Real,<:Diagonal{<:Real}},Hermitian{<:Real,<:Diagonal{<:Real}}}, + Σ::Union{Symmetric{<:Real, <:Diagonal{<:Real}}, Hermitian{<:Real, <:Diagonal{<:Real}}}, ) = MvNormal(μ, PDiagMat(Σ.data.diag)) MvNormal(μ::AbstractVector{<:Real}, Σ::UniformScaling{<:Real}) = MvNormal(μ, ScalMat(length(μ), Σ.λ)) function MvNormal( - μ::AbstractVector{<:Real}, - Σ::Diagonal{<:Real,<:FillArrays.AbstractFill{<:Real,1}}, -) + μ::AbstractVector{<:Real}, + Σ::Diagonal{<:Real, <:FillArrays.AbstractFill{<:Real, 1}}, + ) return MvNormal(μ, ScalMat(size(Σ, 1), FillArrays.getindex_value(Σ.diag))) end @@ -242,13 +242,13 @@ Base.@deprecate MvNormal(d::Int, σ::Real) MvNormal( Base.eltype(::Type{<:MvNormal{T}}) where {T} = T ### Conversion -function convert(::Type{MvNormal{T}}, d::MvNormal) where {T<:Real} - MvNormal(convert(AbstractArray{T}, d.μ), convert(AbstractArray{T}, d.Σ)) +function convert(::Type{MvNormal{T}}, d::MvNormal) where {T <: Real} + return MvNormal(convert(AbstractArray{T}, d.μ), convert(AbstractArray{T}, d.Σ)) end -Base.convert(::Type{MvNormal{T}}, d::MvNormal{T}) where {T<:Real} = d +Base.convert(::Type{MvNormal{T}}, d::MvNormal{T}) where {T <: Real} = d -function convert(::Type{MvNormal{T}}, μ::AbstractVector, Σ::AbstractPDMat) where {T<:Real} - MvNormal(convert(AbstractArray{T}, μ), convert(AbstractArray{T}, Σ)) +function convert(::Type{MvNormal{T}}, μ::AbstractVector, Σ::AbstractPDMat) where {T <: Real} + return MvNormal(convert(AbstractArray{T}, μ), convert(AbstractArray{T}, Σ)) end ### Show @@ -269,7 +269,7 @@ Base.show(io::IO, d::MvNormal) = length(d::MvNormal) = length(d.μ) mean(d::MvNormal) = d.μ params(d::MvNormal) = (d.μ, d.Σ) -@inline partype(d::MvNormal{T}) where {T<:Real} = T +@inline partype(d::MvNormal{T}) where {T <: Real} = T var(d::MvNormal) = diag(d.Σ) cov(d::MvNormal) = d.Σ @@ -323,7 +323,7 @@ dot(d::MvNormal, b::AbstractVector) = dot(b, d) ### Estimation with known covariance -struct MvNormalKnownCov{Cov<:AbstractPDMat} +struct MvNormalKnownCov{Cov <: AbstractPDMat} Σ::Cov end @@ -333,46 +333,46 @@ MvNormalKnownCov(Σ::Matrix{Float64}) = MvNormalKnownCov(PDMat(Σ)) length(g::MvNormalKnownCov) = size(g.Σ, 1) -struct MvNormalKnownCovStats{Cov<:AbstractPDMat} +struct MvNormalKnownCovStats{Cov <: AbstractPDMat} invΣ::Cov # inverse covariance sx::Vector{Float64} # (weighted) sum of vectors tw::Float64 # sum of weights end function suffstats( - g::MvNormalKnownCov{Cov}, - x::AbstractMatrix{Float64}, -) where {Cov<:AbstractPDMat} + g::MvNormalKnownCov{Cov}, + x::AbstractMatrix{Float64}, + ) where {Cov <: AbstractPDMat} size(x, 1) == length(g) || throw(DimensionMismatch("Invalid argument dimensions.")) invΣ = inv(g.Σ) sx = vec(sum(x, dims = 2)) tw = Float64(size(x, 2)) - MvNormalKnownCovStats{Cov}(invΣ, sx, tw) + return MvNormalKnownCovStats{Cov}(invΣ, sx, tw) end function suffstats( - g::MvNormalKnownCov{Cov}, - x::AbstractMatrix{Float64}, - w::AbstractVector, -) where {Cov<:AbstractPDMat} + g::MvNormalKnownCov{Cov}, + x::AbstractMatrix{Float64}, + w::AbstractVector, + ) where {Cov <: AbstractPDMat} (size(x, 1) == length(g) && size(x, 2) == length(w)) || throw(DimensionMismatch("Inconsistent argument dimensions.")) invΣ = inv(g.Σ) 
sx = x * vec(w) tw = sum(w) - MvNormalKnownCovStats{Cov}(invΣ, sx, tw) + return MvNormalKnownCovStats{Cov}(invΣ, sx, tw) end ## MLE estimation with covariance known -fit_mle(g::MvNormalKnownCov{C}, ss::MvNormalKnownCovStats{C}) where {C<:AbstractPDMat} = +fit_mle(g::MvNormalKnownCov{C}, ss::MvNormalKnownCovStats{C}) where {C <: AbstractPDMat} = MvNormal(ss.sx * inv(ss.tw), g.Σ) function fit_mle(g::MvNormalKnownCov, x::AbstractMatrix{Float64}) d = length(g) size(x, 1) == d || throw(DimensionMismatch("Invalid argument dimensions.")) μ = lmul!(inv(size(x, 2)), vec(sum(x, dims = 2))) - MvNormal(μ, g.Σ) + return MvNormal(μ, g.Σ) end function fit_mle(g::MvNormalKnownCov, x::AbstractMatrix{Float64}, w::AbstractVector) @@ -380,7 +380,7 @@ function fit_mle(g::MvNormalKnownCov, x::AbstractMatrix{Float64}, w::AbstractVec (size(x, 1) == d && size(x, 2) == length(w)) || throw(DimensionMismatch("Inconsistent argument dimensions.")) μ = BLAS.gemv('N', inv(sum(w)), x, vec(w)) - MvNormal(μ, g.Σ) + return MvNormal(μ, g.Σ) end @@ -400,7 +400,7 @@ function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}) m = s * inv(n) z = x .- m s2 = z * z' - MvNormalStats(s, m, s2, Float64(n)) + return MvNormalStats(s, m, s2, Float64(n)) end function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) @@ -412,16 +412,16 @@ function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::AbstractVec s = x * vec(w) m = s * inv(tw) z = similar(x) - for j = 1:n + for j in 1:n xj = view(x, :, j) zj = view(z, :, j) swj = sqrt(w[j]) - for i = 1:d + for i in 1:d @inbounds zj[i] = swj * (xj[i] - m[i]) end end s2 = z * z' - MvNormalStats(s, m, s2, tw) + return MvNormalStats(s, m, s2, tw) end @@ -444,7 +444,7 @@ function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}) z = x .- mu C = BLAS.syrk('U', 'N', inv(n), z) LinearAlgebra.copytri!(C, 'U') - MvNormal(mu, PDMat(C)) + return MvNormal(mu, PDMat(C)) end function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) @@ -456,15 +456,15 @@ function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVec mu = BLAS.gemv('N', inv_sw, x, w) z = Matrix{Float64}(undef, m, n) - for j = 1:n + for j in 1:n cj = sqrt(w[j]) - for i = 1:m + for i in 1:m @inbounds z[i, j] = (x[i, j] - mu[i]) * cj end end C = BLAS.syrk('U', 'N', inv_sw, z) LinearAlgebra.copytri!(C, 'U') - MvNormal(mu, PDMat(C)) + return MvNormal(mu, PDMat(C)) end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}) @@ -473,13 +473,13 @@ function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}) mu = vec(mean(x, dims = 2)) va = zeros(Float64, m) - for j = 1:n - for i = 1:m + for j in 1:n + for i in 1:m @inbounds va[i] += abs2(x[i, j] - mu[i]) end end lmul!(inv(n), va) - MvNormal(mu, PDiagMat(va)) + return MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) @@ -491,14 +491,14 @@ function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVec mu = BLAS.gemv('N', inv_sw, x, w) va = zeros(Float64, m) - for j = 1:n + for j in 1:n @inbounds wj = w[j] - for i = 1:m + for i in 1:m @inbounds va[i] += abs2(x[i, j] - mu[i]) * wj end end lmul!(inv_sw, va) - MvNormal(mu, PDiagMat(va)) + return MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}) @@ -507,14 +507,14 @@ function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}) mu = vec(mean(x, dims = 2)) va = 0.0 - for j = 1:n + for j in 1:n va_j = 0.0 - for i = 1:m + 
for i in 1:m @inbounds va_j += abs2(x[i, j] - mu[i]) end va += va_j end - MvNormal(mu, ScalMat(m, va / (m * n))) + return MvNormal(mu, ScalMat(m, va / (m * n))) end function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) @@ -527,13 +527,13 @@ function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}, w::AbstractVect mu = BLAS.gemv('N', inv_sw, x, w) va = 0.0 - for j = 1:n + for j in 1:n @inbounds wj = w[j] va_j = 0.0 - for i = 1:m + for i in 1:m @inbounds va_j += abs2(x[i, j] - mu[i]) * wj end va += va_j end - MvNormal(mu, ScalMat(m, va / (m * sw))) + return MvNormal(mu, ScalMat(m, va / (m * sw))) end diff --git a/src/multivariate/mvnormalcanon.jl b/src/multivariate/mvnormalcanon.jl index 582e74e94a..9ff2d7a83f 100644 --- a/src/multivariate/mvnormalcanon.jl +++ b/src/multivariate/mvnormalcanon.jl @@ -40,48 +40,48 @@ const ZeroMeanIsoNormalCanon{Axes} = MvNormalCanon{Float64, ScalMat{Float64}, **Note:** `MvNormalCanon` share the same set of methods as `MvNormal`. """ -struct MvNormalCanon{T<:Real,P<:AbstractPDMat,V<:AbstractVector} <: AbstractMvNormal +struct MvNormalCanon{T <: Real, P <: AbstractPDMat, V <: AbstractVector} <: AbstractMvNormal μ::V # the mean vector h::V # potential vector, i.e. inv(Σ) * μ J::P # precision matrix, i.e. inv(Σ) end const FullNormalCanon = - MvNormalCanon{Float64,PDMat{Float64,Matrix{Float64}},Vector{Float64}} + MvNormalCanon{Float64, PDMat{Float64, Matrix{Float64}}, Vector{Float64}} const DiagNormalCanon = - MvNormalCanon{Float64,PDiagMat{Float64,Vector{Float64}},Vector{Float64}} -const IsoNormalCanon = MvNormalCanon{Float64,ScalMat{Float64},Vector{Float64}} + MvNormalCanon{Float64, PDiagMat{Float64, Vector{Float64}}, Vector{Float64}} +const IsoNormalCanon = MvNormalCanon{Float64, ScalMat{Float64}, Vector{Float64}} const ZeroMeanFullNormalCanon{Axes} = - MvNormalCanon{Float64,PDMat{Float64,Matrix{Float64}},Zeros{Float64,1,Axes}} + MvNormalCanon{Float64, PDMat{Float64, Matrix{Float64}}, Zeros{Float64, 1, Axes}} const ZeroMeanDiagNormalCanon{Axes} = - MvNormalCanon{Float64,PDiagMat{Float64,Vector{Float64}},Zeros{Float64,1,Axes}} + MvNormalCanon{Float64, PDiagMat{Float64, Vector{Float64}}, Zeros{Float64, 1, Axes}} const ZeroMeanIsoNormalCanon{Axes} = - MvNormalCanon{Float64,ScalMat{Float64},Zeros{Float64,1,Axes}} + MvNormalCanon{Float64, ScalMat{Float64}, Zeros{Float64, 1, Axes}} ### Constructors function MvNormalCanon( - μ::AbstractVector{T}, - h::AbstractVector{T}, - J::AbstractPDMat{T}, -) where {T<:Real} + μ::AbstractVector{T}, + h::AbstractVector{T}, + J::AbstractPDMat{T}, + ) where {T <: Real} length(μ) == length(h) == size(J, 1) || throw(DimensionMismatch("Inconsistent argument dimensions")) if typeof(μ) === typeof(h) - return MvNormalCanon{T,typeof(J),typeof(μ)}(μ, h, J) + return MvNormalCanon{T, typeof(J), typeof(μ)}(μ, h, J) else - return MvNormalCanon{T,typeof(J),Vector{T}}(collect(μ), collect(h), J) + return MvNormalCanon{T, typeof(J), Vector{T}}(collect(μ), collect(h), J) end end function MvNormalCanon( - μ::AbstractVector{T}, - h::AbstractVector{T}, - J::AbstractPDMat, -) where {T<:Real} + μ::AbstractVector{T}, + h::AbstractVector{T}, + J::AbstractPDMat, + ) where {T <: Real} R = promote_type(T, eltype(J)) - MvNormalCanon( + return MvNormalCanon( convert(AbstractArray{R}, μ), convert(AbstractArray{R}, h), convert(AbstractArray{R}, J), @@ -89,12 +89,12 @@ function MvNormalCanon( end function MvNormalCanon( - μ::AbstractVector{<:Real}, - h::AbstractVector{<:Real}, - J::AbstractPDMat, -) + μ::AbstractVector{<:Real}, + 
h::AbstractVector{<:Real}, + J::AbstractPDMat, + ) R = Base.promote_eltype(μ, h, J) - MvNormalCanon( + return MvNormalCanon( convert(AbstractArray{R}, μ), convert(AbstractArray{R}, h), convert(AbstractArray{R}, J), @@ -106,7 +106,7 @@ function MvNormalCanon(h::AbstractVector{<:Real}, J::AbstractPDMat) R = Base.promote_eltype(h, J) hh = convert(AbstractArray{R}, h) JJ = convert(AbstractArray{R}, J) - MvNormalCanon(JJ \ hh, hh, JJ) + return MvNormalCanon(JJ \ hh, hh, JJ) end """ @@ -121,15 +121,15 @@ MvNormalCanon(h::AbstractVector{<:Real}, J::Diagonal{<:Real}) = MvNormalCanon(h, PDiagMat(J.diag)) MvNormalCanon( μ::AbstractVector{<:Real}, - J::Union{Symmetric{<:Real,<:Diagonal{<:Real}},Hermitian{<:Real,<:Diagonal{<:Real}}}, + J::Union{Symmetric{<:Real, <:Diagonal{<:Real}}, Hermitian{<:Real, <:Diagonal{<:Real}}}, ) = MvNormalCanon(μ, PDiagMat(J.data.diag)) function MvNormalCanon(h::AbstractVector{<:Real}, J::UniformScaling{<:Real}) return MvNormalCanon(h, ScalMat(length(h), J.λ)) end function MvNormalCanon( - h::AbstractVector{<:Real}, - J::Diagonal{<:Real,<:FillArrays.AbstractFill{<:Real,1}}, -) + h::AbstractVector{<:Real}, + J::Diagonal{<:Real, <:FillArrays.AbstractFill{<:Real, 1}}, + ) return MvNormalCanon(h, ScalMat(size(J, 1), FillArrays.getindex_value(J.diag))) end @@ -169,22 +169,22 @@ distrname(d::ZeroMeanDiagNormalCanon) = "ZeroMeanDiagormalCanon" distrname(d::ZeroMeanFullNormalCanon) = "ZeroMeanFullNormalCanon" ### Conversion -function convert(::Type{MvNormalCanon{T}}, d::MvNormalCanon) where {T<:Real} - MvNormalCanon( +function convert(::Type{MvNormalCanon{T}}, d::MvNormalCanon) where {T <: Real} + return MvNormalCanon( convert(AbstractArray{T}, d.μ), convert(AbstractArray{T}, d.h), convert(AbstractArray{T}, d.J), ) end -Base.convert(::Type{MvNormalCanon{T}}, d::MvNormalCanon{T}) where {T<:Real} = d +Base.convert(::Type{MvNormalCanon{T}}, d::MvNormalCanon{T}) where {T <: Real} = d function convert( - ::Type{MvNormalCanon{T}}, - μ::AbstractVector{<:Real}, - h::AbstractVector{<:Real}, - J::AbstractPDMat, -) where {T<:Real} - MvNormalCanon( + ::Type{MvNormalCanon{T}}, + μ::AbstractVector{<:Real}, + h::AbstractVector{<:Real}, + J::AbstractPDMat, + ) where {T <: Real} + return MvNormalCanon( convert(AbstractArray{T}, μ), convert(AbstractArray{T}, h), convert(AbstractArray{T}, J), @@ -197,18 +197,18 @@ meanform(d::MvNormalCanon) = MvNormal(d.μ, inv(d.J)) # meanform{C, T<:Real}(d::MvNormalCanon{T,C,Vector{T}}) = MvNormal(d.μ, inv(d.J)) # meanform{C, T<:Real}(d::MvNormalCanon{T,C,Zeros{T}}) = MvNormal(inv(d.J)) -function canonform(d::MvNormal{T,C,<:AbstractVector{T}}) where {C,T<:Real} +function canonform(d::MvNormal{T, C, <:AbstractVector{T}}) where {C, T <: Real} J = inv(d.Σ) return MvNormalCanon(d.μ, J * collect(d.μ), J) end -canonform(d::MvNormal{T,C,Zeros{T}}) where {C,T<:Real} = MvNormalCanon(inv(d.Σ)) +canonform(d::MvNormal{T, C, Zeros{T}}) where {C, T <: Real} = MvNormalCanon(inv(d.Σ)) ### Basic statistics length(d::MvNormalCanon) = length(d.μ) mean(d::MvNormalCanon) = convert(Vector{eltype(d.μ)}, d.μ) params(d::MvNormalCanon) = (d.μ, d.h, d.J) -@inline partype(d::MvNormalCanon{T}) where {T<:Real} = T +@inline partype(d::MvNormalCanon{T}) where {T <: Real} = T Base.eltype(::Type{<:MvNormalCanon{T}}) where {T} = T var(d::MvNormalCanon) = diag(inv(d.J)) diff --git a/src/multivariate/mvtdist.jl b/src/multivariate/mvtdist.jl index ae1ae04344..65636ec667 100644 --- a/src/multivariate/mvtdist.jl +++ b/src/multivariate/mvtdist.jl @@ -4,70 +4,70 @@ abstract type AbstractMvTDist <: 
ContinuousMultivariateDistribution end -struct GenericMvTDist{T<:Real,Cov<:AbstractPDMat,Mean<:AbstractVector} <: AbstractMvTDist +struct GenericMvTDist{T <: Real, Cov <: AbstractPDMat, Mean <: AbstractVector} <: AbstractMvTDist df::T # non-integer degrees of freedom allowed dim::Int μ::Mean Σ::Cov - function GenericMvTDist{T,Cov,Mean}( - df::T, - dim::Int, - μ::Mean, - Σ::AbstractPDMat{T}, - ) where {T,Cov,Mean} + function GenericMvTDist{T, Cov, Mean}( + df::T, + dim::Int, + μ::Mean, + Σ::AbstractPDMat{T}, + ) where {T, Cov, Mean} df > zero(df) || error("df must be positive") - new{T,Cov,Mean}(df, dim, μ, Σ) + return new{T, Cov, Mean}(df, dim, μ, Σ) end end function GenericMvTDist( - df::T, - μ::Mean, - Σ::Cov, -) where {Cov<:AbstractPDMat,Mean<:AbstractVector,T<:Real} + df::T, + μ::Mean, + Σ::Cov, + ) where {Cov <: AbstractPDMat, Mean <: AbstractVector, T <: Real} d = length(μ) size(Σ, 1) == d || throw(DimensionMismatch("The dimensions of μ and Σ are inconsistent.")) R = Base.promote_eltype(T, μ, Σ) S = convert(AbstractArray{R}, Σ) m = convert(AbstractArray{R}, μ) - GenericMvTDist{R,typeof(S),typeof(m)}(R(df), d, m, S) + return GenericMvTDist{R, typeof(S), typeof(m)}(R(df), d, m, S) end function GenericMvTDist(df::Real, Σ::AbstractPDMat) R = Base.promote_eltype(df, Σ) - GenericMvTDist(df, Zeros{R}(size(Σ, 1)), Σ) + return GenericMvTDist(df, Zeros{R}(size(Σ, 1)), Σ) end -GenericMvTDist{T,Cov,Mean}(df, μ, Σ) where {T,Cov,Mean} = +GenericMvTDist{T, Cov, Mean}(df, μ, Σ) where {T, Cov, Mean} = GenericMvTDist(convert(T, df), convert(Mean, μ), convert(Cov, Σ)) ### Conversion -function convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist) where {T<:Real} +function convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist) where {T <: Real} S = convert(AbstractArray{T}, d.Σ) m = convert(AbstractArray{T}, d.μ) - GenericMvTDist{T,typeof(S),typeof(m)}(T(d.df), d.dim, m, S) + return GenericMvTDist{T, typeof(S), typeof(m)}(T(d.df), d.dim, m, S) end -Base.convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist{T}) where {T<:Real} = d +Base.convert(::Type{GenericMvTDist{T}}, d::GenericMvTDist{T}) where {T <: Real} = d function convert( - ::Type{GenericMvTDist{T}}, - df, - dim, - μ::AbstractVector, - Σ::AbstractPDMat, -) where {T<:Real} + ::Type{GenericMvTDist{T}}, + df, + dim, + μ::AbstractVector, + Σ::AbstractPDMat, + ) where {T <: Real} S = convert(AbstractArray{T}, Σ) m = convert(AbstractArray{T}, μ) - GenericMvTDist{T,typeof(S),typeof(m)}(T(df), dim, m, S) + return GenericMvTDist{T, typeof(S), typeof(m)}(T(df), dim, m, S) end ## Construction of multivariate normal with specific covariance type -const IsoTDist = GenericMvTDist{Float64,ScalMat{Float64},Vector{Float64}} -const DiagTDist = GenericMvTDist{Float64,PDiagMat{Float64,Vector{Float64}},Vector{Float64}} -const MvTDist = GenericMvTDist{Float64,PDMat{Float64,Matrix{Float64}},Vector{Float64}} +const IsoTDist = GenericMvTDist{Float64, ScalMat{Float64}, Vector{Float64}} +const DiagTDist = GenericMvTDist{Float64, PDiagMat{Float64, Vector{Float64}}, Vector{Float64}} +const MvTDist = GenericMvTDist{Float64, PDMat{Float64, Matrix{Float64}}, Vector{Float64}} MvTDist(df::Real, μ::Vector{<:Real}, C::PDMat) = GenericMvTDist(df, μ, C) MvTDist(df::Real, C::PDMat) = GenericMvTDist(df, C) @@ -111,7 +111,7 @@ mode(d::GenericMvTDist) = d.μ modes(d::GenericMvTDist) = [mode(d)] var(d::GenericMvTDist) = - d.df > 2 ? (d.df / (d.df - 2)) * diag(d.Σ) : Float64[NaN for i = 1:d.dim] + d.df > 2 ? 
(d.df / (d.df - 2)) * diag(d.Σ) : Float64[NaN for i in 1:d.dim] scale(d::GenericMvTDist) = Matrix(d.Σ) cov(d::GenericMvTDist) = d.df > 2 ? (d.df / (d.df - 2)) * Matrix(d.Σ) : NaN * ones(d.dim, d.dim) @@ -128,22 +128,22 @@ Base.eltype(::Type{<:GenericMvTDist{T}}) where {T} = T function entropy(d::GenericMvTDist) hdf, hdim = 0.5 * d.df, 0.5 * d.dim shdfhdim = hdf + hdim - 0.5 * logdet(d.Σ) + hdim * log(d.df * pi) + logbeta(hdim, hdf) - loggamma(hdim) + - shdfhdim * (digamma(shdfhdim) - digamma(hdf)) + return 0.5 * logdet(d.Σ) + hdim * log(d.df * pi) + logbeta(hdim, hdf) - loggamma(hdim) + + shdfhdim * (digamma(shdfhdim) - digamma(hdf)) end # evaluation (for GenericMvTDist) -insupport(d::AbstractMvTDist, x::AbstractVector{T}) where {T<:Real} = +insupport(d::AbstractMvTDist, x::AbstractVector{T}) where {T <: Real} = length(d) == length(x) && all(isfinite, x) sqmahal(d::GenericMvTDist, x::AbstractVector{<:Real}) = invquad(d.Σ, x - d.μ) function sqmahal!(r::AbstractArray, d::GenericMvTDist, x::AbstractMatrix{<:Real}) - invquad!(r, d.Σ, x .- d.μ) + return invquad!(r, d.Σ, x .- d.μ) end -sqmahal(d::AbstractMvTDist, x::AbstractMatrix{T}) where {T<:Real} = +sqmahal(d::AbstractMvTDist, x::AbstractMatrix{T}) where {T <: Real} = sqmahal!(Vector{T}(undef, size(x, 2)), d, x) @@ -159,15 +159,15 @@ function mvtdist_consts(d::AbstractMvTDist) return (shdfhdim, v) end -function _logpdf(d::AbstractMvTDist, x::AbstractVector{T}) where {T<:Real} +function _logpdf(d::AbstractMvTDist, x::AbstractVector{T}) where {T <: Real} shdfhdim, v = mvtdist_consts(d) - v - shdfhdim * log1p(sqmahal(d, x) / d.df) + return v - shdfhdim * log1p(sqmahal(d, x) / d.df) end function _logpdf!(r::AbstractArray{<:Real}, d::AbstractMvTDist, x::AbstractMatrix{<:Real}) sqmahal!(r, d, x) shdfhdim, v = mvtdist_consts(d) - for i = 1:size(x, 2) + for i in 1:size(x, 2) r[i] = v - shdfhdim * log1p(r[i] / d.df) end return r @@ -176,7 +176,7 @@ end function gradlogpdf(d::GenericMvTDist, x::AbstractVector{<:Real}) z = x - d.μ prz = invscale(d) * z - -((d.df + d.dim) / (d.df + dot(z, prz))) * prz + return -((d.df + d.dim) / (d.df + dot(z, prz))) * prz end # Sampling (for GenericMvTDist) @@ -185,15 +185,15 @@ function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractVector{<:Real}) y = sqrt(rand(rng, chisqd) / d.df) unwhiten!(d.Σ, randn!(rng, x)) x .= x ./ y .+ d.μ - x + return x end -function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{T}) where {T<:Real} +function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{T}) where {T <: Real} cols = size(x, 2) chisqd = Chisq{partype(d)}(d.df) y = Matrix{T}(undef, 1, cols) unwhiten!(d.Σ, randn!(rng, x)) rand!(rng, chisqd, y) x .= x ./ sqrt.(y ./ d.df) .+ d.μ - x + return x end diff --git a/src/multivariate/product.jl b/src/multivariate/product.jl index 8730428486..543d181453 100644 --- a/src/multivariate/product.jl +++ b/src/multivariate/product.jl @@ -15,30 +15,30 @@ Product(Uniform.(rand(10), 1)) # A 10-dimensional Product from 10 independent `U `Product` is deprecated and will be removed in the next breaking release. Use [`product_distribution`](@ref) instead. 
""" -struct Product{S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} <: - MultivariateDistribution{S} +struct Product{S <: ValueSupport, T <: UnivariateDistribution{S}, V <: AbstractVector{T}} <: + MultivariateDistribution{S} v::V - function Product{S,T,V}( - v::V, - ) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} - return new{S,T,V}(v) + function Product{S, T, V}( + v::V, + ) where {S <: ValueSupport, T <: UnivariateDistribution{S}, V <: AbstractVector{T}} + return new{S, T, V}(v) end end function Product( - v::V, -) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} + v::V, + ) where {S <: ValueSupport, T <: UnivariateDistribution{S}, V <: AbstractVector{T}} Base.depwarn( "`Product(v)` is deprecated, please use `product_distribution(v)`", :Product, ) - return Product{S,T,V}(v) + return Product{S, T, V}(v) end length(d::Product) = length(d.v) function Base.eltype( - ::Type{<:Product{S,T}}, -) where {S<:ValueSupport,T<:UnivariateDistribution{S}} + ::Type{<:Product{S, T}}, + ) where {S <: ValueSupport, T <: UnivariateDistribution{S}} return eltype(T) end @@ -65,7 +65,7 @@ maximum(d::Product) = map(maximum, d.v) # it will return a `ProductDistribution` then which is already the default for # higher-dimensional arrays and distributions function product_distribution( - dists::V, -) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:AbstractVector{T}} - return Product{S,T,V}(dists) + dists::V, + ) where {S <: ValueSupport, T <: UnivariateDistribution{S}, V <: AbstractVector{T}} + return Product{S, T, V}(dists) end diff --git a/src/multivariate/vonmisesfisher.jl b/src/multivariate/vonmisesfisher.jl index da3768261d..e47cfe40d2 100644 --- a/src/multivariate/vonmisesfisher.jl +++ b/src/multivariate/vonmisesfisher.jl @@ -12,7 +12,7 @@ # http://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf # -struct VonMisesFisher{T<:Real} <: ContinuousMultivariateDistribution +struct VonMisesFisher{T <: Real} <: ContinuousMultivariateDistribution μ::Vector{T} κ::T logCκ::T @@ -24,12 +24,12 @@ struct VonMisesFisher{T<:Real} <: ContinuousMultivariateDistribution κ > 0 || error("κ must be positive.") logCκ = vmflck(length(μ), κ) S = promote_type(T, typeof(logCκ)) - new{T}(Vector{S}(μ), S(κ), S(logCκ)) + return new{T}(Vector{S}(μ), S(κ), S(logCκ)) end end -VonMisesFisher(μ::Vector{T}, κ::T) where {T<:Real} = VonMisesFisher{T}(μ, κ) -function VonMisesFisher(μ::Vector{T}, κ::Real) where {T<:Real} +VonMisesFisher(μ::Vector{T}, κ::T) where {T <: Real} = VonMisesFisher{T}(μ, κ) +function VonMisesFisher(μ::Vector{T}, κ::Real) where {T <: Real} R = promote_type(T, eltype(κ)) return VonMisesFisher(convert(AbstractArray{R}, μ), convert(R, κ)) end @@ -42,14 +42,13 @@ end show(io::IO, d::VonMisesFisher) = show(io, d, (:μ, :κ)) ### Conversions -convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher) where {T<:Real} = +convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher) where {T <: Real} = VonMisesFisher{T}(convert(Vector{T}, d.μ), T(d.κ); checknorm = false) -Base.convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher{T}) where {T<:Real} = d -convert(::Type{VonMisesFisher{T}}, μ::Vector, κ, logCκ) where {T<:Real} = +Base.convert(::Type{VonMisesFisher{T}}, d::VonMisesFisher{T}) where {T <: Real} = d +convert(::Type{VonMisesFisher{T}}, μ::Vector, κ, logCκ) where {T <: Real} = VonMisesFisher{T}(convert(Vector{T}, μ), T(κ)) - ### Basic properties length(d::VonMisesFisher) = length(d.μ) @@ -57,9 +56,9 @@ length(d::VonMisesFisher) = length(d.μ) 
meandir(d::VonMisesFisher) = d.μ concentration(d::VonMisesFisher) = d.κ -insupport(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} = isunitvec(x) +insupport(d::VonMisesFisher, x::AbstractVector{T}) where {T <: Real} = isunitvec(x) params(d::VonMisesFisher) = (d.μ, d.κ) -@inline partype(d::VonMisesFisher{T}) where {T<:Real} = T +@inline partype(d::VonMisesFisher{T}) where {T <: Real} = T ### Evaluation @@ -67,12 +66,12 @@ function _vmflck(p, κ) T = typeof(κ) hp = T(p / 2) q = hp - 1 - q * log(κ) - hp * log2π - log(besselix(q, κ)) - κ + return q * log(κ) - hp * log2π - log(besselix(q, κ)) - κ end _vmflck3(κ) = log(κ) - log2π - κ - log1mexp(-2κ) vmflck(p, κ) = (p == 3 ? _vmflck3(κ) : _vmflck(p, κ)) -_logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} = +_logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T <: Real} = d.logCκ + d.κ * dot(d.μ, x) @@ -93,10 +92,10 @@ function fit_mle(::Type{<:VonMisesFisher}, X::Matrix{Float64}) μ = rmul!(r, 1.0 / r_nrm) ρ = r_nrm / n κ = _vmf_estkappa(length(μ), ρ) - VonMisesFisher(μ, κ) + return VonMisesFisher(μ, κ) end -fit_mle(::Type{<:VonMisesFisher}, X::Matrix{T}) where {T<:Real} = +fit_mle(::Type{<:VonMisesFisher}, X::Matrix{T}) where {T <: Real} = fit_mle(VonMisesFisher, Float64.(X)) function _vmf_estkappa(p::Int, ρ::Float64) diff --git a/src/multivariates.jl b/src/multivariates.jl index 8bef508a1e..29ec1ccc43 100644 --- a/src/multivariates.jl +++ b/src/multivariates.jl @@ -20,7 +20,7 @@ size(d::MultivariateDistribution) # TODO: inconsistency with other `ArrayLikeVariate`s and `rand(s, (n,))` - maybe remove? rand(rng::AbstractRNG, s::Sampleable{Multivariate}, n::Int) = @inbounds rand!(rng, sampler(s), Matrix{eltype(s)}(undef, length(s), n)) -rand(rng::AbstractRNG, s::Sampleable{Multivariate,Continuous}, n::Int) = +rand(rng::AbstractRNG, s::Sampleable{Multivariate, Continuous}, n::Int) = @inbounds rand!(rng, sampler(s), Matrix{float(eltype(s))}(undef, length(s), n)) ## domain @@ -31,22 +31,22 @@ rand(rng::AbstractRNG, s::Sampleable{Multivariate,Continuous}, n::Int) = If ``x`` is a vector, it returns whether ``x`` is within the support of ``d``. If ``x`` is a matrix, it returns, for each column of ``x``, whether that column is within the support of ``d``.
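A small usage sketch of both call forms; the matrix method returns one flag per column, as `insupport!` below makes explicit:

```julia
using Distributions

d = MvNormal([0.0, 0.0], [1.0 0.0; 0.0 1.0])
insupport(d, [0.5, -0.5])          # true: a finite 2-vector
insupport(d, [0.5 Inf; -0.5 0.0])  # BitVector [true, false], one flag per column
```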
""" -insupport{D<:MultivariateDistribution}(d::Union{D,Type{D}}, x::AbstractArray) +insupport{D <: MultivariateDistribution}(d::Union{D, Type{D}}, x::AbstractArray) function insupport!( - r::AbstractArray, - d::Union{D,Type{D}}, - X::AbstractMatrix, -) where {D<:MultivariateDistribution} + r::AbstractArray, + d::Union{D, Type{D}}, + X::AbstractMatrix, + ) where {D <: MultivariateDistribution} n = length(r) size(X) == (length(d), n) || throw(DimensionMismatch("Inconsistent array dimensions.")) - for i = 1:n + for i in 1:n @inbounds r[i] = insupport(d, view(X, :, i)) end return r end -insupport(d::Union{D,Type{D}}, X::AbstractMatrix) where {D<:MultivariateDistribution} = +insupport(d::Union{D, Type{D}}, X::AbstractMatrix) where {D <: MultivariateDistribution} = insupport!(BitArray(undef, size(X, 2)), d, X) ## statistics @@ -104,12 +104,12 @@ function cor(d::MultivariateDistribution) @assert size(C, 2) == n R = Matrix{eltype(C)}(undef, n, n) - for j = 1:n - for i = 1:(j-1) + for j in 1:n + for i in 1:(j - 1) @inbounds R[i, j] = R[j, i] end R[j, j] = 1.0 - for i = (j+1):n + for i in (j + 1):n @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j]) end end @@ -120,17 +120,17 @@ end ##### Specific distributions ##### for fname in [ - "dirichlet.jl", - "multinomial.jl", - "dirichletmultinomial.jl", - "jointorderstatistics.jl", - "mvnormal.jl", - "mvnormalcanon.jl", - "mvlogitnormal.jl", - "mvlognormal.jl", - "mvtdist.jl", - "product.jl", # deprecated - "vonmisesfisher.jl", -] + "dirichlet.jl", + "multinomial.jl", + "dirichletmultinomial.jl", + "jointorderstatistics.jl", + "mvnormal.jl", + "mvnormalcanon.jl", + "mvlogitnormal.jl", + "mvlognormal.jl", + "mvtdist.jl", + "product.jl", # deprecated + "vonmisesfisher.jl", + ] include(joinpath("multivariate", fname)) end diff --git a/src/namedtuple/productnamedtuple.jl b/src/namedtuple/productnamedtuple.jl index fbcbda714b..557e530f03 100644 --- a/src/namedtuple/productnamedtuple.jl +++ b/src/namedtuple/productnamedtuple.jl @@ -38,26 +38,26 @@ julia> var(d) # var of marginals (x = 1.0, y = [0.031746031746031744, 0.031746031746031744]) ``` """ -struct ProductNamedTupleDistribution{Tnames,Tdists,S<:ValueSupport,eltypes} <: - Distribution{NamedTupleVariate{Tnames},S} - dists::NamedTuple{Tnames,Tdists} +struct ProductNamedTupleDistribution{Tnames, Tdists, S <: ValueSupport, eltypes} <: + Distribution{NamedTupleVariate{Tnames}, S} + dists::NamedTuple{Tnames, Tdists} end function ProductNamedTupleDistribution( - dists::NamedTuple{K,V}, -) where {K,V<:Tuple{Distribution,Vararg{Distribution}}} + dists::NamedTuple{K, V}, + ) where {K, V <: Tuple{Distribution, Vararg{Distribution}}} vs = _product_valuesupport(values(dists)) eltypes = _product_namedtuple_eltype(values(dists)) - return ProductNamedTupleDistribution{K,V,vs,eltypes}(dists) + return ProductNamedTupleDistribution{K, V, vs, eltypes}(dists) end _gentype(d::UnivariateDistribution) = eltype(d) -_gentype(d::Distribution{<:ArrayLikeVariate{S}}) where {S} = Array{eltype(d),S} +_gentype(d::Distribution{<:ArrayLikeVariate{S}}) where {S} = Array{eltype(d), S} function _gentype(d::Distribution{CholeskyVariate}) T = eltype(d) - return LinearAlgebra.Cholesky{T,Matrix{T}} + return LinearAlgebra.Cholesky{T, Matrix{T}} end function _gentype(d::ProductNamedTupleDistribution{K}) where {K} - return NamedTuple{K,Tuple{map(_gentype, values(d.dists))...}} + return NamedTuple{K, Tuple{map(_gentype, values(d.dists))...}} end _gentype(::Distribution) = Any @@ -81,14 +81,14 @@ The function falls back to constructing a 
[`ProductNamedTupleDistribution`](@ref) distribution but specialized methods can be defined. """ function product_distribution( - dists::NamedTuple{<:Any,<:Tuple{Distribution,Vararg{Distribution}}}, -) + dists::NamedTuple{<:Any, <:Tuple{Distribution, Vararg{Distribution}}}, + ) return ProductNamedTupleDistribution(dists) end # Properties -Base.eltype(::Type{<:ProductNamedTupleDistribution{<:Any,<:Any,<:Any,T}}) where {T} = T +Base.eltype(::Type{<:ProductNamedTupleDistribution{<:Any, <:Any, <:Any, T}}) where {T} = T Base.minimum(d::ProductNamedTupleDistribution) = map(minimum, d.dists) @@ -107,7 +107,7 @@ end function insupport(dist::ProductNamedTupleDistribution{K}, x::NamedTuple) where {K} return ( _named_fields_match(dist.dists, x) && - all(map(insupport, dist.dists, NamedTuple{K}(x))) + all(map(insupport, dist.dists, NamedTuple{K}(x))) ) end @@ -142,9 +142,9 @@ std(d::ProductNamedTupleDistribution) = map(std, d.dists) entropy(d::ProductNamedTupleDistribution) = sum(entropy, values(d.dists)) function kldivergence( - d1::ProductNamedTupleDistribution{K}, - d2::ProductNamedTupleDistribution, -) where {K} + d1::ProductNamedTupleDistribution{K}, + d2::ProductNamedTupleDistribution, + ) where {K} _named_fields_match(d1.dists, d2.dists) || throw( ArgumentError( "Sets of named tuple fields are not the same: !issetequal($(keys(d1.dists)), $(keys(d2.dists)))", ), @@ -155,20 +155,20 @@ end # Sampling -function sampler(d::ProductNamedTupleDistribution{K,<:Any,S}) where {K,S} +function sampler(d::ProductNamedTupleDistribution{K, <:Any, S}) where {K, S} samplers = map(sampler, d.dists) Tsamplers = typeof(values(samplers)) - return ProductNamedTupleSampler{K,Tsamplers,S}(samplers) + return ProductNamedTupleSampler{K, Tsamplers, S}(samplers) end function Base.rand(rng::AbstractRNG, d::ProductNamedTupleDistribution{K}) where {K} return NamedTuple{K}(map(Base.Fix1(rand, rng), d.dists)) end function Base.rand( - rng::AbstractRNG, - d::ProductNamedTupleDistribution{K}, - dims::Dims, -) where {K} + rng::AbstractRNG, + d::ProductNamedTupleDistribution{K}, + dims::Dims, + ) where {K} return convert(AbstractArray{<:NamedTuple{K}}, _rand(rng, sampler(d), dims)) end diff --git a/src/product.jl b/src/product.jl index 57730a8d57..34a569c121 100644 --- a/src/product.jl +++ b/src/product.jl @@ -7,11 +7,11 @@ independent `M`-dimensional distributions by stacking them. Users should use [`product_distribution`](@ref) to construct a product distribution of independent distributions instead of constructing a `ProductDistribution` directly.
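A usage sketch of the stacking described above, assuming three independent 2-dimensional normal components:

```julia
using Distributions

d = product_distribution(
    MvNormal(zeros(2), [1.0 0.0; 0.0 1.0]),
    MvNormal(ones(2), [1.0 0.0; 0.0 1.0]),
    MvNormal(-ones(2), [1.0 0.0; 0.0 1.0]),
)
size(d)  # (2, 3): each 2-dimensional component becomes one column
rand(d)  # 2×3 matrix draw
```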
""" -struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVariate{N},S} +struct ProductDistribution{N, M, D, S <: ValueSupport, T} <: Distribution{ArrayLikeVariate{N}, S} dists::D size::Dims{N} - function ProductDistribution{N,M,D}(dists::D) where {N,M,D} + function ProductDistribution{N, M, D}(dists::D) where {N, M, D} if isempty(dists) throw( ArgumentError( @@ -19,7 +19,7 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar ), ) end - return new{N,M,D,_product_valuesupport(dists),_product_eltype(dists)}( + return new{N, M, D, _product_valuesupport(dists), _product_eltype(dists)}( dists, _product_size(dists), ) @@ -27,18 +27,18 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar end function ProductDistribution( - dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}, -) where {M,N} - return ProductDistribution{M + N,M,typeof(dists)}(dists) + dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}}, N}, + ) where {M, N} + return ProductDistribution{M + N, M, typeof(dists)}(dists) end function ProductDistribution( - dists::Tuple{ - Distribution{<:ArrayLikeVariate{M}}, - Vararg{Distribution{<:ArrayLikeVariate{M}}}, - }, -) where {M} - return ProductDistribution{M + 1,M,typeof(dists)}(dists) + dists::Tuple{ + Distribution{<:ArrayLikeVariate{M}}, + Vararg{Distribution{<:ArrayLikeVariate{M}}}, + }, + ) where {M} + return ProductDistribution{M + 1, M, typeof(dists)}(dists) end # default definitions (type stable e.g. for arrays with concrete `eltype`) @@ -46,15 +46,15 @@ _product_valuesupport(dists) = mapreduce(value_support ∘ typeof, promote_type, _product_eltype(dists) = mapreduce(eltype, promote_type, dists) # type-stable and faster implementations for tuples -function _product_valuesupport(dists::NTuple{<:Any,Distribution}) +function _product_valuesupport(dists::NTuple{<:Any, Distribution}) return __product_promote_type(value_support, typeof(dists)) end -function _product_eltype(dists::NTuple{<:Any,Distribution}) +function _product_eltype(dists::NTuple{<:Any, Distribution}) return __product_promote_type(eltype, typeof(dists)) end -__product_promote_type(f::F, ::Type{Tuple{D}}) where {F,D<:Distribution} = f(D) -function __product_promote_type(f::F, ::Type{T}) where {F,T} +__product_promote_type(f::F, ::Type{Tuple{D}}) where {F, D <: Distribution} = f(D) +function __product_promote_type(f::F, ::Type{T}) where {F, T} return promote_type( f(Base.tuple_type_head(T)), __product_promote_type(f, Base.tuple_type_tail(T)), @@ -62,20 +62,20 @@ function __product_promote_type(f::F, ::Type{T}) where {F,T} end function _product_size( - dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}, -) where {M,N} + dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}}, N}, + ) where {M, N} size_d = size(first(dists)) all(size(d) == size_d for d in dists) || error("all distributions must be of the same size") size_dists = size(dists) - return ntuple(i -> i <= M ? size_d[i] : size_dists[i-M], Val(M + N)) + return ntuple(i -> i <= M ? 
size_d[i] : size_dists[i - M], Val(M + N)) end function _product_size( - dists::Tuple{ - Distribution{<:ArrayLikeVariate{M}}, - Vararg{Distribution{<:ArrayLikeVariate{M}},N}, - }, -) where {M,N} + dists::Tuple{ + Distribution{<:ArrayLikeVariate{M}}, + Vararg{Distribution{<:ArrayLikeVariate{M}}, N}, + }, + ) where {M, N} size_d = size(first(dists)) all(size(d) == size_d for d in dists) || error("all distributions must be of the same size") @@ -83,17 +83,17 @@ function _product_size( end ## aliases -const VectorOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{1,0,D,S,T} -const MatrixOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{2,0,D,S,T} -const ArrayOfUnivariateDistribution{N,D,S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} +const VectorOfUnivariateDistribution{D, S <: ValueSupport, T} = ProductDistribution{1, 0, D, S, T} +const MatrixOfUnivariateDistribution{D, S <: ValueSupport, T} = ProductDistribution{2, 0, D, S, T} +const ArrayOfUnivariateDistribution{N, D, S <: ValueSupport, T} = ProductDistribution{N, 0, D, S, T} -const FillArrayOfUnivariateDistribution{N,D<:Fill{<:Any,N},S<:ValueSupport,T} = - ProductDistribution{N,0,D,S,T} +const FillArrayOfUnivariateDistribution{N, D <: Fill{<:Any, N}, S <: ValueSupport, T} = + ProductDistribution{N, 0, D, S, T} ## General definitions function Base.eltype( - ::Type{<:ProductDistribution{<:Any,<:Any,<:Any,<:ValueSupport,T}}, -) where {T} + ::Type{<:ProductDistribution{<:Any, <:Any, <:Any, <:ValueSupport, T}}, + ) where {T} return T end @@ -112,10 +112,10 @@ var(d::ArrayOfUnivariateDistribution) = map(var, d.dists) var(d::VectorOfUnivariateDistribution{<:Tuple}) = collect(map(var, d.dists)) function insupport( - d::ArrayOfUnivariateDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} - size(d) == size(x) && all(insupport(vi, xi) for (vi, xi) in zip(d.dists, x)) + d::ArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} + return size(d) == size(x) && all(insupport(vi, xi) for (vi, xi) in zip(d.dists, x)) end minimum(d::ArrayOfUnivariateDistribution) = map(minimum, d.dists) @@ -138,10 +138,10 @@ cov(d::ProductDistribution{2}, ::Val{false}) = reshape(cov(d), size(d)..., size( # `_rand!` for arrays of univariate distributions function _rand!( - rng::AbstractRNG, - d::ArrayOfUnivariateDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + rng::AbstractRNG, + d::ArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} @inbounds for (i, di) in zip(eachindex(x), d.dists) x[i] = rand(rng, di) end @@ -150,11 +150,11 @@ end # `_logpdf` for arrays of univariate distributions # we have to fix a method ambiguity -function _logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} +function _logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real, N}) where {N} return __logpdf(d, x) end _logpdf(d::MatrixOfUnivariateDistribution, x::AbstractMatrix{<:Real}) = __logpdf(d, x) -function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} +function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real, N}) where {N} # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020) # without allocations to compute `sum(logpdf.(d.dists, x))` broadcasted = Broadcast.broadcasted(logpdf, d.dists, x) @@ -163,35 +163,35 @@ end # more efficient implementation of `_rand!` for `Fill` array of univariate distributions function _rand!( - rng::AbstractRNG, - 
d::FillArrayOfUnivariateDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + rng::AbstractRNG, + d::FillArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} return @inbounds rand!(rng, sampler(first(d.dists)), x) end # more efficient implementation of `_logpdf` for `Fill` array of univariate distributions # we have to fix a method ambiguity function _logpdf( - d::FillArrayOfUnivariateDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + d::FillArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} return __logpdf(d, x) end _logpdf(d::FillArrayOfUnivariateDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) function __logpdf( - d::FillArrayOfUnivariateDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + d::FillArrayOfUnivariateDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} return @inbounds loglikelihood(first(d.dists), x) end # `_rand! for arrays of distributions function _rand!( - rng::AbstractRNG, - d::ProductDistribution{N,M}, - A::AbstractArray{<:Real,N}, -) where {N,M} + rng::AbstractRNG, + d::ProductDistribution{N, M}, + A::AbstractArray{<:Real, N}, + ) where {N, M} @inbounds for (di, Ai) in zip(d.dists, eachvariate(A, ArrayLikeVariate{M})) rand!(rng, di, Ai) end @@ -200,9 +200,9 @@ end # `_logpdf` for arrays of distributions # we have to fix some method ambiguities -_logpdf(d::ProductDistribution{N}, x::AbstractArray{<:Real,N}) where {N} = __logpdf(d, x) +_logpdf(d::ProductDistribution{N}, x::AbstractArray{<:Real, N}) where {N} = __logpdf(d, x) _logpdf(d::ProductDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) -function __logpdf(d::ProductDistribution{N,M}, x::AbstractArray{<:Real,N}) where {N,M} +function __logpdf(d::ProductDistribution{N, M}, x::AbstractArray{<:Real, N}) where {N, M} # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020) # to compute `sum(logpdf.(d.dists, eachvariate))` @inbounds broadcasted = @@ -212,26 +212,26 @@ end # more efficient implementation of `_rand!` for `Fill` arrays of distributions function _rand!( - rng::AbstractRNG, - d::ProductDistribution{N,M,<:Fill}, - A::AbstractArray{<:Real,N}, -) where {N,M} + rng::AbstractRNG, + d::ProductDistribution{N, M, <:Fill}, + A::AbstractArray{<:Real, N}, + ) where {N, M} @inbounds rand!(rng, sampler(first(d.dists)), A) return A end # more efficient implementation of `_logpdf` for `Fill` arrays of distributions # we have to fix a method ambiguity -function _logpdf(d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}) where {N,M} +function _logpdf(d::ProductDistribution{N, M, <:Fill}, x::AbstractArray{<:Real, N}) where {N, M} return __logpdf(d, x) end -function _logpdf(d::ProductDistribution{2,M,<:Fill}, x::AbstractMatrix{<:Real}) where {M} +function _logpdf(d::ProductDistribution{2, M, <:Fill}, x::AbstractMatrix{<:Real}) where {M} return __logpdf(d, x) end function __logpdf( - d::ProductDistribution{N,M,<:Fill}, - x::AbstractArray{<:Real,N}, -) where {N,M} + d::ProductDistribution{N, M, <:Fill}, + x::AbstractArray{<:Real, N}, + ) where {N, M} return @inbounds loglikelihood(first(d.dists), x) end @@ -249,9 +249,9 @@ function product_distribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVar end function product_distribution( - dist::Distribution{ArrayLikeVariate{N}}, - dists::Distribution{ArrayLikeVariate{N}}..., -) where {N} + dist::Distribution{ArrayLikeVariate{N}}, + dists::Distribution{ArrayLikeVariate{N}}..., + ) where {N} return ProductDistribution((dist, dists...)) 
end diff --git a/src/qq.jl b/src/qq.jl index d9744178c6..bd27521fec 100644 --- a/src/qq.jl +++ b/src/qq.jl @@ -1,11 +1,11 @@ -struct QQPair{U<:AbstractVector,V<:AbstractVector} +struct QQPair{U <: AbstractVector, V <: AbstractVector} qx::U qy::V end function qqbuild(x::AbstractVector, y::AbstractVector) n = min(length(x), length(y)) - grid = [0.0:(1/(n-1)):1.0;] + grid = [0.0:(1 / (n - 1)):1.0;] qx = quantile(x, grid) qy = quantile(y, grid) return QQPair(qx, qy) @@ -27,7 +27,7 @@ https://ch.mathworks.com/help/stats/probplot.html """ function ppoints(n::Int) m = 2 * n - return (1:2:(m-1)) ./ m + return (1:2:(m - 1)) ./ m end diff --git a/src/quantilealgs.jl b/src/quantilealgs.jl index 39f5c6f350..df01a20af0 100644 --- a/src/quantilealgs.jl +++ b/src/quantilealgs.jl @@ -1,11 +1,11 @@ # Various algorithms for computing quantile function quantile_bisect( - d::ContinuousUnivariateDistribution, - p::Real, - lx::T, - rx::T, -) where {T<:Real} + d::ContinuousUnivariateDistribution, + p::Real, + lx::T, + rx::T, + ) where {T <: Real} rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]")) # In some special cases, e.g. #1501, rx == lx` @@ -56,11 +56,11 @@ quantile_bisect(d::ContinuousUnivariateDistribution, p::Real) = # http://www.statsci.org/smyth/pubs/qinvgaussPreprint.pdf function quantile_newton( - d::ContinuousUnivariateDistribution, - p::Real, - xs::Real = mode(d), - tol::Real = 1e-12, -) + d::ContinuousUnivariateDistribution, + p::Real, + xs::Real = mode(d), + tol::Real = 1.0e-12, + ) x = xs + (p - cdf(d, xs)) / pdf(d, xs) T = typeof(x) if 0 < p < 1 @@ -80,11 +80,11 @@ function quantile_newton( end function cquantile_newton( - d::ContinuousUnivariateDistribution, - p::Real, - xs::Real = mode(d), - tol::Real = 1e-12, -) + d::ContinuousUnivariateDistribution, + p::Real, + xs::Real = mode(d), + tol::Real = 1.0e-12, + ) x = xs + (ccdf(d, xs) - p) / pdf(d, xs) T = typeof(x) if 0 < p < 1 @@ -104,11 +104,11 @@ function cquantile_newton( end function invlogcdf_newton( - d::ContinuousUnivariateDistribution, - lp::Real, - xs::Real = mode(d), - tol::Real = 1e-12, -) + d::ContinuousUnivariateDistribution, + lp::Real, + xs::Real = mode(d), + tol::Real = 1.0e-12, + ) T = typeof(lp - logpdf(d, xs)) if -Inf < lp < 0 x0 = T(xs) @@ -136,11 +136,11 @@ function invlogcdf_newton( end function invlogccdf_newton( - d::ContinuousUnivariateDistribution, - lp::Real, - xs::Real = mode(d), - tol::Real = 1e-12, -) + d::ContinuousUnivariateDistribution, + lp::Real, + xs::Real = mode(d), + tol::Real = 1.0e-12, + ) T = typeof(lp - logpdf(d, xs)) if -Inf < lp < 0 x0 = T(xs) @@ -170,10 +170,12 @@ end # A macro: specify that the quantile (and friends) of distribution D # is computed using the newton method macro quantile_newton(D) - esc(quote - quantile(d::$D, p::Real) = quantile_newton(d, p) - cquantile(d::$D, p::Real) = cquantile_newton(d, p) - invlogcdf(d::$D, lp::Real) = invlogcdf_newton(d, lp) - invlogccdf(d::$D, lp::Real) = invlogccdf_newton(d, lp) - end) + return esc( + quote + quantile(d::$D, p::Real) = quantile_newton(d, p) + cquantile(d::$D, p::Real) = cquantile_newton(d, p) + invlogcdf(d::$D, lp::Real) = invlogcdf_newton(d, lp) + invlogccdf(d::$D, lp::Real) = invlogccdf_newton(d, lp) + end + ) end diff --git a/src/reshaped.jl b/src/reshaped.jl index c1f492ce63..6c7b6c91ea 100644 --- a/src/reshaped.jl +++ b/src/reshaped.jl @@ -8,22 +8,22 @@ It is recommended to not use `reshape` instead of the constructor of `ReshapedDi directly since `reshape` can return more optimized distributions for specific types 
of `d` and number of dimensions `N`. """ -struct ReshapedDistribution{N,S<:ValueSupport,D<:Distribution{<:ArrayLikeVariate,S}} <: - Distribution{ArrayLikeVariate{N},S} +struct ReshapedDistribution{N, S <: ValueSupport, D <: Distribution{<:ArrayLikeVariate, S}} <: + Distribution{ArrayLikeVariate{N}, S} dist::D dims::Dims{N} function ReshapedDistribution( - dist::Distribution{<:ArrayLikeVariate,S}, - dims::Dims{N}, - ) where {N,S<:ValueSupport} + dist::Distribution{<:ArrayLikeVariate, S}, + dims::Dims{N}, + ) where {N, S <: ValueSupport} _reshape_check_dims(dist, dims) - return new{N,S,typeof(dist)}(dist, dims) + return new{N, S, typeof(dist)}(dist, dims) end end function _reshape_check_dims(dist::Distribution{<:ArrayLikeVariate}, dims::Dims) - (all(d > 0 for d in dims) && length(dist) == prod(dims)) || throw( + return (all(d > 0 for d in dims) && length(dist) == prod(dims)) || throw( ArgumentError( "dimensions $(dims) do not match size of source distribution $(size(dist))", ), @@ -31,12 +31,12 @@ function _reshape_check_dims(dist::Distribution{<:ArrayLikeVariate}, dims::Dims) end Base.size(d::ReshapedDistribution) = d.dims -Base.eltype(::Type{ReshapedDistribution{<:Any,<:ValueSupport,D}}) where {D} = eltype(D) +Base.eltype(::Type{ReshapedDistribution{<:Any, <:ValueSupport, D}}) where {D} = eltype(D) partype(d::ReshapedDistribution) = partype(d.dist) params(d::ReshapedDistribution) = (d.dist, d.dims) -function insupport(d::ReshapedDistribution{N}, x::AbstractArray{<:Real,N}) where {N} +function insupport(d::ReshapedDistribution{N}, x::AbstractArray{<:Real, N}) where {N} return size(d) == size(x) && insupport(d.dist, reshape(x, size(d.dist))) end @@ -55,9 +55,9 @@ rank(d::ReshapedDistribution{2}) = minimum(size(d)) # logpdf evaluation # have to fix method ambiguity due to default fallback for `MatrixDistribution`... 
-_logpdf(d::ReshapedDistribution{N}, x::AbstractArray{<:Real,N}) where {N} = __logpdf(d, x) +_logpdf(d::ReshapedDistribution{N}, x::AbstractArray{<:Real, N}) where {N} = __logpdf(d, x) _logpdf(d::ReshapedDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) -function __logpdf(d::ReshapedDistribution{N}, x::AbstractArray{<:Real,N}) where {N} +function __logpdf(d::ReshapedDistribution{N}, x::AbstractArray{<:Real, N}) where {N} dist = d.dist return @inbounds logpdf(dist, reshape(x, size(dist))) end @@ -65,9 +65,9 @@ end # loglikelihood # useful if the original distribution defined more optimized methods @inline function loglikelihood( - d::ReshapedDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + d::ReshapedDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} @boundscheck begin size(x) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end @@ -75,9 +75,9 @@ end return @inbounds loglikelihood(dist, reshape(x, size(dist))) end @inline function loglikelihood( - d::ReshapedDistribution{N}, - x::AbstractArray{<:Real,M}, -) where {N,M} + d::ReshapedDistribution{N}, + x::AbstractArray{<:Real, M}, + ) where {N, M} @boundscheck begin M > N || throw( DimensionMismatch( @@ -94,10 +94,10 @@ end # sampling function _rand!( - rng::AbstractRNG, - d::ReshapedDistribution{N}, - x::AbstractArray{<:Real,N}, -) where {N} + rng::AbstractRNG, + d::ReshapedDistribution{N}, + x::AbstractArray{<:Real, N}, + ) where {N} dist = d.dist @inbounds rand!(rng, dist, reshape(x, size(dist))) return x @@ -151,9 +151,9 @@ Base.vec(dist::Distribution{<:ArrayLikeVariate}) = reshape(dist, length(dist)) # avoid unnecessary wrappers function Base.reshape( - dist::ReshapedDistribution{<:Any,<:ValueSupport,<:MultivariateDistribution}, - dims::Tuple{Int}, -) + dist::ReshapedDistribution{<:Any, <:ValueSupport, <:MultivariateDistribution}, + dims::Tuple{Int}, + ) _reshape_check_dims(dist, dims) return dist.dist end diff --git a/src/samplers.jl b/src/samplers.jl index 21c954a6f7..2037147cac 100644 --- a/src/samplers.jl +++ b/src/samplers.jl @@ -15,19 +15,19 @@ Samples from the sampler and returns the result. 
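A brief usage sketch of this API: construct the sampler once, then draw from it repeatedly; for a `Gamma` with shape below one this typically selects the inverse-power sampler defined later in this patch:

```julia
using Distributions, Random

rng = Random.default_rng()
d = Gamma(0.5, 2.0)
s = sampler(d)                         # pays the setup cost once
xs = [rand(rng, s) for _ in 1:10_000]  # repeated draws reuse that setup
```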
rand(::AbstractRNG, ::Sampleable) for fname in [ - "aliastable.jl", - "binomial.jl", - "poissonbinomial.jl", - "poisson.jl", - "exponential.jl", - "gamma.jl", - "multinomial.jl", - "vonmises.jl", - "vonmisesfisher.jl", - "discretenonparametric.jl", - "categorical.jl", - "productnamedtuple.jl", -] + "aliastable.jl", + "binomial.jl", + "poissonbinomial.jl", + "poisson.jl", + "exponential.jl", + "gamma.jl", + "multinomial.jl", + "vonmises.jl", + "vonmisesfisher.jl", + "discretenonparametric.jl", + "categorical.jl", + "productnamedtuple.jl", + ] include(joinpath("samplers", fname)) end diff --git a/src/samplers/aliastable.jl b/src/samplers/aliastable.jl index 495d83f957..58a33e8e91 100644 --- a/src/samplers/aliastable.jl +++ b/src/samplers/aliastable.jl @@ -1,5 +1,5 @@ -struct AliasTable <: Sampleable{Univariate,Discrete} - at::AliasTables.AliasTable{UInt64,Int} +struct AliasTable <: Sampleable{Univariate, Discrete} + at::AliasTables.AliasTable{UInt64, Int} AliasTable(probs::AbstractVector{<:Real}) = new(AliasTables.AliasTable(probs)) end ncategories(s::AliasTable) = length(s.at) diff --git a/src/samplers/binomial.jl b/src/samplers/binomial.jl index 34b5167567..4770d6946b 100644 --- a/src/samplers/binomial.jl +++ b/src/samplers/binomial.jl @@ -1,4 +1,3 @@ - # compute probability vector of a Binomial distribution function binompvec(n::Int, p::Float64) pv = Vector{Float64}(undef, n + 1) @@ -7,13 +6,13 @@ function binompvec(n::Int, p::Float64) pv[1] = 1.0 elseif p == 1.0 fill!(pv, 0.0) - pv[n+1] = 1.0 + pv[n + 1] = 1.0 else q = 1.0 - p a = p / q @inbounds pv[1] = pk = q^n - for k = 1:n - @inbounds pv[k+1] = (pk *= ((n - k + 1) / k) * a) + for k in 1:n + @inbounds pv[k + 1] = (pk *= ((n - k + 1) / k) * a) end end return pv @@ -25,7 +24,7 @@ end # "Generating the maximum of independent identically distributed random variables" # Computers & Mathematics with Applications, Volume 6, Issue 3, 1980, Pages 305-315. # -struct BinomialGeomSampler <: Sampleable{Univariate,Discrete} +struct BinomialGeomSampler <: Sampleable{Univariate, Discrete} comp::Bool n::Int scale::Float64 @@ -41,7 +40,7 @@ function BinomialGeomSampler(n::Int, prob::Float64) comp = true scale = prob < 1.0 ? -1.0 / log(prob) : Inf end - BinomialGeomSampler(comp, n, scale) + return BinomialGeomSampler(comp, n, scale) end function rand(rng::AbstractRNG, s::BinomialGeomSampler) @@ -60,7 +59,7 @@ function rand(rng::AbstractRNG, s::BinomialGeomSampler) end x += 1 end - (s.comp ? s.n - x : x)::Int + return (s.comp ? s.n - x : x)::Int end @@ -73,7 +72,7 @@ end # Note: only use this sampler when n * min(p, 1-p) is large enough # e.g., it is greater than 20. 
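The `BinomialPolySampler` later in this file switches between the two samplers on this criterion; a hypothetical sketch of the rule, taking the cutoff from the comment above (the exact constant used by the source is not shown in this hunk):

```julia
# Hypothetical helper mirroring the BTPE-vs-geometric dispatch rule;
# the 20.0 cutoff is quoted from the comment above, not from the source.
use_btpe(n::Int, p::Float64) = n * min(p, 1.0 - p) > 20.0

use_btpe(100, 0.5)   # true  -> BinomialTPESampler
use_btpe(100, 0.01)  # false -> BinomialGeomSampler
```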
# -struct BinomialTPESampler <: Sampleable{Univariate,Discrete} +struct BinomialTPESampler <: Sampleable{Univariate, Discrete} comp::Bool n::Int r::Float64 @@ -141,7 +140,7 @@ function BinomialTPESampler(n::Int, prob::Float64) p3 = p2 + c / λL p4 = p3 + c / λR - BinomialTPESampler(comp, n, r, q, nrq, M, Mi, p1, p2, p3, p4, xM, xL, xR, c, λL, λR) + return BinomialTPESampler(comp, n, r, q, nrq, M, Mi, p1, p2, p3, p4, xM, xL, xR, c, λL, λR) end function rand(rng::AbstractRNG, s::BinomialTPESampler) @@ -190,11 +189,11 @@ function rand(rng::AbstractRNG, s::BinomialTPESampler) a = S * (s.n + 1) F = 1.0 if s.Mi < y - for i = (s.Mi+1):y + for i in (s.Mi + 1):y F *= a / i - S end elseif s.Mi > y - for i = (y+1):s.Mi + for i in (y + 1):s.Mi F /= a / i - S end end @@ -224,14 +223,14 @@ function rand(rng::AbstractRNG, s::BinomialTPESampler) w = Float64(s.n - y + 1) if A > ( - s.xM * log(f1 / x1) + - ((s.n - s.Mi) + 0.5) * log(z / w) + - (y - s.Mi) * log(w * s.r / (x1 * s.q)) + - lstirling_asym(f1) + - lstirling_asym(z) + - lstirling_asym(x1) + - lstirling_asym(w) - ) + s.xM * log(f1 / x1) + + ((s.n - s.Mi) + 0.5) * log(z / w) + + (y - s.Mi) * log(w * s.r / (x1 * s.q)) + + lstirling_asym(f1) + + lstirling_asym(z) + + lstirling_asym(x1) + + lstirling_asym(w) + ) # Goto 1 continue end @@ -241,13 +240,13 @@ function rand(rng::AbstractRNG, s::BinomialTPESampler) end end # 6 - (s.comp ? s.n - y : y)::Int + return (s.comp ? s.n - y : y)::Int end # Constructing an alias table by directly computing the probability vector # -struct BinomialAliasSampler <: Sampleable{Univariate,Discrete} +struct BinomialAliasSampler <: Sampleable{Univariate, Discrete} table::AliasTable end @@ -260,7 +259,7 @@ rand(rng::AbstractRNG, s::BinomialAliasSampler) = rand(rng, s.table) - 1 # # It is important for type-stability # -mutable struct BinomialPolySampler <: Sampleable{Univariate,Discrete} +mutable struct BinomialPolySampler <: Sampleable{Univariate, Discrete} use_btpe::Bool geom_sampler::BinomialGeomSampler btpe_sampler::BinomialTPESampler @@ -277,7 +276,7 @@ function BinomialPolySampler(n::Int, p::Float64) geom_sampler = BinomialGeomSampler(n, p) btpe_sampler = BinomialTPESampler() end - BinomialPolySampler(use_btpe, geom_sampler, btpe_sampler) + return BinomialPolySampler(use_btpe, geom_sampler, btpe_sampler) end BinomialPolySampler(n::Real, p::Real) = BinomialPolySampler(round(Int, n), Float64(p)) diff --git a/src/samplers/categorical.jl b/src/samplers/categorical.jl index 287d0c87c1..5802ca7ded 100644 --- a/src/samplers/categorical.jl +++ b/src/samplers/categorical.jl @@ -1,17 +1,17 @@ #### naive sampling -struct CategoricalDirectSampler{T<:Real,Ts<:AbstractVector{T}} <: - Sampleable{Univariate,Discrete} +struct CategoricalDirectSampler{T <: Real, Ts <: AbstractVector{T}} <: + Sampleable{Univariate, Discrete} prob::Ts - function CategoricalDirectSampler{T,Ts}(p::Ts) where {T<:Real,Ts<:AbstractVector{T}} + function CategoricalDirectSampler{T, Ts}(p::Ts) where {T <: Real, Ts <: AbstractVector{T}} isempty(p) && throw(ArgumentError("p is empty.")) - new{T,Ts}(p) + return new{T, Ts}(p) end end -CategoricalDirectSampler(p::Ts) where {T<:Real,Ts<:AbstractVector{T}} = - CategoricalDirectSampler{T,Ts}(p) +CategoricalDirectSampler(p::Ts) where {T <: Real, Ts <: AbstractVector{T}} = + CategoricalDirectSampler{T, Ts}(p) ncategories(s::CategoricalDirectSampler) = length(s.prob) @@ -22,7 +22,7 @@ function rand(rng::AbstractRNG, s::CategoricalDirectSampler) c = p[1] u = rand(rng) while c < u && i < n - c += p[i+=1] + c += p[i += 1] end 
return i end diff --git a/src/samplers/discretenonparametric.jl b/src/samplers/discretenonparametric.jl index 3bc02bd8b5..f89c7839bb 100644 --- a/src/samplers/discretenonparametric.jl +++ b/src/samplers/discretenonparametric.jl @@ -4,24 +4,24 @@ Data structure for efficiently sampling from an arbitrary probability mass function defined by support `xs` and probabilities `ps`. """ -struct DiscreteNonParametricSampler{T<:Real,S<:AbstractVector{T},A<:AliasTable} <: - Sampleable{Univariate,Discrete} +struct DiscreteNonParametricSampler{T <: Real, S <: AbstractVector{T}, A <: AliasTable} <: + Sampleable{Univariate, Discrete} support::S aliastable::A - function DiscreteNonParametricSampler{T,S}( - support::S, - probs::AbstractVector{<:Real}, - ) where {T<:Real,S<:AbstractVector{T}} + function DiscreteNonParametricSampler{T, S}( + support::S, + probs::AbstractVector{<:Real}, + ) where {T <: Real, S <: AbstractVector{T}} aliastable = AliasTable(probs) - new{T,S,typeof(aliastable)}(support, aliastable) + return new{T, S, typeof(aliastable)}(support, aliastable) end end DiscreteNonParametricSampler( support::S, probs::AbstractVector{<:Real}, -) where {T<:Real,S<:AbstractVector{T}} = DiscreteNonParametricSampler{T,S}(support, probs) +) where {T <: Real, S <: AbstractVector{T}} = DiscreteNonParametricSampler{T, S}(support, probs) rand(rng::AbstractRNG, s::DiscreteNonParametricSampler) = (@inbounds v = s.support[rand(rng, s.aliastable)]; v) diff --git a/src/samplers/exponential.jl b/src/samplers/exponential.jl index ccb335ebc4..440a8b4dc0 100644 --- a/src/samplers/exponential.jl +++ b/src/samplers/exponential.jl @@ -1,10 +1,10 @@ -struct ExponentialSampler <: Sampleable{Univariate,Continuous} +struct ExponentialSampler <: Sampleable{Univariate, Continuous} scale::Float64 end rand(rng::AbstractRNG, s::ExponentialSampler) = s.scale * randexp(rng) -struct ExponentialLogUSampler <: Sampleable{Univariate,Continuous} +struct ExponentialLogUSampler <: Sampleable{Univariate, Continuous} scale::Float64 end diff --git a/src/samplers/gamma.jl b/src/samplers/gamma.jl index 6719eb51f4..652ba80fa1 100644 --- a/src/samplers/gamma.jl +++ b/src/samplers/gamma.jl @@ -1,4 +1,3 @@ - # "Generating gamma variates by a modified rejection technique" # J.H. Ahrens, U. 
Dieter # Communications of the ACM, Vol 25(1), 1982, pp 47-54 @@ -6,7 +5,7 @@ # suitable for shape >= 1.0 -struct GammaGDSampler{T<:Real} <: Sampleable{Univariate,Continuous} +struct GammaGDSampler{T <: Real} <: Sampleable{Univariate, Continuous} a::T s2::T s::T @@ -31,17 +30,17 @@ function GammaGDSampler(g::Gamma{T}) where {T} ia = 1.0 / a q0 = ia * @horner( - ia, - 0.0416666664, - 0.0208333723, - 0.0079849875, - 0.0015746717, - -0.0003349403, - 0.0003340332, - 0.0006053049, - -0.0004701849, - 0.0001710320 - ) + ia, + 0.0416666664, + 0.0208333723, + 0.0079849875, + 0.0015746717, + -0.0003349403, + 0.0003340332, + 0.0006053049, + -0.0004701849, + 0.000171032 + ) if a <= 3.686 b = 0.463 + s + 0.178s2 @@ -57,7 +56,7 @@ function GammaGDSampler(g::Gamma{T}) where {T} c = 0.1515 / s end - GammaGDSampler(T(a), T(s2), T(s), T(i2s), T(d), T(q0), T(b), T(σ), T(c), scale(g)) + return GammaGDSampler(T(a), T(s2), T(s), T(i2s), T(d), T(q0), T(b), T(σ), T(c), scale(g)) end function calc_q(s::GammaGDSampler, t) @@ -66,23 +65,23 @@ function calc_q(s::GammaGDSampler, t) return s.q0 - s.s * t + 0.25 * t * t + 2.0 * s.s2 * log1p(v) else return s.q0 + - 0.5 * - t * - t * - ( - v * @horner( - v, - 0.333333333, - -0.249999949, - 0.199999867, - -0.1666774828, - 0.142873973, - -0.124385581, - 0.110368310, - -0.112750886, - 0.10408986 - ) - ) + 0.5 * + t * + t * + ( + v * @horner( + v, + 0.333333333, + -0.249999949, + 0.199999867, + -0.1666774828, + 0.142873973, + -0.124385581, + 0.11036831, + -0.112750886, + 0.10408986 + ) + ) end end @@ -135,7 +134,7 @@ end # doi:10.1007/BF02293108 # valid for 0 < shape <= 1 -struct GammaGSSampler <: Sampleable{Univariate,Continuous} +struct GammaGSSampler <: Sampleable{Univariate, Continuous} a::Float64 ia::Float64 b::Float64 @@ -146,7 +145,7 @@ function GammaGSSampler(d::Gamma) a = shape(d) ia = 1.0 / a b = 1.0 + 0.36787944117144233 * a - GammaGSSampler(a, ia, b, scale(d)) + return GammaGSSampler(a, ia, b, scale(d)) end function rand(rng::AbstractRNG, s::GammaGSSampler) @@ -164,6 +163,7 @@ function rand(rng::AbstractRNG, s::GammaGSSampler) e < log(x) * (1.0 - s.a) || return s.scale * x end end + return end @@ -174,7 +174,7 @@ end # http://www.cparity.com/projects/AcmClassification/samples/358414.pdf # valid for shape >= 1 -struct GammaMTSampler{T<:Real} <: Sampleable{Univariate,Continuous} +struct GammaMTSampler{T <: Real} <: Sampleable{Univariate, Continuous} d::T c::T κ::T @@ -193,7 +193,7 @@ function GammaMTSampler(g::Gamma) return GammaMTSampler(promote(d, c, κ, 331 // 10_000)...) 
end -function rand(rng::AbstractRNG, s::GammaMTSampler{T}) where {T<:Real} +function rand(rng::AbstractRNG, s::GammaMTSampler{T}) where {T <: Real} d = s.d c = s.c κ = s.κ @@ -218,18 +218,19 @@ function rand(rng::AbstractRNG, s::GammaMTSampler{T}) where {T<:Real} return v * κ end end + return end # Inverse Power sampler # uses the x*u^(1/a) trick from Marsaglia and Tsang (2000) for when shape < 1 -struct GammaIPSampler{S<:Sampleable{Univariate,Continuous},T<:Real} <: - Sampleable{Univariate,Continuous} +struct GammaIPSampler{S <: Sampleable{Univariate, Continuous}, T <: Real} <: + Sampleable{Univariate, Continuous} s::S #sampler for Gamma(1+shape,scale) nia::T #-1/scale end GammaIPSampler(d::Gamma) = GammaIPSampler(d, GammaMTSampler) -function GammaIPSampler(d::Gamma, ::Type{S}) where {S<:Sampleable} +function GammaIPSampler(d::Gamma, ::Type{S}) where {S <: Sampleable} shape_d = shape(d) sampler = S(Gamma{partype(d)}(1 + shape_d, scale(d))) return GammaIPSampler(sampler, -inv(shape_d)) @@ -238,5 +239,5 @@ end function rand(rng::AbstractRNG, s::GammaIPSampler) x = rand(rng, s.s) e = randexp(rng, typeof(x)) - x * exp(s.nia * e) + return x * exp(s.nia * e) end diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl index 61e570d492..499a8dd74a 100644 --- a/src/samplers/multinomial.jl +++ b/src/samplers/multinomial.jl @@ -1,9 +1,9 @@ function multinom_rand!( - rng::AbstractRNG, - n::Int, - p::AbstractVector{<:Real}, - x::AbstractVector{<:Real}, -) + rng::AbstractRNG, + n::Int, + p::AbstractVector{<:Real}, + x::AbstractVector{<:Real}, + ) k = length(p) length(x) == k || throw(DimensionMismatch("Invalid argument dimension.")) @@ -35,7 +35,7 @@ function multinom_rand!( @inbounds x[k] = n else # n must have been zero z = zero(eltype(x)) - for j = (i+1):k + for j in (i + 1):k @inbounds x[j] = z end end @@ -43,7 +43,7 @@ function multinom_rand!( return x end -struct MultinomialSampler{T<:Real} <: Sampleable{Multivariate,Discrete} +struct MultinomialSampler{T <: Real} <: Sampleable{Multivariate, Discrete} n::Int prob::Vector{T} alias::AliasTable @@ -62,7 +62,7 @@ function _rand!(rng::AbstractRNG, s::MultinomialSampler, x::AbstractVector{<:Rea # Use an alias table fill!(x, zero(eltype(x))) a = s.alias - for i = 1:n + for i in 1:n x[rand(rng, a)] += 1 end end diff --git a/src/samplers/obsoleted.jl b/src/samplers/obsoleted.jl index cfa28813a4..f5409eb519 100644 --- a/src/samplers/obsoleted.jl +++ b/src/samplers/obsoleted.jl @@ -7,19 +7,19 @@ ##### Draw Table ##### # Store an alias table -struct DiscreteDistributionTable <: Sampler{Univariate,Discrete} +struct DiscreteDistributionTable <: Sampler{Univariate, Discrete} table::Vector{Vector{Int64}} bounds::Vector{Int64} end # TODO: Test if bit operations can speed up Base64 mod's and fld's -function DiscreteDistributionTable(probs::Vector{T}) where {T<:Real} +function DiscreteDistributionTable(probs::Vector{T}) where {T <: Real} # Cache the cardinality of the outcome set n = length(probs) # Convert all Float64's into integers vals = Vector{Int64}(undef, n) - for i = 1:n + for i in 1:n vals[i] = round(Int, probs[i] * 64^9) end @@ -28,14 +28,14 @@ function DiscreteDistributionTable(probs::Vector{T}) where {T<:Real} bounds = zeros(Int64, 9) # Special case for deterministic distributions - for i = 1:n + for i in 1:n if vals[i] == 64^9 table[1] = Vector{Int64}(undef, 64) - for j = 1:64 + for j in 1:64 table[1][j] = i end bounds[1] = 64^9 - for j = 2:9 + for j in 2:9 table[j] = Vector{Int64}() bounds[j] = 64^9 end @@ -45,14 +45,14 @@ function 
DiscreteDistributionTable(probs::Vector{T}) where {T<:Real} # Fill tables multiplier = 1 - for index = 9:-1:1 + for index in 9:-1:1 counts = Vector{Int64}() - for i = 1:n + for i in 1:n digit = mod(vals[i], 64) # vals[i] = fld(vals[i], 64) vals[i] >>= 6 bounds[index] += digit - for itr = 1:digit + for itr in 1:digit push!(counts, i) end end @@ -70,7 +70,7 @@ end function rand(table::DiscreteDistributionTable) # 64^9 - 1 == 0x003fffffffffffff - i = rand(1:(64^9-1)) + i = rand(1:(64^9 - 1)) # if i == 64^9 # return table.table[9][rand(1:length(table.table[9]))] # end @@ -79,7 +79,7 @@ function rand(table::DiscreteDistributionTable) bound += 1 end if bound > 1 - index = fld(i - table.bounds[bound-1] - 1, 64^(9 - bound)) + 1 + index = fld(i - table.bounds[bound - 1] - 1, 64^(9 - bound)) + 1 else index = fld(i - 1, 64^(9 - bound)) + 1 end @@ -91,7 +91,7 @@ Base.show(io::IO, table::DiscreteDistributionTable) = @printf io "DiscreteDistri ##### Huffman Table ###### -abstract type HuffmanNode{T} <: Sampler{Univariate,Discrete} end +abstract type HuffmanNode{T} <: Sampler{Univariate, Discrete} end struct HuffmanLeaf{T} <: HuffmanNode{T} value::T @@ -118,13 +118,13 @@ function Base.getindex(h::HuffmanBranch{T}, u::UInt64) where {T} h = h.right end end - h.value + return h.value end # build the huffman tree # could be slightly more efficient using a Deque. function huffman(values::AbstractVector{T}, weights::AbstractVector{UInt64}) where {T} - leafs = [HuffmanLeaf{T}(values[i], weights[i]) for i = 1:length(weights)] + leafs = [HuffmanLeaf{T}(values[i], weights[i]) for i in 1:length(weights)] sort!(leafs; rev = true) branches = Vector{HuffmanBranch{T}}() @@ -139,7 +139,7 @@ function huffman(values::AbstractVector{T}, weights::AbstractVector{UInt64}) whe pushfirst!(branches, HuffmanBranch(left, right)) end - pop!(branches) + return pop!(branches) end function rand(h::HuffmanNode{T}) where {T} @@ -158,5 +158,5 @@ function rand(h::HuffmanNode{T}) where {T} end u = rem(u, w) end - h[u] + return h[u] end diff --git a/src/samplers/poisson.jl b/src/samplers/poisson.jl index 201c4b7ac3..56a725d591 100644 --- a/src/samplers/poisson.jl +++ b/src/samplers/poisson.jl @@ -1,10 +1,9 @@ - function poissonpvec(μ::Float64, n::Int) # Poisson probabilities, from 0 to n pv = Vector{Float64}(undef, n + 1) @inbounds pv[1] = p = exp(-μ) - for i = 1:n - @inbounds pv[i+1] = (p *= (μ / i)) + for i in 1:n + @inbounds pv[i + 1] = (p *= (μ / i)) end return pv end @@ -13,7 +12,7 @@ end # # Suitable for small μ # -struct PoissonCountSampler{T<:Real} <: Sampleable{Univariate,Discrete} +struct PoissonCountSampler{T <: Real} <: Sampleable{Univariate, Discrete} μ::T end @@ -39,7 +38,7 @@ end # # For μ sufficiently large, (i.e. 
>= 10.0) # -struct PoissonADSampler{T<:Real} <: Sampleable{Univariate,Discrete} +struct PoissonADSampler{T <: Real} <: Sampleable{Univariate, Discrete} μ::T s::T d::T @@ -53,7 +52,7 @@ function PoissonADSampler(μ::Real) d = 6 * μ^2 L = floor(Int, μ - 1.1484) - PoissonADSampler(promote(μ, s, d)..., L) + return PoissonADSampler(promote(μ, s, d)..., L) end function rand(rng::AbstractRNG, sampler::PoissonADSampler) @@ -106,6 +105,7 @@ function rand(rng::AbstractRNG, sampler::PoissonADSampler) return K end end + return end # Procedure F diff --git a/src/samplers/poissonbinomial.jl b/src/samplers/poissonbinomial.jl index 1c6b815026..6e9c45dc96 100644 --- a/src/samplers/poissonbinomial.jl +++ b/src/samplers/poissonbinomial.jl @@ -1,4 +1,4 @@ -struct PoissBinAliasSampler <: Sampleable{Univariate,Discrete} +struct PoissBinAliasSampler <: Sampleable{Univariate, Discrete} table::AliasTable end diff --git a/src/samplers/productnamedtuple.jl b/src/samplers/productnamedtuple.jl index 3d07fec0e1..2fefd68046 100644 --- a/src/samplers/productnamedtuple.jl +++ b/src/samplers/productnamedtuple.jl @@ -1,6 +1,6 @@ -struct ProductNamedTupleSampler{Tnames,Tsamplers,S<:ValueSupport} <: - Sampleable{NamedTupleVariate{Tnames},S} - samplers::NamedTuple{Tnames,Tsamplers} +struct ProductNamedTupleSampler{Tnames, Tsamplers, S <: ValueSupport} <: + Sampleable{NamedTupleVariate{Tnames}, S} + samplers::NamedTuple{Tnames, Tsamplers} end function Base.rand(rng::AbstractRNG, spl::ProductNamedTupleSampler{K}) where {K} @@ -14,10 +14,10 @@ function _rand(rng::AbstractRNG, spl::ProductNamedTupleSampler, dims::Dims) end function _rand!( - rng::AbstractRNG, - spl::ProductNamedTupleSampler, - xs::AbstractArray{<:NamedTuple{K}}, -) where {K} + rng::AbstractRNG, + spl::ProductNamedTupleSampler, + xs::AbstractArray{<:NamedTuple{K}}, + ) where {K} for i in eachindex(xs) xs[i] = NamedTuple{K}(rand(rng, spl)) end diff --git a/src/samplers/vonmises.jl b/src/samplers/vonmises.jl index b3d751b0fa..ec738726b3 100644 --- a/src/samplers/vonmises.jl +++ b/src/samplers/vonmises.jl @@ -1,5 +1,4 @@ - -struct VonMisesSampler <: Sampleable{Univariate,Continuous} +struct VonMisesSampler <: Sampleable{Univariate, Continuous} μ::Float64 κ::Float64 r::Float64 @@ -7,7 +6,7 @@ struct VonMisesSampler <: Sampleable{Univariate,Continuous} function VonMisesSampler(μ::Float64, κ::Float64) τ = 1.0 + sqrt(1.0 + 4 * abs2(κ)) ρ = (τ - sqrt(2.0 * τ)) / (2.0 * κ) - new(μ, κ, (1.0 + abs2(ρ)) / (2.0 * ρ)) + return new(μ, κ, (1.0 + abs2(ρ)) / (2.0 * ρ)) end end diff --git a/src/samplers/vonmisesfisher.jl b/src/samplers/vonmisesfisher.jl index dabad736f5..1d1a785536 100644 --- a/src/samplers/vonmisesfisher.jl +++ b/src/samplers/vonmisesfisher.jl @@ -1,5 +1,5 @@ # Sampler for von Mises-Fisher -struct VonMisesFisherSampler <: Sampleable{Multivariate,Continuous} +struct VonMisesFisherSampler <: Sampleable{Multivariate, Continuous} p::Int # the dimension κ::Float64 b::Float64 @@ -14,7 +14,7 @@ function VonMisesFisherSampler(μ::Vector{Float64}, κ::Float64) x0 = (1.0 - b) / (1.0 + b) c = κ * x0 + (p - 1) * log1p(-abs2(x0)) v = _vmf_householder_vec(μ) - VonMisesFisherSampler(p, κ, b, x0, c, v) + return VonMisesFisherSampler(p, κ, b, x0, c, v) end Base.length(s::VonMisesFisherSampler) = length(s.v) @@ -32,14 +32,14 @@ function _rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector) p = spl.p x[1] = w s = 0.0 - @inbounds for i = 2:p + @inbounds for i in 2:p x[i] = xi = randn(rng) s += abs2(xi) end # normalize x[2:p] r = sqrt((1.0 - abs2(w)) / s) - @inbounds for 
i = 2:p + @inbounds for i in 2:p x[i] *= r end @@ -94,7 +94,7 @@ function _vmf_householder_vec(μ::Vector{Float64}) s = sqrt(-2 * v[1]) v[1] /= s - @inbounds for i = 2:p + @inbounds for i in 2:p v[i] = μ[i] / s end diff --git a/src/show.jl b/src/show.jl index 5db81a5fe1..67c0afe7e5 100644 --- a/src/show.jl +++ b/src/show.jl @@ -1,5 +1,3 @@ - - # the name of a distribution # # Generally, this should be just the type name, e.g. Normal. @@ -17,10 +15,10 @@ show(io::IO, d::Distribution) = show(io, d, fieldnames(typeof(d))) # function show(io::IO, d::Distribution, pnames) uml, namevals = _use_multline_show(d, pnames) - uml ? show_multline(io, d, namevals) : show_oneline(io, d, namevals) + return uml ? show_multline(io, d, namevals) : show_oneline(io, d, namevals) end -const _NameVal = Tuple{Symbol,Any} +const _NameVal = Tuple{Symbol, Any} function _use_multline_show(d::Distribution, pnames) # decide whether to use one-line or multi-line format @@ -48,7 +46,7 @@ function _use_multline_show(d::Distribution, pnames) end function _use_multline_show(d::Distribution) - _use_multline_show(d, fieldnames(typeof(d))) + return _use_multline_show(d, fieldnames(typeof(d))) end function show_oneline(io::IO, d::Distribution, namevals) @@ -64,7 +62,7 @@ function show_oneline(io::IO, d::Distribution, namevals) print(io, ", ") end end - print(io, ')') + return print(io, ')') end function show_multline(io::IO, d::Distribution, namevals; newline = true) @@ -75,5 +73,5 @@ function show_multline(io::IO, d::Distribution, namevals; newline = true) print(io, ": ") println(io, pv) end - newline ? println(io, ")") : print(io, ")") + return newline ? println(io, ")") : print(io, ")") end diff --git a/src/statsapi.jl b/src/statsapi.jl index cabf6928fe..93eae67fa3 100644 --- a/src/statsapi.jl +++ b/src/statsapi.jl @@ -1,16 +1,16 @@ function _check_tail(tail::Symbol) - if tail !== :both && tail !== :left && tail !== :right + return if tail !== :both && tail !== :left && tail !== :right throw(ArgumentError("`tail=$(repr(tail))` is invalid")) end end function StatsAPI.pvalue( - dist::DiscreteUnivariateDistribution, - x::Number; - tail::Symbol = :both, -) + dist::DiscreteUnivariateDistribution, + x::Number; + tail::Symbol = :both, + ) _check_tail(tail) - if tail === :both + return if tail === :both p = 2 * min(ccdf(dist, x - 1), cdf(dist, x)) min(p, oneunit(p)) # if P(X = x) > 0, then possibly p > 1 elseif tail === :left @@ -21,12 +21,12 @@ function StatsAPI.pvalue( end function StatsAPI.pvalue( - dist::ContinuousUnivariateDistribution, - x::Number; - tail::Symbol = :both, -) + dist::ContinuousUnivariateDistribution, + x::Number; + tail::Symbol = :both, + ) _check_tail(tail) - if tail === :both + return if tail === :both p = 2 * min(cdf(dist, x), ccdf(dist, x)) min(p, oneunit(p)) # if P(X = x) > 0, then possibly p > 1 elseif tail === :left diff --git a/src/test_utils.jl b/src/test_utils.jl index 63a3a0b0b2..851cd6f2e2 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -7,9 +7,9 @@ function test_mvnormal end if isdefined(Base, :get_extension) && isdefined(Base.Experimental, :register_error_hint) function __init__() # Better error message if users forget to load Test - Base.Experimental.register_error_hint(MethodError) do io, exc, _, _ + return Base.Experimental.register_error_hint(MethodError) do io, exc, _, _ if exc.f === test_mvnormal && - (Base.get_extension(Distributions, :DistributionsTestExt) === nothing) + (Base.get_extension(Distributions, :DistributionsTestExt) === nothing) print(io, "\nDid you forget to load 
Test?") end end diff --git a/src/truncate.jl b/src/truncate.jl index 8118cce148..3062372e5c 100644 --- a/src/truncate.jl +++ b/src/truncate.jl @@ -34,10 +34,10 @@ function truncated(d::UnivariateDistribution, l::Real, u::Real) return truncated(d, promote(l, u)...) end function truncated( - d::UnivariateDistribution; - lower::Union{Real,Nothing} = nothing, - upper::Union{Real,Nothing} = nothing, -) + d::UnivariateDistribution; + lower::Union{Real, Nothing} = nothing, + upper::Union{Real, Nothing} = nothing, + ) return truncated(d, lower, upper) end function truncated(d::UnivariateDistribution, ::Nothing, u::Real) @@ -45,7 +45,7 @@ function truncated(d::UnivariateDistribution, ::Nothing, u::Real) logucdf = logtp = logcdf(d, u) ucdf = tp = exp(logucdf) - Truncated(d, nothing, promote(u, oftype(ucdf, -Inf), zero(ucdf), ucdf, tp, logtp)...) + return Truncated(d, nothing, promote(u, oftype(ucdf, -Inf), zero(ucdf), ucdf, tp, logtp)...) end function truncated(d::UnivariateDistribution, l::Real, ::Nothing) # (log)lcdf = (log) P(X < l) where X ~ d @@ -57,10 +57,10 @@ function truncated(d::UnivariateDistribution, l::Real, ::Nothing) tp = exp(logtp) l, loglcdf, lcdf, ucdf, tp, logtp = promote(l, loglcdf, lcdf, one(lcdf), tp, logtp) - Truncated(d, l, nothing, loglcdf, lcdf, ucdf, tp, logtp) + return Truncated(d, l, nothing, loglcdf, lcdf, ucdf, tp, logtp) end truncated(d::UnivariateDistribution, ::Nothing, ::Nothing) = d -function truncated(d::UnivariateDistribution, l::T, u::T) where {T<:Real} +function truncated(d::UnivariateDistribution, l::T, u::T) where {T <: Real} l <= u || error("the lower bound must be less or equal than the upper bound") # (log)lcdf = (log) P(X < l) where X ~ d @@ -75,7 +75,7 @@ function truncated(d::UnivariateDistribution, l::T, u::T) where {T<:Real} logtp = logsubexp(loglcdf, logucdf) tp = exp(logtp) - Truncated(d, promote(l, u, loglcdf, lcdf, ucdf, tp, logtp)...) + return Truncated(d, promote(l, u, loglcdf, lcdf, ucdf, tp, logtp)...) end """ @@ -87,12 +87,12 @@ The *truncated normal distribution* is a particularly important one in the famil Unlike the general case, truncated normal distributions support `mean`, `mode`, `modes`, `var`, `std`, and `entropy`. """ struct Truncated{ - D<:UnivariateDistribution, - S<:ValueSupport, - T<:Real, - TL<:Union{T,Nothing}, - TU<:Union{T,Nothing}, -} <: UnivariateDistribution{S} + D <: UnivariateDistribution, + S <: ValueSupport, + T <: Real, + TL <: Union{T, Nothing}, + TU <: Union{T, Nothing}, + } <: UnivariateDistribution{S} untruncated::D # the original distribution (untruncated) lower::TL # lower bound upper::TU # upper bound @@ -104,16 +104,16 @@ struct Truncated{ logtp::T # log(tp), i.e. 
log(ucdf - lcdf) function Truncated( - d::UnivariateDistribution, - l::TL, - u::TU, - loglcdf::T, - lcdf::T, - ucdf::T, - tp::T, - logtp::T, - ) where {T<:Real,TL<:Union{T,Nothing},TU<:Union{T,Nothing}} - new{typeof(d),value_support(typeof(d)),T,TL,TU}( + d::UnivariateDistribution, + l::TL, + u::TU, + loglcdf::T, + lcdf::T, + ucdf::T, + tp::T, + logtp::T, + ) where {T <: Real, TL <: Union{T, Nothing}, TU <: Union{T, Nothing}} + return new{typeof(d), value_support(typeof(d)), T, TL, TU}( d, l, u, @@ -126,10 +126,10 @@ struct Truncated{ end end -const LeftTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = - Truncated{D,S,T,T,Nothing} -const RightTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = - Truncated{D,S,T,Nothing,T} +const LeftTruncated{D <: UnivariateDistribution, S <: ValueSupport, T <: Real} = + Truncated{D, S, T, T, Nothing} +const RightTruncated{D <: UnivariateDistribution, S <: ValueSupport, T <: Real} = + Truncated{D, S, T, Nothing, T} ### Constructors of `Truncated` are deprecated - users should call `truncated` @deprecate Truncated(d::UnivariateDistribution, l::Real, u::Real) truncated(d, l, u) @@ -141,9 +141,9 @@ const RightTruncated{D<:UnivariateDistribution,S<:ValueSupport,T<:Real} = ucdf::T, tp::T, logtp::T, -) where {T<:Real} Truncated(d, l, u, log(lcdf), lcdf, ucdf, tp, logtp) +) where {T <: Real} Truncated(d, l, u, log(lcdf), lcdf, ucdf, tp, logtp) -function truncated(d::Truncated, l::T, u::T) where {T<:Real} +function truncated(d::Truncated, l::T, u::T) where {T <: Real} return truncated( d.untruncated, d.lower === nothing ? l : max(l, d.lower), @@ -158,10 +158,10 @@ function truncated(d::Truncated, l::Real, ::Nothing) end params(d::Truncated) = tuple(params(d.untruncated)..., d.lower, d.upper) -partype(d::Truncated{<:UnivariateDistribution,<:ValueSupport,T}) where {T<:Real} = +partype(d::Truncated{<:UnivariateDistribution, <:ValueSupport, T}) where {T <: Real} = promote_type(partype(d.untruncated), T) -Base.eltype(::Type{<:Truncated{D}}) where {D<:UnivariateDistribution} = eltype(D) +Base.eltype(::Type{<:Truncated{D}}) where {D <: UnivariateDistribution} = eltype(D) Base.eltype(d::Truncated) = eltype(d.untruncated) ### range and support @@ -179,9 +179,9 @@ maximum(d::LeftTruncated) = maximum(d.untruncated) maximum(d::Truncated) = min(maximum(d.untruncated), d.upper) function insupport( - d::Truncated{<:UnivariateDistribution,<:Union{Discrete,Continuous}}, - x::Real, -) + d::Truncated{<:UnivariateDistribution, <:Union{Discrete, Continuous}}, + x::Real, + ) return _in_closed_interval(x, d.lower, d.upper) && insupport(d.untruncated, x) end @@ -285,7 +285,7 @@ function show(io::IO, d::Truncated) else print(io, "; lower=$(d.lower), upper=$(d.upper))") end - uml && println(io) + return uml && println(io) end _use_multline_show(d::Truncated) = _use_multline_show(d.untruncated) diff --git a/src/truncated/discrete_uniform.jl b/src/truncated/discrete_uniform.jl index 6c54e5bca5..a22ac390d6 100644 --- a/src/truncated/discrete_uniform.jl +++ b/src/truncated/discrete_uniform.jl @@ -2,7 +2,7 @@ ##### Shortcut for truncating discrete uniform distributions. 
-function truncated(d::DiscreteUniform, l::T, u::T) where {T<:Real} +function truncated(d::DiscreteUniform, l::T, u::T) where {T <: Real} a = ceil(Int, max(l, d.a)) b = floor(Int, min(u, d.b)) return DiscreteUniform(a, b) diff --git a/src/truncated/exponential.jl b/src/truncated/exponential.jl index 2af92ab080..ca3227f9fb 100644 --- a/src/truncated/exponential.jl +++ b/src/truncated/exponential.jl @@ -2,10 +2,10 @@ ##### Truncated exponential distribution ##### -function mean(d::Truncated{<:Exponential,Continuous}) +function mean(d::Truncated{<:Exponential, Continuous}) θ = d.untruncated.θ l, r = extrema(d) # l is always finite - if isfinite(r) + return if isfinite(r) if abs(l - r) ≤ √eps() * θ # linear is a good approximation, just take the midpoint middle(l, r) diff --git a/src/truncated/loguniform.jl b/src/truncated/loguniform.jl index 6743e44fc3..ea58cfdde0 100644 --- a/src/truncated/loguniform.jl +++ b/src/truncated/loguniform.jl @@ -1,4 +1,4 @@ -truncated(d::LogUniform, lo::T, hi::T) where {T<:Real} = +truncated(d::LogUniform, lo::T, hi::T) where {T <: Real} = LogUniform(max(d.a, lo), min(d.b, hi)) truncated(d::LogUniform, lo::Real, ::Nothing) = LogUniform(max(d.a, lo), d.b) truncated(d::LogUniform, ::Nothing, hi::Real) = LogUniform(d.a, min(d.b, hi)) diff --git a/src/truncated/normal.jl b/src/truncated/normal.jl index 9dab23d902..93f79f6eda 100644 --- a/src/truncated/normal.jl +++ b/src/truncated/normal.jl @@ -1,11 +1,11 @@ ### statistics -function mode(d::Truncated{<:Normal{<:Real},Continuous,T}) where {T<:Real} +function mode(d::Truncated{<:Normal{<:Real}, Continuous, T}) where {T <: Real} μ = mean(d.untruncated) return T(clamp(μ, extrema(d)...)) end -modes(d::Truncated{<:Normal{<:Real},Continuous}) = [mode(d)] +modes(d::Truncated{<:Normal{<:Real}, Continuous}) = [mode(d)] # do not export. Used in mean # computes mean of standard normal distribution truncated to [a, b] @@ -77,7 +77,7 @@ function _tnvar(a::Real, b::Real) end end -function mean(d::Truncated{<:Normal{<:Real},Continuous,T}) where {T<:Real} +function mean(d::Truncated{<:Normal{<:Real}, Continuous, T}) where {T <: Real} d0 = d.untruncated μ = mean(d0) σ = std(d0) @@ -91,7 +91,7 @@ function mean(d::Truncated{<:Normal{<:Real},Continuous,T}) where {T<:Real} end end -function var(d::Truncated{<:Normal{<:Real},Continuous,T}) where {T<:Real} +function var(d::Truncated{<:Normal{<:Real}, Continuous, T}) where {T <: Real} d0 = d.untruncated μ = mean(d0) σ = std(d0) @@ -105,7 +105,7 @@ function var(d::Truncated{<:Normal{<:Real},Continuous,T}) where {T<:Real} end end -function entropy(d::Truncated{<:Normal{<:Real},Continuous}) +function entropy(d::Truncated{<:Normal{<:Real}, Continuous}) d0 = d.untruncated z = d.tp μ = mean(d0) @@ -115,7 +115,7 @@ function entropy(d::Truncated{<:Normal{<:Real},Continuous}) b = (upper - μ) / σ aφa = isinf(a) ? 0.0 : a * normpdf(a) bφb = isinf(b) ?
0.0 : b * normpdf(b) - 0.5 * (log2π + 1.0) + log(σ * z) + (aφa - bφb) / (2.0 * z) + return 0.5 * (log2π + 1.0) + log(σ * z) + (aφa - bφb) / (2.0 * z) end @@ -124,7 +124,7 @@ end ## Use specialized sampler, as quantile-based method is inaccurate in ## tail regions of the Normal, issue #343 -function rand(rng::AbstractRNG, d::Truncated{<:Normal{<:Real},Continuous}) +function rand(rng::AbstractRNG, d::Truncated{<:Normal{<:Real}, Continuous}) d0 = d.untruncated μ = mean(d0) σ = std(d0) @@ -155,7 +155,7 @@ function randnt(rng::AbstractRNG, lb::Float64, ub::Float64, tp::Float64) else span = ub - lb if lb > 0 && - span > 2.0 / (lb + sqrt(lb^2 + 4.0)) * exp((lb^2 - lb * sqrt(lb^2 + 4.0)) / 4.0) + span > 2.0 / (lb + sqrt(lb^2 + 4.0)) * exp((lb^2 - lb * sqrt(lb^2 + 4.0)) / 4.0) a = (lb + sqrt(lb^2 + 4.0)) / 2.0 while true r = rand(rng, Exponential(1.0 / a)) + lb @@ -165,8 +165,8 @@ function randnt(rng::AbstractRNG, lb::Float64, ub::Float64, tp::Float64) end end elseif ub < 0 && - ub - lb > - 2.0 / (-ub + sqrt(ub^2 + 4.0)) * exp((ub^2 + ub * sqrt(ub^2 + 4.0)) / 4.0) + ub - lb > + 2.0 / (-ub + sqrt(ub^2 + 4.0)) * exp((ub^2 + ub * sqrt(ub^2 + 4.0)) / 4.0) a = (-ub + sqrt(ub^2 + 4.0)) / 2.0 while true r = rand(rng, Exponential(1.0 / a)) - ub diff --git a/src/truncated/uniform.jl b/src/truncated/uniform.jl index cf67fc8e5a..bb56609928 100644 --- a/src/truncated/uniform.jl +++ b/src/truncated/uniform.jl @@ -2,6 +2,6 @@ ##### Shortcut for truncating uniform distributions. ##### -truncated(d::Uniform, l::T, u::T) where {T<:Real} = Uniform(max(l, d.a), min(u, d.b)) +truncated(d::Uniform, l::T, u::T) where {T <: Real} = Uniform(max(l, d.a), min(u, d.b)) truncated(d::Uniform, l::Real, ::Nothing) = Uniform(max(l, d.a), d.b) truncated(d::Uniform, ::Nothing, u::Real) = Uniform(d.a, min(u, d.b)) diff --git a/src/univariate/continuous/arcsine.jl b/src/univariate/continuous/arcsine.jl index 8e802a7ae6..3858510a81 100644 --- a/src/univariate/continuous/arcsine.jl +++ b/src/univariate/continuous/arcsine.jl @@ -25,13 +25,13 @@ External links Use `Arcsine(a, b, check_args=false)` to bypass argument checks. """ -struct Arcsine{T<:Real} <: ContinuousUnivariateDistribution +struct Arcsine{T <: Real} <: ContinuousUnivariateDistribution a::T b::T - Arcsine{T}(a::T, b::T) where {T<:Real} = new{T}(a, b) + Arcsine{T}(a::T, b::T) where {T <: Real} = new{T}(a, b) end -function Arcsine(a::T, b::T; check_args::Bool = true) where {T<:Real} +function Arcsine(a::T, b::T; check_args::Bool = true) where {T <: Real} @check_args Arcsine a < b return Arcsine{T}(a, b) end @@ -46,11 +46,11 @@ Arcsine() = Arcsine{Float64}(0.0, 1.0) @distr_support Arcsine d.a d.b #### Conversions -function convert(::Type{Arcsine{T}}, a::Real, b::Real) where {T<:Real} - Arcsine(T(a), T(b)) +function convert(::Type{Arcsine{T}}, a::Real, b::Real) where {T <: Real} + return Arcsine(T(a), T(b)) end -Base.convert(::Type{Arcsine{T}}, d::Arcsine) where {T<:Real} = Arcsine{T}(T(d.a), T(d.b)) -Base.convert(::Type{Arcsine{T}}, d::Arcsine{T}) where {T<:Real} = d +Base.convert(::Type{Arcsine{T}}, d::Arcsine) where {T <: Real} = Arcsine{T}(T(d.a), T(d.b)) +Base.convert(::Type{Arcsine{T}}, d::Arcsine{T}) where {T <: Real} = d ### Parameters @@ -77,20 +77,20 @@ entropy(d::Arcsine) = -0.24156447527049044469 + log(scale(d)) ### Evaluation function pdf(d::Arcsine, x::Real) - insupport(d, x) ? one(d.a) / (π * sqrt((x - d.a) * (d.b - x))) : zero(d.a) + return insupport(d, x) ? 
one(d.a) / (π * sqrt((x - d.a) * (d.b - x))) : zero(d.a) end -function logpdf(d::Arcsine{T}, x::Real) where {T<:Real} - insupport(d, x) ? -(logπ + log((x - d.a) * (d.b - x)) / 2) : -T(Inf) +function logpdf(d::Arcsine{T}, x::Real) where {T <: Real} + return insupport(d, x) ? -(logπ + log((x - d.a) * (d.b - x)) / 2) : -T(Inf) end -cdf(d::Arcsine{T}, x::Real) where {T<:Real} = +cdf(d::Arcsine{T}, x::Real) where {T <: Real} = x < d.a ? zero(T) : x > d.b ? one(T) : 0.636619772367581343 * asin(sqrt((x - d.a) / (d.b - d.a))) quantile(d::Arcsine, p::Real) = location(d) + abs2(sin(halfπ * p)) * scale(d) -function gradlogpdf(d::Arcsine{T}, x::R) where {T,R<:Real} +function gradlogpdf(d::Arcsine{T}, x::R) where {T, R <: Real} TP = promote_type(T, R) (a, b) = extrema(d) # on the bounds, we consider the gradient limit inside the domain diff --git a/src/univariate/continuous/beta.jl b/src/univariate/continuous/beta.jl index 1dd9635545..cf7aec24a5 100644 --- a/src/univariate/continuous/beta.jl +++ b/src/univariate/continuous/beta.jl @@ -26,13 +26,13 @@ External links * [Beta distribution on Wikipedia](http://en.wikipedia.org/wiki/Beta_distribution) """ -struct Beta{T<:Real} <: ContinuousUnivariateDistribution +struct Beta{T <: Real} <: ContinuousUnivariateDistribution α::T β::T Beta{T}(α::T, β::T) where {T} = new{T}(α, β) end -function Beta(α::T, β::T; check_args::Bool = true) where {T<:Real} +function Beta(α::T, β::T; check_args::Bool = true) where {T <: Real} @check_args Beta (α, α > zero(α)) (β, β > zero(β)) return Beta{T}(α, β) end @@ -43,23 +43,23 @@ Beta(α::Integer, β::Integer; check_args::Bool = true) = Beta(float(α), float(β); check_args = check_args) function Beta(α::Real; check_args::Bool = true) @check_args Beta (α, α > zero(α)) - Beta(α, α; check_args = false) + return Beta(α, α; check_args = false) end Beta() = Beta{Float64}(1.0, 1.0) @distr_support Beta 0.0 1.0 #### Conversions -function convert(::Type{Beta{T}}, α::Real, β::Real) where {T<:Real} - Beta(T(α), T(β)) +function convert(::Type{Beta{T}}, α::Real, β::Real) where {T <: Real} + return Beta(T(α), T(β)) end -Base.convert(::Type{Beta{T}}, d::Beta) where {T<:Real} = Beta{T}(T(d.α), T(d.β)) -Base.convert(::Type{Beta{T}}, d::Beta{T}) where {T<:Real} = d +Base.convert(::Type{Beta{T}}, d::Beta) where {T <: Real} = Beta{T}(T(d.α), T(d.β)) +Base.convert(::Type{Beta{T}}, d::Beta{T}) where {T <: Real} = d #### Parameters params(d::Beta) = (d.α, d.β) -@inline partype(d::Beta{T}) where {T<:Real} = T +@inline partype(d::Beta{T}) where {T <: Real} = T #### Statistics @@ -103,39 +103,39 @@ function kurtosis(d::Beta) α, β = params(d) s = α + β p = α * β - 6(abs2(α - β) * (s + 1) - p * (s + 2)) / (p * (s + 2) * (s + 3)) + return 6(abs2(α - β) * (s + 1) - p * (s + 2)) / (p * (s + 2) * (s + 3)) end function entropy(d::Beta) α, β = params(d) s = α + β - logbeta(α, β) - (α - 1) * digamma(α) - (β - 1) * digamma(β) + (s - 2) * digamma(s) + return logbeta(α, β) - (α - 1) * digamma(α) - (β - 1) * digamma(β) + (s - 2) * digamma(s) end function kldivergence(p::Beta, q::Beta) αp, βp = params(p) αq, βq = params(q) return logbeta(αq, βq) - logbeta(αp, βp) + - (αp - αq) * digamma(αp) + - (βp - βq) * digamma(βp) + - (αq - αp + βq - βp) * digamma(αp + βp) + (αp - αq) * digamma(αp) + + (βp - βq) * digamma(βp) + + (αq - αp + βq - βp) * digamma(αp + βp) end #### Evaluation @_delegate_statsfuns Beta beta α β -gradlogpdf(d::Beta{T}, x::Real) where {T<:Real} = +gradlogpdf(d::Beta{T}, x::Real) where {T <: Real} = ((α, β) = params(d); 0 <= x <= 1 ? 
(α - 1) / x - (β - 1) / (1 - x) : zero(T)) #### Sampling struct BetaSampler{ - T<:Real, - S1<:Sampleable{Univariate,Continuous}, - S2<:Sampleable{Univariate,Continuous}, -} <: Sampleable{Univariate,Continuous} + T <: Real, + S1 <: Sampleable{Univariate, Continuous}, + S2 <: Sampleable{Univariate, Continuous}, + } <: Sampleable{Univariate, Continuous} γ::Bool iα::T iβ::T @@ -217,13 +217,12 @@ function rand(rng::AbstractRNG, d::Beta{T}) where {T} end - function fit_mle( - ::Type{<:Beta}, - x::AbstractArray{T}; - maxiter::Int = 1000, - tol::Float64 = 1e-14, -) where {T<:Real} + ::Type{<:Beta}, + x::AbstractArray{T}; + maxiter::Int = 1000, + tol::Float64 = 1.0e-14, + ) where {T <: Real} α₀, β₀ = params(fit(Beta, x)) #initial guess of parameters g₁ = mean(log.(x)) @@ -241,8 +240,8 @@ function fit_mle( temp1 + g₂ - digamma(θ[2]) ] hess = [ - temp2-trigamma(θ[1]) temp2 - temp2 temp2-trigamma(θ[2]) + temp2 - trigamma(θ[1]) temp2 + temp2 temp2 - trigamma(θ[2]) ] Δθ = hess \ grad #newton step θ .-= Δθ @@ -253,8 +252,7 @@ function fit_mle( end - -function fit(::Type{<:Beta}, x::AbstractArray{T}) where {T<:Real} +function fit(::Type{<:Beta}, x::AbstractArray{T}) where {T <: Real} x_bar = mean(x) v_bar = varm(x, x_bar) temp = ((x_bar * (one(T) - x_bar)) / v_bar) - one(T) diff --git a/src/univariate/continuous/betaprime.jl b/src/univariate/continuous/betaprime.jl index 28bd276349..f85922aca2 100644 --- a/src/univariate/continuous/betaprime.jl +++ b/src/univariate/continuous/betaprime.jl @@ -26,13 +26,13 @@ External links * [Beta prime distribution on Wikipedia](http://en.wikipedia.org/wiki/Beta_prime_distribution) """ -struct BetaPrime{T<:Real} <: ContinuousUnivariateDistribution +struct BetaPrime{T <: Real} <: ContinuousUnivariateDistribution α::T β::T BetaPrime{T}(α::T, β::T) where {T} = new{T}(α, β) end -function BetaPrime(α::T, β::T; check_args::Bool = true) where {T<:Real} +function BetaPrime(α::T, β::T; check_args::Bool = true) where {T <: Real} @check_args BetaPrime (α, α > zero(α)) (β, β > zero(β)) return BetaPrime{T}(α, β) end @@ -43,41 +43,41 @@ BetaPrime(α::Integer, β::Integer; check_args::Bool = true) = BetaPrime(float(α), float(β); check_args = check_args) function BetaPrime(α::Real; check_args::Bool = true) @check_args BetaPrime (α, α > zero(α)) - BetaPrime(α, α; check_args = false) + return BetaPrime(α, α; check_args = false) end BetaPrime() = BetaPrime{Float64}(1.0, 1.0) @distr_support BetaPrime 0.0 Inf #### Conversions -function convert(::Type{BetaPrime{T}}, α::Real, β::Real) where {T<:Real} - BetaPrime(T(α), T(β)) +function convert(::Type{BetaPrime{T}}, α::Real, β::Real) where {T <: Real} + return BetaPrime(T(α), T(β)) end -Base.convert(::Type{BetaPrime{T}}, d::BetaPrime) where {T<:Real} = +Base.convert(::Type{BetaPrime{T}}, d::BetaPrime) where {T <: Real} = BetaPrime{T}(T(d.α), T(d.β)) -Base.convert(::Type{BetaPrime{T}}, d::BetaPrime{T}) where {T<:Real} = d +Base.convert(::Type{BetaPrime{T}}, d::BetaPrime{T}) where {T <: Real} = d #### Parameters params(d::BetaPrime) = (d.α, d.β) -@inline partype(d::BetaPrime{T}) where {T<:Real} = T +@inline partype(d::BetaPrime{T}) where {T <: Real} = T #### Statistics -function mean(d::BetaPrime{T}) where {T<:Real} +function mean(d::BetaPrime{T}) where {T <: Real} ((α, β) = params(d); β > 1 ? α / (β - 1) : T(NaN)) end -function mode(d::BetaPrime{T}) where {T<:Real} +function mode(d::BetaPrime{T}) where {T <: Real} ((α, β) = params(d); α > 1 ? 
(α - 1) / (β + 1) : zero(T)) end -function var(d::BetaPrime{T}) where {T<:Real} +function var(d::BetaPrime{T}) where {T <: Real} (α, β) = params(d) - β > 2 ? α * (α + β - 1) / ((β - 2) * (β - 1)^2) : T(NaN) + return β > 2 ? α * (α + β - 1) / ((β - 2) * (β - 1)^2) : T(NaN) end -function skewness(d::BetaPrime{T}) where {T<:Real} +function skewness(d::BetaPrime{T}) where {T <: Real} (α, β) = params(d) if β > 3 s = α + β - 1 @@ -117,5 +117,5 @@ end function rand(rng::AbstractRNG, d::BetaPrime) (α, β) = params(d) - rand(rng, Gamma(α)) / rand(rng, Gamma(β)) + return rand(rng, Gamma(α)) / rand(rng, Gamma(β)) end diff --git a/src/univariate/continuous/biweight.jl b/src/univariate/continuous/biweight.jl index 6df7b84fdd..13c9843dec 100644 --- a/src/univariate/continuous/biweight.jl +++ b/src/univariate/continuous/biweight.jl @@ -1,13 +1,13 @@ """ Biweight(μ, σ) """ -struct Biweight{T<:Real} <: ContinuousUnivariateDistribution +struct Biweight{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T - Biweight{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) + Biweight{T}(µ::T, σ::T) where {T <: Real} = new{T}(µ, σ) end -function Biweight(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Biweight(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Biweight (σ, σ > zero(σ)) return Biweight{T}(μ, σ) end @@ -22,7 +22,7 @@ Biweight(μ::Real = 0.0) = Biweight(μ, one(μ); check_args = false) ## Parameters params(d::Biweight) = (d.μ, d.σ) -@inline partype(d::Biweight{T}) where {T<:Real} = T +@inline partype(d::Biweight{T}) where {T <: Real} = T ## Properties mean(d::Biweight) = d.μ @@ -30,37 +30,37 @@ median(d::Biweight) = d.μ mode(d::Biweight) = d.μ var(d::Biweight) = d.σ^2 / 7 -skewness(d::Biweight{T}) where {T<:Real} = zero(T) -kurtosis(d::Biweight{T}) where {T<:Real} = T(1) / 21 - 3 +skewness(d::Biweight{T}) where {T <: Real} = zero(T) +kurtosis(d::Biweight{T}) where {T <: Real} = T(1) / 21 - 3 ## Functions -function pdf(d::Biweight{T}, x::Real) where {T<:Real} +function pdf(d::Biweight{T}, x::Real) where {T <: Real} u = abs(x - d.μ) / d.σ - u >= 1 ? zero(T) : (15 // 16) * (1 - u^2)^2 / d.σ + return u >= 1 ? zero(T) : (15 // 16) * (1 - u^2)^2 / d.σ end logpdf(d::Biweight, x::Real) = log(pdf(d, x)) -function cdf(d::Biweight{T}, x::Real) where {T<:Real} +function cdf(d::Biweight{T}, x::Real) where {T <: Real} u = (x - d.μ) / d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) + return u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) end -function ccdf(d::Biweight{T}, x::Real) where {T<:Real} +function ccdf(d::Biweight{T}, x::Real) where {T <: Real} u = (d.μ - x) / d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) + return u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : (u + 1)^3 / 16 * @horner(u, 8, -9, 3) end @quantile_newton Biweight -function mgf(d::Biweight{T}, t::Real) where {T<:Real} +function mgf(d::Biweight{T}, t::Real) where {T <: Real} a = d.σ * t a2 = a^2 - a == 0 ? one(T) : 15exp(d.μ * t) * (-3cosh(a) + (a + 3 / a) * sinh(a)) / (a2^2) + return a == 0 ? one(T) : 15exp(d.μ * t) * (-3cosh(a) + (a + 3 / a) * sinh(a)) / (a2^2) end -function cf(d::Biweight{T}, t::Real) where {T<:Real} +function cf(d::Biweight{T}, t::Real) where {T <: Real} a = d.σ * t a2 = a^2 - a == 0 ? one(T) + zero(T) * im : - -15cis(d.μ * t) * (3cos(a) + (a - 3 / a) * sin(a)) / (a2^2) + return a == 0 ? 
one(T) + zero(T) * im : -15cis(d.μ * t) * (3cos(a) + (a - 3 / a) * sin(a)) / (a2^2) end diff --git a/src/univariate/continuous/cauchy.jl b/src/univariate/continuous/cauchy.jl index 8995650154..027e0b8685 100644 --- a/src/univariate/continuous/cauchy.jl +++ b/src/univariate/continuous/cauchy.jl @@ -22,13 +22,13 @@ External links * [Cauchy distribution on Wikipedia](http://en.wikipedia.org/wiki/Cauchy_distribution) """ -struct Cauchy{T<:Real} <: ContinuousUnivariateDistribution +struct Cauchy{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T Cauchy{T}(µ, σ) where {T} = new{T}(µ, σ) end -function Cauchy(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Cauchy(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Cauchy (σ, σ > zero(σ)) return Cauchy{T}(μ, σ) end @@ -42,11 +42,11 @@ Cauchy(μ::Real = 0.0) = Cauchy(μ, one(μ); check_args = false) @distr_support Cauchy -Inf Inf #### Conversions -function convert(::Type{Cauchy{T}}, μ::Real, σ::Real) where {T<:Real} - Cauchy(T(μ), T(σ)) +function convert(::Type{Cauchy{T}}, μ::Real, σ::Real) where {T <: Real} + return Cauchy(T(μ), T(σ)) end -Base.convert(::Type{Cauchy{T}}, d::Cauchy) where {T<:Real} = Cauchy{T}(T(d.μ), T(d.σ)) -Base.convert(::Type{Cauchy{T}}, d::Cauchy{T}) where {T<:Real} = d +Base.convert(::Type{Cauchy{T}}, d::Cauchy) where {T <: Real} = Cauchy{T}(T(d.μ), T(d.σ)) +Base.convert(::Type{Cauchy{T}}, d::Cauchy{T}) where {T <: Real} = d #### Parameters @@ -54,18 +54,18 @@ location(d::Cauchy) = d.μ scale(d::Cauchy) = d.σ params(d::Cauchy) = (d.μ, d.σ) -@inline partype(d::Cauchy{T}) where {T<:Real} = T +@inline partype(d::Cauchy{T}) where {T <: Real} = T #### Statistics -mean(d::Cauchy{T}) where {T<:Real} = T(NaN) median(d::Cauchy) = d.μ mode(d::Cauchy) = d.μ -var(d::Cauchy{T}) where {T<:Real} = T(NaN) -skewness(d::Cauchy{T}) where {T<:Real} = T(NaN) -kurtosis(d::Cauchy{T}) where {T<:Real} = T(NaN) +var(d::Cauchy{T}) where {T <: Real} = T(NaN) +skewness(d::Cauchy{T}) where {T <: Real} = T(NaN) +kurtosis(d::Cauchy{T}) where {T <: Real} = T(NaN) entropy(d::Cauchy) = log4π + log(d.σ) @@ -80,25 +80,25 @@ logpdf(d::Cauchy, x::Real) = -(log1psq(zval(d, x)) + logπ + log(d.σ)) function cdf(d::Cauchy, x::Real) μ, σ = params(d) - invπ * atan(x - μ, σ) + 1 // 2 + return invπ * atan(x - μ, σ) + 1 // 2 end function ccdf(d::Cauchy, x::Real) μ, σ = params(d) - invπ * atan(μ - x, σ) + 1 // 2 + return invπ * atan(μ - x, σ) + 1 // 2 end function quantile(d::Cauchy, p::Real) μ, σ = params(d) - μ + σ * tan(π * (p - 1 // 2)) + return μ + σ * tan(π * (p - 1 // 2)) end function cquantile(d::Cauchy, p::Real) μ, σ = params(d) - μ + σ * tan(π * (1 // 2 - p)) + return μ + σ * tan(π * (1 // 2 - p)) end -mgf(d::Cauchy{T}, t::Real) where {T<:Real} = t == zero(t) ? one(T) : T(NaN)
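A small sketch of the closed forms above (again assuming `using Distributions`):

    d = Cauchy(0.0, 1.0)
    cdf(d, 1.0)         # invπ * atan(1.0, 1.0) + 1/2 = 0.75
    quantile(d, 0.75)   # μ + σ * tan(π * (3/4 - 1/2)) = 1.0
    mean(d), var(d)     # (NaN, NaN): these moments are undefined for the Cauchy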
cf(d::Cauchy, t::Real) = exp(im * (t * d.μ) - d.σ * abs(t)) #### Affine transformations Base.:+(d::Cauchy, c::Real) = Cauchy(d.μ + c, d.σ) Base.:*(c::Real, d::Cauchy) = Cauchy(c * d.μ, abs(c) * d.σ) #### Fitting # Note: this is not a Maximum Likelihood estimator -function fit(::Type{<:Cauchy}, x::AbstractArray{T}) where {T<:Real} +function fit(::Type{<:Cauchy}, x::AbstractArray{T}) where {T <: Real} l, m, u = quantile(x, [0.25, 0.5, 0.75]) - Cauchy(m, (u - l) / 2) + return Cauchy(m, (u - l) / 2) end diff --git a/src/univariate/continuous/chernoff.jl b/src/univariate/continuous/chernoff.jl index c574cbbdd5..cda2d41b8a 100644 --- a/src/univariate/continuous/chernoff.jl +++ b/src/univariate/continuous/chernoff.jl @@ -33,131 +33,133 @@ cdf(Chernoff(),-x) # For tail probabilities, use this instead of 1-cdf(Chernoff(),x) struct Chernoff <: ContinuousUnivariateDistribution end module ChernoffComputations -import QuadGK.quadgk -# The following arrays of constants have been precomputed to speed up computation. -# The first two correspond to the arrays a and b in the Groeneboom and Wellner article. -# The array airyai_roots contains roots of the airyai functions (atilde in the paper). + # Finally airyai_prime contains airyaiprime evaluated at the corresponding atildes
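As the docstring above advises, tail probabilities are best queried directly; a minimal sketch:

    d = Chernoff()
    pdf(d, 0.0)    # density at the mode, computed below as _pdf(x) = g(x) * g(-x) / 2
    cdf(d, -1.5)   # left tail via quadrature; preferred over 1 - cdf(d, 1.5)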
+ const a = [ + 0.14583333333333334 + -0.0014105902777777765 + -2.045269786155239e-5 + 1.7621771897199738e-6 + -5.7227597511834636e-8 + 1.2738111577329235e-9 + -2.1640824986157608e-11 + 2.87925759635786e-13 + -2.904496582565578e-15 + 1.8213982724416428e-17 + 4.318809086319947e-20 + -3.459634830348895e-21 + 6.541295360751684e-23 + -8.92275524974091e-25 + 1.0102911018761945e-26 + -9.976759882474874e-29 + ] + const b = [ + 0.6666666666666666 + 0.021164021164021163 + -0.0007893341226674574 + 1.9520978781876193e-5 + -3.184888134149933e-7 + 2.4964953625552273e-9 + 3.6216439304006735e-11 + -1.874438299727797e-12 + 4.393984966701305e-14 + -7.57237085924526e-16 + 1.063018673013022e-17 + -1.26451831871265e-19 + 1.2954000902764143e-21 + -1.1437790369170514e-23 + 8.543413293195206e-26 + -5.05879944592705e-28 + ] + const airyai_roots = [ + -2.338107410459767 + -4.08794944413097 + -5.520559828095551 + -6.786708090071759 + -7.944133587120853 + -9.02265085334098 + -10.040174341558085 + -11.008524303733262 + -11.936015563236262 + -12.828776752865757 + -13.691489035210719 + -14.527829951775335 + -15.340755135977997 + -16.132685156945772 + -16.90563399742994 + -17.66130010569706 + ] + const airyai_prime = [ + 0.7012108227206906 + -0.8031113696548534 + 0.865204025894141 + -0.9108507370495821 + 0.9473357094415571 + -0.9779228085695176 + 1.0043701226603126 + -1.0277386888207862 + 1.0487206485881893 + -1.0677938591574279 + 1.0853028313507 + -1.1015045702774968 + 1.1165961779326556 + -1.1307323104931881 + 1.1440366732735523 + -1.156609849116565 + ] -const cuberoottwo = cbrt(2.0) -const sqrthalfpi = sqrt(0.5 * pi) -const sqrttwopi = sqrt(2.0 * pi) + const cuberoottwo = cbrt(2.0) + const sqrthalfpi = sqrt(0.5 * pi) + const sqrttwopi = sqrt(2.0 * pi) -function p(y::Real) - if iszero(y) - return -sqrt(0.5 * pi) - end + function p(y::Real) + if iszero(y) + return -sqrt(0.5 * pi) + end - (y > 0) || throw(DomainError(y, "argument must be positive")) + (y > 0) || throw(DomainError(y, "argument must be positive")) - cnsty = y^(-1.5) - if (y <= 1.0) - return sum([(b[k] * cnsty - a[k] * sqrthalfpi) * y^(3 * k) for k = 1:length(a)]) - sqrthalfpi - else - return sum([exp(cuberoottwo * a * y) for a in airyai_roots]) * - 2 * - sqrttwopi * - exp(-y * y * y / 6) - cnsty + cnsty = y^(-1.5) + if (y <= 1.0) + return sum([(b[k] * cnsty - a[k] * sqrthalfpi) * y^(3 * k) for k in 1:length(a)]) - sqrthalfpi + else + return sum([exp(cuberoottwo * a * y) for a in airyai_roots]) * + 2 * + sqrttwopi * + exp(-y * y * y / 6) - cnsty + end end -end -function g(x::Real) - function g_one(y::Real) - return p(y) * exp(-0.5 * y * (2 * x + y) * (2 * x + y)) - end - function g_two(y::Real) - z = 2 * x + y * y - return (z * y * y + 0.5 * z * z) * exp(-0.5 * y * y * z * z) - end - if (x <= -1.0) - return cuberoottwo * - cuberoottwo * - exp(2 * x * x * x / 3.0) * - sum([ - exp(-cuberoottwo * airyai_roots[k] * x) / airyai_prime[k] for - k = 1:length(airyai_roots) - ]) - else - return 2 * x - - (quadgk(g_one, 0.0, Inf)[1] - 4 * quadgk(g_two, 0.0, Inf)[1]) / sqrttwopi # should perhaps combine integrals + function g(x::Real) + function g_one(y::Real) + return p(y) * exp(-0.5 * y * (2 * x + y) * (2 * x + y)) + end + function g_two(y::Real) + z = 2 * x + y * y + return (z * y * y + 0.5 * z * z) * exp(-0.5 * y * y * z * z) + end + if (x <= -1.0) + return cuberoottwo * + cuberoottwo * + exp(2 * x * x * x / 3.0) * + sum( + [ + exp(-cuberoottwo * airyai_roots[k] * x) /
airyai_prime[k] for + k in 1:length(airyai_roots) + ] + ) + else + return 2 * x - + (quadgk(g_one, 0.0, Inf)[1] - 4 * quadgk(g_two, 0.0, Inf)[1]) / sqrttwopi # should perhaps combine integrals + end end -end -_pdf(x::Real) = g(x) * g(-x) * 0.5 -_cdf(x::Real) = (x < 0.0) ? _cdfbar(-x) : 0.5 + quadgk(_pdf, 0.0, x)[1] -_cdfbar(x::Real) = (x < 0.0) ? _cdf(x) : quadgk(_pdf, x, Inf)[1] + _pdf(x::Real) = g(x) * g(-x) * 0.5 + _cdf(x::Real) = (x < 0.0) ? _cdfbar(-x) : 0.5 + quadgk(_pdf, 0.0, x)[1] + _cdfbar(x::Real) = (x < 0.0) ? _cdf(x) : quadgk(_pdf, x, Inf)[1] end pdf(d::Chernoff, x::Real) = ChernoffComputations._pdf(x) @@ -308,13 +310,13 @@ function rand(rng::AbstractRNG, d::Chernoff) # Ziggurat random n 1.516689116183566 ] n = length(x) - i = rand(rng, 0:(n-1)) + i = rand(rng, 0:(n - 1)) r = (2.0 * rand(rng) - 1) * ((i > 0) ? x[i] : A / y[1]) rabs = abs(r) - if rabs < x[i+1] + if rabs < x[i + 1] return r end - s = (i > 0) ? (y[i] + rand(rng) * (y[i+1] - y[i])) : rand(rng) * y[1] + s = (i > 0) ? (y[i] + rand(rng) * (y[i + 1] - y[i])) : rand(rng) * y[1] if s < 2.0 * ChernoffComputations._pdf(rabs) return r end diff --git a/src/univariate/continuous/chi.jl b/src/univariate/continuous/chi.jl index 95c2a59e71..b2933dba7c 100644 --- a/src/univariate/continuous/chi.jl +++ b/src/univariate/continuous/chi.jl @@ -21,7 +21,7 @@ External links * [Chi distribution on Wikipedia](http://en.wikipedia.org/wiki/Chi_distribution) """ -struct Chi{T<:Real} <: ContinuousUnivariateDistribution +struct Chi{T <: Real} <: ContinuousUnivariateDistribution ν::T Chi{T}(ν::T) where {T} = new{T}(ν) end @@ -36,15 +36,15 @@ Chi(ν::Integer; check_args::Bool = true) = Chi(float(ν); check_args = check_ar @distr_support Chi 0.0 Inf ### Conversions -convert(::Type{Chi{T}}, ν::Real) where {T<:Real} = Chi(T(ν)) -Base.convert(::Type{Chi{T}}, d::Chi) where {T<:Real} = Chi{T}(T(d.ν)) -Base.convert(::Type{Chi{T}}, d::Chi{T}) where {T<:Real} = d +convert(::Type{Chi{T}}, ν::Real) where {T <: Real} = Chi(T(ν)) +Base.convert(::Type{Chi{T}}, d::Chi) where {T <: Real} = Chi{T}(T(d.ν)) +Base.convert(::Type{Chi{T}}, d::Chi{T}) where {T <: Real} = d #### Parameters dof(d::Chi) = d.ν params(d::Chi) = (d.ν,) -@inline partype(d::Chi{T}) where {T<:Real} = T +@inline partype(d::Chi{T}) where {T <: Real} = T #### Statistics @@ -57,23 +57,23 @@ _chi_skewness(μ::Real, σ::Real) = (σ2 = σ^2; σ3 = σ2 * σ; (μ / σ3) * (1 function skewness(d::Chi) μ = mean(d) σ = sqrt(d.ν - μ^2) - _chi_skewness(μ, σ) + return _chi_skewness(μ, σ) end function kurtosis(d::Chi) μ = mean(d) σ = sqrt(d.ν - μ^2) γ = _chi_skewness(μ, σ) - (2 / σ^2) * (1 - μ * σ * γ - σ^2) + return (2 / σ^2) * (1 - μ * σ * γ - σ^2) end -entropy(d::Chi{T}) where {T<:Real} = +entropy(d::Chi{T}) where {T <: Real} = (ν = d.ν; loggamma(ν / 2) - T(logtwo) / 2 - ((ν - 1) / 2) * digamma(ν / 2) + ν / 2) function mode(d::Chi; check_args::Bool = true) ν = d.ν - @check_args(Chi, (ν, ν >= 1, "Chi distribution has no mode when ν < 1"),) - sqrt(ν - 1) + @check_args(Chi, (ν, ν >= 1, "Chi distribution has no mode when ν < 1")) + return sqrt(ν - 1) end function kldivergence(p::Chi, q::Chi) @@ -93,7 +93,7 @@ function logpdf(d::Chi, x::Real) return x < zero(x) ? oftype(val, -Inf) : val end -gradlogpdf(d::Chi{T}, x::Real) where {T<:Real} = x >= 0 ? (d.ν - 1) / x - x : zero(T) +gradlogpdf(d::Chi{T}, x::Real) where {T <: Real} = x >= 0 ? 
(d.ν - 1) / x - x : zero(T) for f in (:cdf, :ccdf, :logcdf, :logccdf) @eval $f(d::Chi, x::Real) = $f(Chisq(d.ν; check_args = false), max(x, 0)^2) @@ -107,7 +107,7 @@ end rand(rng::AbstractRNG, d::Chi) = (ν = d.ν; sqrt(rand(rng, Gamma(ν / 2.0, 2.0one(ν))))) -struct ChiSampler{S<:Sampleable{Univariate,Continuous}} <: Sampleable{Univariate,Continuous} +struct ChiSampler{S <: Sampleable{Univariate, Continuous}} <: Sampleable{Univariate, Continuous} s::S end diff --git a/src/univariate/continuous/chisq.jl b/src/univariate/continuous/chisq.jl index 1e25cc1d70..f849cef4ea 100644 --- a/src/univariate/continuous/chisq.jl +++ b/src/univariate/continuous/chisq.jl @@ -20,7 +20,7 @@ External links * [Chi-squared distribution on Wikipedia](http://en.wikipedia.org/wiki/Chi-squared_distribution) """ -struct Chisq{T<:Real} <: ContinuousUnivariateDistribution +struct Chisq{T <: Real} <: ContinuousUnivariateDistribution ν::T Chisq{T}(ν::T) where {T} = new{T}(ν) end @@ -38,12 +38,12 @@ Chisq(ν::Integer; check_args::Bool = true) = Chisq(float(ν); check_args = chec dof(d::Chisq) = d.ν params(d::Chisq) = (d.ν,) -@inline partype(d::Chisq{T}) where {T<:Real} = T +@inline partype(d::Chisq{T}) where {T <: Real} = T ### Conversions -convert(::Type{Chisq{T}}, ν::Real) where {T<:Real} = Chisq(T(ν)) -Base.convert(::Type{Chisq{T}}, d::Chisq) where {T<:Real} = Chisq{T}(T(d.ν)) -Base.convert(::Type{Chisq{T}}, d::Chisq{T}) where {T<:Real} = d +convert(::Type{Chisq{T}}, ν::Real) where {T <: Real} = Chisq(T(ν)) +Base.convert(::Type{Chisq{T}}, d::Chisq) where {T <: Real} = Chisq{T}(T(d.ν)) +Base.convert(::Type{Chisq{T}}, d::Chisq{T}) where {T <: Real} = d #### Statistics @@ -55,7 +55,7 @@ skewness(d::Chisq) = sqrt(8 / d.ν) kurtosis(d::Chisq) = 12 / d.ν -mode(d::Chisq{T}) where {T<:Real} = d.ν > 2 ? d.ν - 2 : zero(T) +mode(d::Chisq{T}) where {T <: Real} = d.ν > 2 ? d.ν - 2 : zero(T) function median(d::Chisq; approx::Bool = false) if approx @@ -67,7 +67,7 @@ end function entropy(d::Chisq) hν = d.ν / 2 - hν + logtwo + loggamma(hν) + (1 - hν) * digamma(hν) + return hν + logtwo + loggamma(hν) + (1 - hν) * digamma(hν) end function kldivergence(p::Chisq, q::Chisq) @@ -77,7 +77,6 @@ function kldivergence(p::Chisq, q::Chisq) end - #### Evaluation @_delegate_statsfuns Chisq chisq ν @@ -91,7 +90,7 @@ end cf(d::Chisq, t::Real) = (1 - 2 * im * t)^(-d.ν / 2) -gradlogpdf(d::Chisq{T}, x::Real) where {T<:Real} = +gradlogpdf(d::Chisq{T}, x::Real) where {T <: Real} = x > 0 ? 
(d.ν / 2 - 1) / x - 1 // 2 : zero(T) diff --git a/src/univariate/continuous/cosine.jl b/src/univariate/continuous/cosine.jl index 2e4f89d633..43b2cdba4c 100644 --- a/src/univariate/continuous/cosine.jl +++ b/src/univariate/continuous/cosine.jl @@ -7,13 +7,13 @@ External link: * [Cosine distribution on wikipedia](http://en.wikipedia.org/wiki/Raised_cosine_distribution) """ -struct Cosine{T<:Real} <: ContinuousUnivariateDistribution +struct Cosine{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T Cosine{T}(μ::T, σ::T) where {T} = new{T}(µ, σ) end -function Cosine(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Cosine(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Cosine (σ, σ > zero(σ)) return Cosine{T}(μ, σ) end @@ -27,13 +27,13 @@ Cosine(μ::Real = 0.0) = Cosine(μ, one(µ); check_args = false) @distr_support Cosine d.μ - d.σ d.μ + d.σ #### Conversions -function convert(::Type{Cosine{T}}, μ::Real, σ::Real) where {T<:Real} - Cosine(T(μ), T(σ)) +function convert(::Type{Cosine{T}}, μ::Real, σ::Real) where {T <: Real} + return Cosine(T(μ), T(σ)) end -function Base.convert(::Type{Cosine{T}}, d::Cosine) where {T<:Real} - Cosine{T}(T(d.μ), T(d.σ)) +function Base.convert(::Type{Cosine{T}}, d::Cosine) where {T <: Real} + return Cosine{T}(T(d.μ), T(d.σ)) end -Base.convert(::Type{Cosine{T}}, d::Cosine{T}) where {T<:Real} = d +Base.convert(::Type{Cosine{T}}, d::Cosine{T}) where {T <: Real} = d #### Parameters @@ -41,7 +41,7 @@ location(d::Cosine) = d.μ scale(d::Cosine) = d.σ params(d::Cosine) = (d.μ, d.σ) -@inline partype(d::Cosine{T}) where {T<:Real} = T +@inline partype(d::Cosine{T}) where {T <: Real} = T #### Statistics @@ -52,16 +52,16 @@ median(d::Cosine) = d.μ mode(d::Cosine) = d.μ -var(d::Cosine{T}) where {T<:Real} = d.σ^2 * (1 // 3 - 2 / T(π)^2) +var(d::Cosine{T}) where {T <: Real} = d.σ^2 * (1 // 3 - 2 / T(π)^2) -skewness(d::Cosine{T}) where {T<:Real} = zero(T) +skewness(d::Cosine{T}) where {T <: Real} = zero(T) -kurtosis(d::Cosine{T}) where {T<:Real} = 6 * (90 - T(π)^4) / (5 * (T(π)^2 - 6)^2) +kurtosis(d::Cosine{T}) where {T <: Real} = 6 * (90 - T(π)^4) / (5 * (T(π)^2 - 6)^2) #### Evaluation -function pdf(d::Cosine{T}, x::Real) where {T<:Real} +function pdf(d::Cosine{T}, x::Real) where {T <: Real} if insupport(d, x) z = (x - d.μ) / d.σ return (1 + cospi(z)) / (2d.σ) @@ -72,7 +72,7 @@ end logpdf(d::Cosine, x::Real) = log(pdf(d, x)) -function cdf(d::Cosine{T}, x::Real) where {T<:Real} +function cdf(d::Cosine{T}, x::Real) where {T <: Real} if x < d.μ - d.σ return zero(T) end @@ -80,10 +80,10 @@ function cdf(d::Cosine{T}, x::Real) where {T<:Real} return one(T) end z = (x - d.μ) / d.σ - (1 + z + sinpi(z) * invπ) / 2 + return (1 + z + sinpi(z) * invπ) / 2 end -function ccdf(d::Cosine{T}, x::Real) where {T<:Real} +function ccdf(d::Cosine{T}, x::Real) where {T <: Real} if x < d.μ - d.σ return one(T) end @@ -91,7 +91,7 @@ function ccdf(d::Cosine{T}, x::Real) where {T<:Real} return zero(T) end nz = (d.μ - x) / d.σ - (1 + nz + sinpi(nz) * invπ) / 2 + return (1 + nz + sinpi(nz) * invπ) / 2 end quantile(d::Cosine, p::Real) = quantile_bisect(d, p) diff --git a/src/univariate/continuous/epanechnikov.jl b/src/univariate/continuous/epanechnikov.jl index 54a546b224..4bf6f7c90a 100644 --- a/src/univariate/continuous/epanechnikov.jl +++ b/src/univariate/continuous/epanechnikov.jl @@ -1,13 +1,13 @@ """ Epanechnikov(μ, σ) """ -struct Epanechnikov{T<:Real} <: ContinuousUnivariateDistribution +struct Epanechnikov{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T 
Epanechnikov{T}(µ::T, σ::T) where {T} = new{T}(µ, σ) end -function Epanechnikov(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Epanechnikov(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Epanechnikov (σ, σ > zero(σ)) return Epanechnikov{T}(μ, σ) end @@ -21,20 +21,20 @@ Epanechnikov(μ::Real = 0.0) = Epanechnikov(μ, one(μ); check_args = false) @distr_support Epanechnikov d.μ - d.σ d.μ + d.σ #### Conversions -function convert(::Type{Epanechnikov{T}}, μ::Real, σ::Real) where {T<:Real} - Epanechnikov(T(μ), T(σ), check_args = false) +function convert(::Type{Epanechnikov{T}}, μ::Real, σ::Real) where {T <: Real} + return Epanechnikov(T(μ), T(σ), check_args = false) end -function Base.convert(::Type{Epanechnikov{T}}, d::Epanechnikov) where {T<:Real} - Epanechnikov{T}(T(d.μ), T(d.σ)) +function Base.convert(::Type{Epanechnikov{T}}, d::Epanechnikov) where {T <: Real} + return Epanechnikov{T}(T(d.μ), T(d.σ)) end -Base.convert(::Type{Epanechnikov{T}}, d::Epanechnikov{T}) where {T<:Real} = d +Base.convert(::Type{Epanechnikov{T}}, d::Epanechnikov{T}) where {T <: Real} = d ## Parameters location(d::Epanechnikov) = d.μ scale(d::Epanechnikov) = d.σ params(d::Epanechnikov) = (d.μ, d.σ) -@inline partype(d::Epanechnikov{T}) where {T<:Real} = T +@inline partype(d::Epanechnikov{T}) where {T <: Real} = T ## Properties mean(d::Epanechnikov) = d.μ @@ -42,34 +42,34 @@ median(d::Epanechnikov) = d.μ mode(d::Epanechnikov) = d.μ var(d::Epanechnikov) = d.σ^2 / 5 -skewness(d::Epanechnikov{T}) where {T<:Real} = zero(T) -kurtosis(d::Epanechnikov{T}) where {T<:Real} = -2.914285714285714 * one(T) # 3/35-3 +skewness(d::Epanechnikov{T}) where {T <: Real} = zero(T) +kurtosis(d::Epanechnikov{T}) where {T <: Real} = -2.914285714285714 * one(T) # 3/35-3 ## Functions -function pdf(d::Epanechnikov{T}, x::Real) where {T<:Real} +function pdf(d::Epanechnikov{T}, x::Real) where {T <: Real} u = abs(x - d.μ) / d.σ - u >= 1 ? zero(T) : 3 * (1 - u^2) / (4 * d.σ) + return u >= 1 ? zero(T) : 3 * (1 - u^2) / (4 * d.σ) end logpdf(d::Epanechnikov, x::Real) = log(pdf(d, x)) -function cdf(d::Epanechnikov{T}, x::Real) where {T<:Real} +function cdf(d::Epanechnikov{T}, x::Real) where {T <: Real} u = (x - d.μ) / d.σ - u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) + return u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) end -function ccdf(d::Epanechnikov{T}, x::Real) where {T<:Real} +function ccdf(d::Epanechnikov{T}, x::Real) where {T <: Real} u = (d.μ - x) / d.σ - u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) + return u <= -1 ? zero(T) : u >= 1 ? one(T) : 1 // 2 + u * (3 // 4 - u^2 / 4) end @quantile_newton Epanechnikov -function mgf(d::Epanechnikov{T}, t::Real) where {T<:Real} +function mgf(d::Epanechnikov{T}, t::Real) where {T <: Real} a = d.σ * t - a == 0 ? one(T) : 3exp(d.μ * t) * (cosh(a) - sinh(a) / a) / a^2 + return a == 0 ? one(T) : 3exp(d.μ * t) * (cosh(a) - sinh(a) / a) / a^2 end -function cf(d::Epanechnikov{T}, t::Real) where {T<:Real} +function cf(d::Epanechnikov{T}, t::Real) where {T <: Real} a = d.σ * t - a == 0 ? one(T) + zero(T) * im : -3exp(im * d.μ * t) * (cos(a) - sin(a) / a) / a^2 + return a == 0 ? 
one(T) + zero(T) * im : -3exp(im * d.μ * t) * (cos(a) - sin(a) / a) / a^2 end diff --git a/src/univariate/continuous/erlang.jl b/src/univariate/continuous/erlang.jl index 76cfaea443..99c6b2c5a6 100644 --- a/src/univariate/continuous/erlang.jl +++ b/src/univariate/continuous/erlang.jl @@ -14,7 +14,7 @@ External links * [Erlang distribution on Wikipedia](http://en.wikipedia.org/wiki/Erlang_distribution) """ -struct Erlang{T<:Real} <: ContinuousUnivariateDistribution +struct Erlang{T <: Real} <: ContinuousUnivariateDistribution α::Int θ::T Erlang{T}(α::Int, θ::T) where {T} = new{T}(α, θ) @@ -39,13 +39,13 @@ Erlang(α::Integer = 1) = Erlang(α, 1.0; check_args = false) @distr_support Erlang 0.0 Inf #### Conversions -function convert(::Type{Erlang{T}}, α::Integer, θ::S) where {T<:Real,S<:Real} - Erlang(α, T(θ), check_args = false) +function convert(::Type{Erlang{T}}, α::Integer, θ::S) where {T <: Real, S <: Real} + return Erlang(α, T(θ), check_args = false) end -function Base.convert(::Type{Erlang{T}}, d::Erlang) where {T<:Real} - Erlang{T}(d.α, T(d.θ)) +function Base.convert(::Type{Erlang{T}}, d::Erlang) where {T <: Real} + return Erlang{T}(d.α, T(d.θ)) end -Base.convert(::Type{Erlang{T}}, d::Erlang{T}) where {T<:Real} = d +Base.convert(::Type{Erlang{T}}, d::Erlang{T}) where {T <: Real} = d #### Parameters @@ -53,7 +53,7 @@ shape(d::Erlang) = d.α scale(d::Erlang) = d.θ rate(d::Erlang) = inv(d.θ) params(d::Erlang) = (d.α, d.θ) -@inline partype(d::Erlang{T}) where {T<:Real} = T +@inline partype(d::Erlang{T}) where {T <: Real} = T #### Statistics @@ -64,19 +64,19 @@ kurtosis(d::Erlang) = 6 / d.α function mode(d::Erlang; check_args::Bool = true) α, θ = params(d) - @check_args(Erlang, (α, α >= 1, "Erlang has no mode when α < 1"),) - θ * (α - 1) + @check_args(Erlang, (α, α >= 1, "Erlang has no mode when α < 1")) + return θ * (α - 1) end function entropy(d::Erlang) (α, θ) = params(d) - α + loggamma(α) + (1 - α) * digamma(α) + log(θ) + return α + loggamma(α) + (1 - α) * digamma(α) + log(θ) end mgf(d::Erlang, t::Real) = (1 - t * d.θ)^(-d.α) function cgf(d::Erlang, t) α, θ = params(d) - -α * log1p(-t * θ) + return -α * log1p(-t * θ) end cf(d::Erlang, t::Real) = (1 - im * t * d.θ)^(-d.α) diff --git a/src/univariate/continuous/exponential.jl b/src/univariate/continuous/exponential.jl index d73f9644b6..d53dd371b4 100644 --- a/src/univariate/continuous/exponential.jl +++ b/src/univariate/continuous/exponential.jl @@ -21,7 +21,7 @@ External links * [Exponential distribution on Wikipedia](http://en.wikipedia.org/wiki/Exponential_distribution) """ -struct Exponential{T<:Real} <: ContinuousUnivariateDistribution +struct Exponential{T <: Real} <: ContinuousUnivariateDistribution θ::T # note: scale not rate Exponential{T}(θ::T) where {T} = new{T}(θ) end @@ -38,11 +38,11 @@ Exponential() = Exponential{Float64}(1.0) @distr_support Exponential 0.0 Inf ### Conversions -convert(::Type{Exponential{T}}, θ::S) where {T<:Real,S<:Real} = Exponential(T(θ)) -function Base.convert(::Type{Exponential{T}}, d::Exponential) where {T<:Real} +convert(::Type{Exponential{T}}, θ::S) where {T <: Real, S <: Real} = Exponential(T(θ)) +function Base.convert(::Type{Exponential{T}}, d::Exponential) where {T <: Real} return Exponential(T(d.θ)) end -Base.convert(::Type{Exponential{T}}, d::Exponential{T}) where {T<:Real} = d +Base.convert(::Type{Exponential{T}}, d::Exponential{T}) where {T <: Real} = d #### Parameters @@ -50,13 +50,13 @@ scale(d::Exponential) = d.θ rate(d::Exponential) = inv(d.θ) params(d::Exponential) = (d.θ,) 
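A quick sketch of the scale parameterization flagged in the struct comment above ("scale not rate"), using the statistics defined just below:

    d = Exponential(3.0)
    rate(d)              # inv(θ) = 1/3
    mean(d), median(d)   # θ = 3.0 and logtwo * θ ≈ 2.08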
-partype(::Exponential{T}) where {T<:Real} = T +partype(::Exponential{T}) where {T <: Real} = T #### Statistics mean(d::Exponential) = d.θ median(d::Exponential) = logtwo * d.θ -mode(::Exponential{T}) where {T<:Real} = zero(T) +mode(::Exponential{T}) where {T <: Real} = zero(T) var(d::Exponential) = d.θ^2 skewness(::Exponential{T}) where {T} = T(2) @@ -95,7 +95,7 @@ cquantile(d::Exponential, p::Real) = -xval(d, log(p)) invlogcdf(d::Exponential, lp::Real) = -xval(d, log1mexp(lp)) invlogccdf(d::Exponential, lp::Real) = -xval(d, lp) -gradlogpdf(d::Exponential{T}, x::Real) where {T<:Real} = x > 0 ? -rate(d) : zero(T) +gradlogpdf(d::Exponential{T}, x::Real) where {T <: Real} = x > 0 ? -rate(d) : zero(T) mgf(d::Exponential, t::Real) = 1 / (1 - t * scale(d)) function cgf(d::Exponential, t) @@ -123,12 +123,12 @@ struct ExponentialStats <: SufficientStats ExponentialStats(sx::Real, sw::Real) = new(sx, sw) end -suffstats(::Type{<:Exponential}, x::AbstractArray{T}) where {T<:Real} = +suffstats(::Type{<:Exponential}, x::AbstractArray{T}) where {T <: Real} = ExponentialStats(sum(x), length(x)) suffstats( ::Type{<:Exponential}, x::AbstractArray{T}, w::AbstractArray{Float64}, -) where {T<:Real} = ExponentialStats(dot(x, w), sum(w)) +) where {T <: Real} = ExponentialStats(dot(x, w), sum(w)) fit_mle(::Type{<:Exponential}, ss::ExponentialStats) = Exponential(ss.sx / ss.sw) diff --git a/src/univariate/continuous/fdist.jl b/src/univariate/continuous/fdist.jl index 650fffa9bd..0619df1ec9 100644 --- a/src/univariate/continuous/fdist.jl +++ b/src/univariate/continuous/fdist.jl @@ -22,17 +22,17 @@ External links * [F distribution on Wikipedia](http://en.wikipedia.org/wiki/F-distribution) """ -struct FDist{T<:Real} <: ContinuousUnivariateDistribution +struct FDist{T <: Real} <: ContinuousUnivariateDistribution ν1::T ν2::T function FDist{T}(ν1::T, ν2::T; check_args::Bool = true) where {T} @check_args FDist (ν1, ν1 > zero(ν1)) (ν2, ν2 > zero(ν2)) - new{T}(ν1, ν2) + return new{T}(ν1, ν2) end end -FDist(ν1::T, ν2::T; check_args::Bool = true) where {T<:Real} = +FDist(ν1::T, ν2::T; check_args::Bool = true) where {T <: Real} = FDist{T}(ν1, ν2; check_args = check_args) FDist(ν1::Integer, ν2::Integer; check_args::Bool = true) = FDist(float(ν1), float(ν2); check_args = check_args) @@ -42,33 +42,33 @@ FDist(ν1::Real, ν2::Real; check_args::Bool = true) = @distr_support FDist 0.0 Inf #### Conversions -function convert(::Type{FDist{T}}, ν1::S, ν2::S) where {T<:Real,S<:Real} - FDist(T(ν1), T(ν2)) +function convert(::Type{FDist{T}}, ν1::S, ν2::S) where {T <: Real, S <: Real} + return FDist(T(ν1), T(ν2)) end -Base.convert(::Type{FDist{T}}, d::FDist) where {T<:Real} = FDist{T}(T(d.ν1), T(d.ν2)) -Base.convert(::Type{FDist{T}}, d::FDist{T}) where {T<:Real} = d +Base.convert(::Type{FDist{T}}, d::FDist) where {T <: Real} = FDist{T}(T(d.ν1), T(d.ν2)) +Base.convert(::Type{FDist{T}}, d::FDist{T}) where {T <: Real} = d #### Parameters params(d::FDist) = (d.ν1, d.ν2) -@inline partype(d::FDist{T}) where {T<:Real} = T +@inline partype(d::FDist{T}) where {T <: Real} = T #### Statistics -mean(d::FDist{T}) where {T<:Real} = (ν2 = d.ν2; ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) +mean(d::FDist{T}) where {T <: Real} = (ν2 = d.ν2; ν2 > 2 ? ν2 / (ν2 - 2) : T(NaN)) -function mode(d::FDist{T}) where {T<:Real} +function mode(d::FDist{T}) where {T <: Real} (ν1, ν2) = params(d) - ν1 > 2 ? ((ν1 - 2) / ν1) * (ν2 / (ν2 + 2)) : zero(T) + return ν1 > 2 ? 
((ν1 - 2) / ν1) * (ν2 / (ν2 + 2)) : zero(T) end -function var(d::FDist{T}) where {T<:Real} +function var(d::FDist{T}) where {T <: Real} (ν1, ν2) = params(d) - ν2 > 4 ? 2ν2^2 * (ν1 + ν2 - 2) / (ν1 * (ν2 - 2)^2 * (ν2 - 4)) : T(NaN) + return ν2 > 4 ? 2ν2^2 * (ν1 + ν2 - 2) / (ν1 * (ν2 - 2)^2 * (ν2 - 4)) : T(NaN) end -function skewness(d::FDist{T}) where {T<:Real} +function skewness(d::FDist{T}) where {T <: Real} (ν1, ν2) = params(d) if ν2 > 6 return (2ν1 + ν2 - 2) * sqrt(8(ν2 - 4)) / ((ν2 - 6) * sqrt(ν1 * (ν1 + ν2 - 2))) @@ -77,7 +77,7 @@ function skewness(d::FDist{T}) where {T<:Real} end end -function kurtosis(d::FDist{T}) where {T<:Real} +function kurtosis(d::FDist{T}) where {T <: Real} (ν1, ν2) = params(d) if ν2 > 8 a = ν1 * (5ν2 - 22) * (ν1 + ν2 - 2) + (ν2 - 4) * (ν2 - 2)^2 @@ -94,9 +94,9 @@ function entropy(d::FDist) hν2 = ν2 / 2 hs = (ν1 + ν2) / 2 return log(ν2 / ν1) + loggamma(hν1) + loggamma(hν2) - loggamma(hs) + - (1 - hν1) * digamma(hν1) + - (-1 - hν2) * digamma(hν2) + - hs * digamma(hs) + (1 - hν1) * digamma(hν1) + + (-1 - hν2) * digamma(hν2) + + hs * digamma(hs) end #### Evaluation & Sampling diff --git a/src/univariate/continuous/frechet.jl b/src/univariate/continuous/frechet.jl index d7fc5005d2..11def43886 100644 --- a/src/univariate/continuous/frechet.jl +++ b/src/univariate/continuous/frechet.jl @@ -23,13 +23,13 @@ External links * [Fréchet_distribution on Wikipedia](http://en.wikipedia.org/wiki/Fréchet_distribution) """ -struct Frechet{T<:Real} <: ContinuousUnivariateDistribution +struct Frechet{T <: Real} <: ContinuousUnivariateDistribution α::T θ::T - Frechet{T}(α::T, θ::T) where {T<:Real} = new{T}(α, θ) + Frechet{T}(α::T, θ::T) where {T <: Real} = new{T}(α, θ) end -function Frechet(α::T, θ::T; check_args::Bool = true) where {T<:Real} +function Frechet(α::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Frechet (α, α > zero(α)) (θ, θ > zero(θ)) return Frechet{T}(α, θ) end @@ -43,11 +43,11 @@ Frechet(α::Real = 1.0) = Frechet(α, one(α); check_args = false) @distr_support Frechet 0.0 Inf #### Conversions -function convert(::Type{Frechet{T}}, α::S, θ::S) where {T<:Real,S<:Real} - Frechet(T(α), T(θ)) +function convert(::Type{Frechet{T}}, α::S, θ::S) where {T <: Real, S <: Real} + return Frechet(T(α), T(θ)) end -Base.convert(::Type{Frechet{T}}, d::Frechet) where {T<:Real} = Frechet{T}(T(d.α), T(d.θ)) -Base.convert(::Type{Frechet{T}}, d::Frechet{T}) where {T<:Real} = d +Base.convert(::Type{Frechet{T}}, d::Frechet) where {T <: Real} = Frechet{T}(T(d.α), T(d.θ)) +Base.convert(::Type{Frechet{T}}, d::Frechet{T}) where {T <: Real} = d #### Parameters @@ -68,7 +68,7 @@ median(d::Frechet) = d.θ * logtwo^(-1 / d.α) mode(d::Frechet) = (iα = -1 / d.α; d.θ * (1 - iα)^iα) -function var(d::Frechet{T}) where {T<:Real} +function var(d::Frechet{T}) where {T <: Real} if d.α > 2 iα = 1 / d.α return d.θ^2 * (gamma(1 - 2 * iα) - gamma(1 - iα)^2) @@ -77,7 +77,7 @@ function var(d::Frechet{T}) where {T<:Real} end end -function skewness(d::Frechet{T}) where {T<:Real} +function skewness(d::Frechet{T}) where {T <: Real} if d.α > 3 iα = 1 / d.α g1 = gamma(1 - iα) @@ -89,7 +89,7 @@ function skewness(d::Frechet{T}) where {T<:Real} end end -function kurtosis(d::Frechet{T}) where {T<:Real} +function kurtosis(d::Frechet{T}) where {T <: Real} if d.α > 3 iα = 1 / d.α g1 = gamma(1 - iα) @@ -103,13 +103,13 @@ function kurtosis(d::Frechet{T}) where {T<:Real} end function entropy(d::Frechet) - 1 + MathConstants.γ / d.α + MathConstants.γ + log(d.θ / d.α) + return 1 + MathConstants.γ / d.α + 
MathConstants.γ + log(d.θ / d.α) end #### Evaluation -function logpdf(d::Frechet{T}, x::Real) where {T<:Real} +function logpdf(d::Frechet{T}, x::Real) where {T <: Real} (α, θ) = params(d) if x > 0 z = θ / x @@ -132,9 +132,9 @@ cquantile(d::Frechet, p::Real) = xval(d, -log1p(-p)) invlogcdf(d::Frechet, lp::Real) = xval(d, -lp) invlogccdf(d::Frechet, lp::Real) = xval(d, -log1mexp(lp)) -function gradlogpdf(d::Frechet{T}, x::Real) where {T<:Real} +function gradlogpdf(d::Frechet{T}, x::Real) where {T <: Real} (α, θ) = params(d) - insupport(Frechet, x) ? -(α + 1) / x + α * (θ^α) * x^(-α - 1) : zero(T) + return insupport(Frechet, x) ? -(α + 1) / x + α * (θ^α) * x^(-α - 1) : zero(T) end ## Sampling diff --git a/src/univariate/continuous/gamma.jl b/src/univariate/continuous/gamma.jl index 52ae64de70..f8e70411d4 100644 --- a/src/univariate/continuous/gamma.jl +++ b/src/univariate/continuous/gamma.jl @@ -24,13 +24,13 @@ External links * [Gamma distribution on Wikipedia](http://en.wikipedia.org/wiki/Gamma_distribution) """ -struct Gamma{T<:Real} <: ContinuousUnivariateDistribution +struct Gamma{T <: Real} <: ContinuousUnivariateDistribution α::T θ::T Gamma{T}(α, θ) where {T} = new{T}(α, θ) end -function Gamma(α::T, θ::T; check_args::Bool = true) where {T<:Real} +function Gamma(α::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Gamma (α, α > zero(α)) (θ, θ > zero(θ)) return Gamma{T}(α, θ) end @@ -45,9 +45,9 @@ Gamma() = Gamma{Float64}(1.0, 1.0) @distr_support Gamma 0.0 Inf #### Conversions -convert(::Type{Gamma{T}}, α::S, θ::S) where {T<:Real,S<:Real} = Gamma(T(α), T(θ)) -Base.convert(::Type{Gamma{T}}, d::Gamma) where {T<:Real} = Gamma{T}(T(d.α), T(d.θ)) -Base.convert(::Type{Gamma{T}}, d::Gamma{T}) where {T<:Real} = d +convert(::Type{Gamma{T}}, α::S, θ::S) where {T <: Real, S <: Real} = Gamma(T(α), T(θ)) +Base.convert(::Type{Gamma{T}}, d::Gamma) where {T <: Real} = Gamma{T}(T(d.α), T(d.θ)) +Base.convert(::Type{Gamma{T}}, d::Gamma{T}) where {T <: Real} = d #### Parameters @@ -70,12 +70,12 @@ kurtosis(d::Gamma) = 6 / d.α function mode(d::Gamma) (α, θ) = params(d) - α >= 1 ? θ * (α - 1) : error("Gamma has no mode when shape < 1") + return α >= 1 ? θ * (α - 1) : error("Gamma has no mode when shape < 1") end function entropy(d::Gamma) (α, θ) = params(d) - α + loggamma(α) + (1 - α) * digamma(α) + log(θ) + return α + loggamma(α) + (1 - α) * digamma(α) + log(θ) end mgf(d::Gamma, t::Real) = (1 - t * d.θ)^(-d.α) @@ -92,14 +92,14 @@ function kldivergence(p::Gamma, q::Gamma) αq, θq = params(q) θp_over_θq = θp / θq return (αp - αq) * digamma(αp) - loggamma(αp) + loggamma(αq) - αq * log(θp_over_θq) + - αp * (θp_over_θq - 1) + αp * (θp_over_θq - 1) end #### Evaluation & Sampling @_delegate_statsfuns Gamma gamma α θ -gradlogpdf(d::Gamma{T}, x::Real) where {T<:Real} = +gradlogpdf(d::Gamma{T}, x::Real) where {T <: Real} = insupport(Gamma, x) ? 
(d.α - 1) / x - 1 / d.θ : zero(T) function rand(rng::AbstractRNG, d::Gamma) @@ -134,21 +134,21 @@ struct GammaStats <: SufficientStats GammaStats(sx::Real, slogx::Real, tw::Real) = new(sx, slogx, tw) end -function suffstats(::Type{<:Gamma}, x::AbstractArray{T}) where {T<:Real} +function suffstats(::Type{<:Gamma}, x::AbstractArray{T}) where {T <: Real} sx = zero(T) slogx = zero(T) for xi in x sx += xi slogx += log(xi) end - GammaStats(sx, slogx, length(x)) + return GammaStats(sx, slogx, length(x)) end function suffstats( - ::Type{<:Gamma}, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Real} + ::Type{<:Gamma}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Real} n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) @@ -164,22 +164,22 @@ function suffstats( slogx += wi * log(xi) tw += wi end - GammaStats(sx, slogx, tw) + return GammaStats(sx, slogx, tw) end function gamma_mle_update(logmx::Float64, mlogx::Float64, a::Float64) ia = 1 / a z = ia + (mlogx - logmx + log(a) - digamma(a)) / (abs2(a) * (ia - trigamma(a))) - 1 / z + return 1 / z end function fit_mle( - ::Type{<:Gamma}, - ss::GammaStats; - alpha0::Float64 = NaN, - maxiter::Int = 1000, - tol::Float64 = 1e-16, -) + ::Type{<:Gamma}, + ss::GammaStats; + alpha0::Float64 = NaN, + maxiter::Int = 1000, + tol::Float64 = 1.0e-16, + ) mx = ss.sx / ss.tw logmx = log(mx) @@ -196,5 +196,5 @@ function fit_mle( converged = abs(a - a_old) <= tol end - Gamma(a, mx / a) + return Gamma(a, mx / a) end diff --git a/src/univariate/continuous/generalizedextremevalue.jl b/src/univariate/continuous/generalizedextremevalue.jl index 65ed9438ec..d5b86cb99e 100644 --- a/src/univariate/continuous/generalizedextremevalue.jl +++ b/src/univariate/continuous/generalizedextremevalue.jl @@ -34,18 +34,18 @@ External links * [Generalized extreme value distribution on Wikipedia](https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution) """ -struct GeneralizedExtremeValue{T<:Real} <: ContinuousUnivariateDistribution +struct GeneralizedExtremeValue{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T ξ::T function GeneralizedExtremeValue{T}(μ::T, σ::T, ξ::T) where {T} σ > zero(σ) || error("Scale must be positive") - new{T}(μ, σ, ξ) + return new{T}(μ, σ, ξ) end end -GeneralizedExtremeValue(μ::T, σ::T, ξ::T) where {T<:Real} = +GeneralizedExtremeValue(μ::T, σ::T, ξ::T) where {T <: Real} = GeneralizedExtremeValue{T}(μ, σ, ξ) GeneralizedExtremeValue(μ::Real, σ::Real, ξ::Real) = GeneralizedExtremeValue(promote(μ, σ, ξ)...) @@ -55,26 +55,26 @@ end #### Conversions function convert( - ::Type{GeneralizedExtremeValue{T}}, - μ::Real, - σ::Real, - ξ::Real, -) where {T<:Real} - GeneralizedExtremeValue(T(μ), T(σ), T(ξ)) + ::Type{GeneralizedExtremeValue{T}}, + μ::Real, + σ::Real, + ξ::Real, + ) where {T <: Real} + return GeneralizedExtremeValue(T(μ), T(σ), T(ξ)) end function Base.convert( - ::Type{GeneralizedExtremeValue{T}}, - d::GeneralizedExtremeValue, -) where {T<:Real} - GeneralizedExtremeValue{T}(T(d.μ), T(d.σ), T(d.ξ)) + ::Type{GeneralizedExtremeValue{T}}, + d::GeneralizedExtremeValue, + ) where {T <: Real} + return GeneralizedExtremeValue{T}(T(d.μ), T(d.σ), T(d.ξ)) end Base.convert( ::Type{GeneralizedExtremeValue{T}}, d::GeneralizedExtremeValue{T}, -) where {T<:Real} = d +) where {T <: Real} = d -minimum(d::GeneralizedExtremeValue{T}) where {T<:Real} = d.ξ > 0 ? d.μ - d.σ / d.ξ : -T(Inf) -maximum(d::GeneralizedExtremeValue{T}) where {T<:Real} = d.ξ < 0 ? 
d.μ - d.σ / d.ξ : T(Inf) +minimum(d::GeneralizedExtremeValue{T}) where {T <: Real} = d.ξ > 0 ? d.μ - d.σ / d.ξ : -T(Inf) +maximum(d::GeneralizedExtremeValue{T}) where {T <: Real} = d.ξ < 0 ? d.μ - d.σ / d.ξ : T(Inf) #### Parameters @@ -83,7 +83,7 @@ shape(d::GeneralizedExtremeValue) = d.ξ scale(d::GeneralizedExtremeValue) = d.σ location(d::GeneralizedExtremeValue) = d.μ params(d::GeneralizedExtremeValue) = (d.μ, d.σ, d.ξ) -@inline partype(d::GeneralizedExtremeValue{T}) where {T<:Real} = T +@inline partype(d::GeneralizedExtremeValue{T}) where {T <: Real} = T #### Statistics @@ -101,7 +101,7 @@ function median(d::GeneralizedExtremeValue) end end -function mean(d::GeneralizedExtremeValue{T}) where {T<:Real} +function mean(d::GeneralizedExtremeValue{T}) where {T <: Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 @@ -123,7 +123,7 @@ function mode(d::GeneralizedExtremeValue) end end -function var(d::GeneralizedExtremeValue{T}) where {T<:Real} +function var(d::GeneralizedExtremeValue{T}) where {T <: Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 @@ -135,7 +135,7 @@ function var(d::GeneralizedExtremeValue{T}) where {T<:Real} end end -function skewness(d::GeneralizedExtremeValue{T}) where {T<:Real} +function skewness(d::GeneralizedExtremeValue{T}) where {T <: Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 @@ -150,7 +150,7 @@ function skewness(d::GeneralizedExtremeValue{T}) where {T<:Real} end end -function kurtosis(d::GeneralizedExtremeValue{T}) where {T<:Real} +function kurtosis(d::GeneralizedExtremeValue{T}) where {T <: Real} (μ, σ, ξ) = params(d) if abs(ξ) < eps(one(ξ)) # ξ == 0 @@ -189,7 +189,7 @@ insupport(d::GeneralizedExtremeValue, x::Real) = minimum(d) <= x <= maximum(d) #### Evaluation -function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where {T<:Real} +function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where {T <: Real} if x == -Inf || x == Inf || !insupport(d, x) return -T(Inf) else @@ -210,7 +210,7 @@ function logpdf(d::GeneralizedExtremeValue{T}, x::Real) where {T<:Real} end end -function pdf(d::GeneralizedExtremeValue{T}, x::Real) where {T<:Real} +function pdf(d::GeneralizedExtremeValue{T}, x::Real) where {T <: Real} if x == -Inf || x == Inf || !insupport(d, x) return zero(T) else diff --git a/src/univariate/continuous/generalizedpareto.jl b/src/univariate/continuous/generalizedpareto.jl index 099a085c42..b4eeefdfb9 100644 --- a/src/univariate/continuous/generalizedpareto.jl +++ b/src/univariate/continuous/generalizedpareto.jl @@ -31,14 +31,14 @@ External links * [Generalized Pareto distribution on Wikipedia](https://en.wikipedia.org/wiki/Generalized_Pareto_distribution) """ -struct GeneralizedPareto{T<:Real} <: ContinuousUnivariateDistribution +struct GeneralizedPareto{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T ξ::T GeneralizedPareto{T}(μ::T, σ::T, ξ::T) where {T} = new{T}(μ, σ, ξ) end -function GeneralizedPareto(μ::T, σ::T, ξ::T; check_args::Bool = true) where {T<:Real} +function GeneralizedPareto(μ::T, σ::T, ξ::T; check_args::Bool = true) where {T <: Real} @check_args GeneralizedPareto (σ, σ > zero(σ)) return GeneralizedPareto{T}(μ, σ, ξ) end @@ -48,29 +48,29 @@ function GeneralizedPareto(μ::Real, σ::Real, ξ::Real; check_args::Bool = true end function GeneralizedPareto(μ::Integer, σ::Integer, ξ::Integer; check_args::Bool = true) - GeneralizedPareto(float(μ), float(σ), float(ξ); check_args = check_args) + return GeneralizedPareto(float(μ), float(σ), float(ξ); check_args = check_args) end function 
GeneralizedPareto(σ::Real, ξ::Real; check_args::Bool = true) - GeneralizedPareto(zero(σ), σ, ξ; check_args = check_args) + return GeneralizedPareto(zero(σ), σ, ξ; check_args = check_args) end function GeneralizedPareto(ξ::Real; check_args::Bool = true) - GeneralizedPareto(zero(ξ), one(ξ), ξ; check_args = check_args) + return GeneralizedPareto(zero(ξ), one(ξ), ξ; check_args = check_args) end GeneralizedPareto() = GeneralizedPareto{Float64}(0.0, 1.0, 1.0) minimum(d::GeneralizedPareto) = d.μ -maximum(d::GeneralizedPareto{T}) where {T<:Real} = d.ξ < 0 ? d.μ - d.σ / d.ξ : Inf +maximum(d::GeneralizedPareto{T}) where {T <: Real} = d.ξ < 0 ? d.μ - d.σ / d.ξ : Inf #### Conversions -function convert(::Type{GeneralizedPareto{T}}, μ::S, σ::S, ξ::S) where {T<:Real,S<:Real} - GeneralizedPareto(T(μ), T(σ), T(ξ)) +function convert(::Type{GeneralizedPareto{T}}, μ::S, σ::S, ξ::S) where {T <: Real, S <: Real} + return GeneralizedPareto(T(μ), T(σ), T(ξ)) end -function Base.convert(::Type{GeneralizedPareto{T}}, d::GeneralizedPareto) where {T<:Real} - GeneralizedPareto{T}(T(d.μ), T(d.σ), T(d.ξ)) +function Base.convert(::Type{GeneralizedPareto{T}}, d::GeneralizedPareto) where {T <: Real} + return GeneralizedPareto{T}(T(d.μ), T(d.σ), T(d.ξ)) end -Base.convert(::Type{GeneralizedPareto{T}}, d::GeneralizedPareto{T}) where {T<:Real} = d +Base.convert(::Type{GeneralizedPareto{T}}, d::GeneralizedPareto{T}) where {T <: Real} = d #### Parameters @@ -85,7 +85,7 @@ partype(::GeneralizedPareto{T}) where {T} = T median(d::GeneralizedPareto) = d.ξ == 0 ? d.μ + d.σ * logtwo : d.μ + d.σ * expm1(d.ξ * logtwo) / d.ξ -function mean(d::GeneralizedPareto{T}) where {T<:Real} +function mean(d::GeneralizedPareto{T}) where {T <: Real} if d.ξ < 1 return d.μ + d.σ / (1 - d.ξ) else @@ -93,7 +93,7 @@ function mean(d::GeneralizedPareto{T}) where {T<:Real} end end -function var(d::GeneralizedPareto{T}) where {T<:Real} +function var(d::GeneralizedPareto{T}) where {T <: Real} if d.ξ < 0.5 return d.σ^2 / ((1 - d.ξ)^2 * (1 - 2 * d.ξ)) else @@ -101,7 +101,7 @@ function var(d::GeneralizedPareto{T}) where {T<:Real} end end -function skewness(d::GeneralizedPareto{T}) where {T<:Real} +function skewness(d::GeneralizedPareto{T}) where {T <: Real} (μ, σ, ξ) = params(d) if ξ < (1 / 3) @@ -111,7 +111,7 @@ function skewness(d::GeneralizedPareto{T}) where {T<:Real} end end -function kurtosis(d::GeneralizedPareto{T}) where {T<:Real} +function kurtosis(d::GeneralizedPareto{T}) where {T <: Real} (μ, σ, ξ) = params(d) if ξ < 0.25 @@ -126,7 +126,7 @@ end #### Evaluation -function logpdf(d::GeneralizedPareto{T}, x::Real) where {T<:Real} +function logpdf(d::GeneralizedPareto{T}, x::Real) where {T <: Real} (μ, σ, ξ) = params(d) # The logpdf is log(0) outside the support range. 
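# --- editor's sketch, not part of the patch -----------------------------------
# The GeneralizedPareto moment formulas above only hold below their shape
# thresholds (the mean needs ξ < 1, the variance ξ < 1/2, and so on). A minimal
# check against the public API, assuming `using Distributions`:
using Distributions

d = GeneralizedPareto(0.0, 1.0, 0.3)                 # μ = 0, σ = 1, ξ = 0.3
@assert mean(d) ≈ 1 / (1 - 0.3)                      # μ + σ / (1 - ξ)
@assert var(d) ≈ 1 / ((1 - 0.3)^2 * (1 - 2 * 0.3))   # σ² / ((1 - ξ)² (1 - 2ξ))
# ------------------------------------------------------------------------------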
@@ -161,7 +161,7 @@ ccdf(d::GeneralizedPareto, x::Real) = exp(logccdf(d, x)) cdf(d::GeneralizedPareto, x::Real) = -expm1(logccdf(d, x)) logcdf(d::GeneralizedPareto, x::Real) = log1mexp(logccdf(d, x)) -function quantile(d::GeneralizedPareto{T}, p::Real) where {T<:Real} +function quantile(d::GeneralizedPareto{T}, p::Real) where {T <: Real} (μ, σ, ξ) = params(d) if p == 0 diff --git a/src/univariate/continuous/gumbel.jl b/src/univariate/continuous/gumbel.jl index 8f4cda871a..5c5486b83c 100644 --- a/src/univariate/continuous/gumbel.jl +++ b/src/univariate/continuous/gumbel.jl @@ -22,13 +22,13 @@ External links * [Gumbel distribution on Wikipedia](http://en.wikipedia.org/wiki/Gumbel_distribution) """ -struct Gumbel{T<:Real} <: ContinuousUnivariateDistribution +struct Gumbel{T <: Real} <: ContinuousUnivariateDistribution μ::T # location θ::T # scale Gumbel{T}(µ::T, θ::T) where {T} = new{T}(µ, θ) end -function Gumbel(μ::T, θ::T; check_args::Bool = true) where {T<:Real} +function Gumbel(μ::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Gumbel (θ, θ > zero(θ)) return Gumbel{T}(μ, θ) end @@ -47,9 +47,9 @@ Base.eltype(::Type{Gumbel{T}}) where {T} = T #### Conversions -convert(::Type{Gumbel{T}}, μ::S, θ::S) where {T<:Real,S<:Real} = Gumbel(T(μ), T(θ)) -Base.convert(::Type{Gumbel{T}}, d::Gumbel) where {T<:Real} = Gumbel{T}(T(d.μ), T(d.θ)) -Base.convert(::Type{Gumbel{T}}, d::Gumbel{T}) where {T<:Real} = d +convert(::Type{Gumbel{T}}, μ::S, θ::S) where {T <: Real, S <: Real} = Gumbel(T(μ), T(θ)) +Base.convert(::Type{Gumbel{T}}, d::Gumbel) where {T <: Real} = Gumbel{T}(T(d.μ), T(d.θ)) +Base.convert(::Type{Gumbel{T}}, d::Gumbel{T}) where {T <: Real} = d #### Parameters @@ -71,15 +71,15 @@ end mean(d::Gumbel) = d.μ + d.θ * MathConstants.γ -median(d::Gumbel{T}) where {T<:Real} = d.μ - d.θ * log(T(logtwo)) +median(d::Gumbel{T}) where {T <: Real} = d.μ - d.θ * log(T(logtwo)) mode(d::Gumbel) = d.μ -var(d::Gumbel{T}) where {T<:Real} = T(π)^2 / 6 * d.θ^2 +var(d::Gumbel{T}) where {T <: Real} = T(π)^2 / 6 * d.θ^2 -skewness(d::Gumbel{T}) where {T<:Real} = 12 * sqrt(T(6)) * zeta(T(3)) / π^3 +skewness(d::Gumbel{T}) where {T <: Real} = 12 * sqrt(T(6)) * zeta(T(3)) / π^3 -kurtosis(d::Gumbel{T}) where {T<:Real} = T(12) / 5 +kurtosis(d::Gumbel{T}) where {T <: Real} = T(12) / 5 entropy(d::Gumbel) = log(d.θ) + 1 + MathConstants.γ @@ -91,12 +91,12 @@ xval(d::Gumbel, z::Real) = z * d.θ + d.μ function pdf(d::Gumbel, x::Real) z = zval(d, x) - exp(-z - exp(-z)) / d.θ + return exp(-z - exp(-z)) / d.θ end function logpdf(d::Gumbel, x::Real) z = zval(d, x) - -(z + exp(-z) + log(d.θ)) + return -(z + exp(-z) + log(d.θ)) end cdf(d::Gumbel, x::Real) = exp(-exp(-zval(d, x))) diff --git a/src/univariate/continuous/inversegamma.jl b/src/univariate/continuous/inversegamma.jl index a127c3a2ca..87c86450b8 100644 --- a/src/univariate/continuous/inversegamma.jl +++ b/src/univariate/continuous/inversegamma.jl @@ -25,14 +25,14 @@ External links * [Inverse gamma distribution on Wikipedia](http://en.wikipedia.org/wiki/Inverse-gamma_distribution) """ -struct InverseGamma{T<:Real} <: ContinuousUnivariateDistribution +struct InverseGamma{T <: Real} <: ContinuousUnivariateDistribution invd::Gamma{T} θ::T - InverseGamma{T}(α::T, θ::T) where {T<:Real} = + InverseGamma{T}(α::T, θ::T) where {T <: Real} = new{T}(Gamma(α, inv(θ), check_args = false), θ) end -function InverseGamma(α::T, θ::T; check_args::Bool = true) where {T<:Real} +function InverseGamma(α::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args InverseGamma (α, α > 
zero(α)) (θ, θ > zero(θ)) return InverseGamma{T}(α, θ) end @@ -48,12 +48,12 @@ InverseGamma() = InverseGamma{Float64}(1.0, 1.0) @distr_support InverseGamma 0.0 Inf #### Conversions -convert(::Type{InverseGamma{T}}, α::S, θ::S) where {T<:Real,S<:Real} = +convert(::Type{InverseGamma{T}}, α::S, θ::S) where {T <: Real, S <: Real} = InverseGamma(T(α), T(θ)) -function Base.convert(::Type{InverseGamma{T}}, d::InverseGamma) where {T<:Real} +function Base.convert(::Type{InverseGamma{T}}, d::InverseGamma) where {T <: Real} return InverseGamma{T}(T(shape(d)), T(d.θ)) end -Base.convert(::Type{InverseGamma{T}}, d::InverseGamma{T}) where {T<:Real} = d +Base.convert(::Type{InverseGamma{T}}, d::InverseGamma{T}) where {T <: Real} = d #### Parameters @@ -71,24 +71,24 @@ mean(d::InverseGamma{T}) where {T} = ((α, θ) = params(d); α > 1 ? θ / (α - mode(d::InverseGamma) = scale(d) / (shape(d) + 1) -function var(d::InverseGamma{T}) where {T<:Real} +function var(d::InverseGamma{T}) where {T <: Real} (α, θ) = params(d) - α > 2 ? θ^2 / ((α - 1)^2 * (α - 2)) : T(Inf) + return α > 2 ? θ^2 / ((α - 1)^2 * (α - 2)) : T(Inf) end -function skewness(d::InverseGamma{T}) where {T<:Real} +function skewness(d::InverseGamma{T}) where {T <: Real} α = shape(d) - α > 3 ? 4sqrt(α - 2) / (α - 3) : T(NaN) + return α > 3 ? 4sqrt(α - 2) / (α - 3) : T(NaN) end -function kurtosis(d::InverseGamma{T}) where {T<:Real} +function kurtosis(d::InverseGamma{T}) where {T <: Real} α = shape(d) - α > 4 ? (30α - 66) / ((α - 3) * (α - 4)) : T(NaN) + return α > 4 ? (30α - 66) / ((α - 3) * (α - 4)) : T(NaN) end function entropy(d::InverseGamma) (α, θ) = params(d) - α + loggamma(α) - (1 + α) * digamma(α) + log(θ) + return α + loggamma(α) - (1 + α) * digamma(α) + log(θ) end function kldivergence(p::InverseGamma, q::InverseGamma) @@ -101,7 +101,7 @@ end function logpdf(d::InverseGamma, x::Real) (α, θ) = params(d) - α * log(θ) - loggamma(α) - (α + 1) * log(x) - θ / x + return α * log(θ) - loggamma(α) - (α + 1) * log(x) - θ / x end zval(::InverseGamma, x::Real) = inv(max(x, 0)) @@ -116,15 +116,15 @@ cquantile(d::InverseGamma, p::Real) = inv(quantile(d.invd, p)) invlogcdf(d::InverseGamma, p::Real) = inv(invlogccdf(d.invd, p)) invlogccdf(d::InverseGamma, p::Real) = inv(invlogcdf(d.invd, p)) -function mgf(d::InverseGamma{T}, t::Real) where {T<:Real} +function mgf(d::InverseGamma{T}, t::Real) where {T <: Real} (a, b) = params(d) - t == zero(t) ? one(T) : 2(-b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * b * t)) + return t == zero(t) ? one(T) : 2(-b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * b * t)) end -function cf(d::InverseGamma{T}, t::Real) where {T<:Real} +function cf(d::InverseGamma{T}, t::Real) where {T <: Real} (a, b) = params(d) - t == zero(t) ? one(T) + zero(T) * im : - 2(-im * b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * im * b * t)) + return t == zero(t) ? 
one(T) + zero(T) * im : + 2(-im * b * t)^(0.5a) / gamma(a) * besselk(a, sqrt(-4 * im * b * t)) end diff --git a/src/univariate/continuous/inversegaussian.jl b/src/univariate/continuous/inversegaussian.jl index 6579eff927..00cbd17342 100644 --- a/src/univariate/continuous/inversegaussian.jl +++ b/src/univariate/continuous/inversegaussian.jl @@ -23,13 +23,13 @@ External links * [Inverse Gaussian distribution on Wikipedia](http://en.wikipedia.org/wiki/Inverse_Gaussian_distribution) """ -struct InverseGaussian{T<:Real} <: ContinuousUnivariateDistribution +struct InverseGaussian{T <: Real} <: ContinuousUnivariateDistribution μ::T λ::T - InverseGaussian{T}(μ::T, λ::T) where {T<:Real} = new{T}(μ, λ) + InverseGaussian{T}(μ::T, λ::T) where {T <: Real} = new{T}(μ, λ) end -function InverseGaussian(μ::T, λ::T; check_args::Bool = true) where {T<:Real} +function InverseGaussian(μ::T, λ::T; check_args::Bool = true) where {T <: Real} @check_args InverseGaussian (μ, μ > zero(μ)) (λ, λ > zero(λ)) return InverseGaussian{T}(μ, λ) end @@ -46,13 +46,13 @@ InverseGaussian() = InverseGaussian{Float64}(1.0, 1.0) #### Conversions -function convert(::Type{InverseGaussian{T}}, μ::S, λ::S) where {T<:Real,S<:Real} - InverseGaussian(T(μ), T(λ)) +function convert(::Type{InverseGaussian{T}}, μ::S, λ::S) where {T <: Real, S <: Real} + return InverseGaussian(T(μ), T(λ)) end -function Base.convert(::Type{InverseGaussian{T}}, d::InverseGaussian) where {T<:Real} - InverseGaussian{T}(T(d.μ), T(d.λ)) +function Base.convert(::Type{InverseGaussian{T}}, d::InverseGaussian) where {T <: Real} + return InverseGaussian{T}(T(d.μ), T(d.λ)) end -Base.convert(::Type{InverseGaussian{T}}, d::InverseGaussian{T}) where {T<:Real} = d +Base.convert(::Type{InverseGaussian{T}}, d::InverseGaussian{T}) where {T <: Real} = d #### Parameters @@ -73,13 +73,13 @@ kurtosis(d::InverseGaussian) = 15d.μ / d.λ function mode(d::InverseGaussian) μ, λ = params(d) r = μ / λ - μ * (sqrt(1 + (3r / 2)^2) - (3r / 2)) + return μ * (sqrt(1 + (3r / 2)^2) - (3r / 2)) end #### Evaluation -function pdf(d::InverseGaussian{T}, x::Real) where {T<:Real} +function pdf(d::InverseGaussian{T}, x::Real) where {T <: Real} if x > 0 μ, λ = params(d) return sqrt(λ / (twoπ * x^3)) * exp(-λ * (x - μ)^2 / (2μ^2 * x)) @@ -88,7 +88,7 @@ function pdf(d::InverseGaussian{T}, x::Real) where {T<:Real} end end -function logpdf(d::InverseGaussian{T}, x::Real) where {T<:Real} +function logpdf(d::InverseGaussian{T}, x::Real) where {T <: Real} if x > 0 μ, λ = params(d) return (log(λ) - (log2π + 3log(x)) - λ * (x - μ)^2 / (μ^2 * x)) / 2 @@ -169,7 +169,7 @@ function rand(rng::AbstractRNG, d::InverseGaussian) x1 = μ + μ / (2λ) * (w - sqrt(w * (4λ + w))) p1 = μ / (μ + x1) u = rand(rng) - u >= p1 ? μ^2 / x1 : x1 + return u >= p1 ? 
μ^2 / x1 : x1 end #### Fit model @@ -187,14 +187,14 @@ end function suffstats(::Type{<:InverseGaussian}, x::AbstractVector{<:Real}) sx = sum(x) sinvx = sum(inv, x) - InverseGaussianStats(sx, sinvx, length(x)) + return InverseGaussianStats(sx, sinvx, length(x)) end function suffstats( - ::Type{<:InverseGaussian}, - x::AbstractVector{<:Real}, - w::AbstractVector{<:Real}, -) + ::Type{<:InverseGaussian}, + x::AbstractVector{<:Real}, + w::AbstractVector{<:Real}, + ) n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) @@ -208,11 +208,11 @@ function suffstats( sinvx += w[i] / x[i] sw += w[i] end - InverseGaussianStats(sx, sinvx, sw) + return InverseGaussianStats(sx, sinvx, sw) end function fit_mle(::Type{<:InverseGaussian}, ss::InverseGaussianStats) mu = ss.sx / ss.sw invlambda = ss.sinvx / ss.sw - inv(mu) - InverseGaussian(mu, inv(invlambda)) + return InverseGaussian(mu, inv(invlambda)) end diff --git a/src/univariate/continuous/johnsonsu.jl b/src/univariate/continuous/johnsonsu.jl index ebabb5c077..ebc3e51dce 100644 --- a/src/univariate/continuous/johnsonsu.jl +++ b/src/univariate/continuous/johnsonsu.jl @@ -22,15 +22,15 @@ External links * [Johnson's ``S_U``-distribution on Wikipedia](http://en.wikipedia.org/wiki/Johnson%27s_SU-distribution) """ -struct JohnsonSU{T<:Real} <: ContinuousUnivariateDistribution +struct JohnsonSU{T <: Real} <: ContinuousUnivariateDistribution ξ::T λ::T γ::T δ::T - JohnsonSU{T}(ξ::T, λ::T, γ::T, δ::T) where {T<:Real} = new{T}(ξ, λ, γ, δ) + JohnsonSU{T}(ξ::T, λ::T, γ::T, δ::T) where {T <: Real} = new{T}(ξ, λ, γ, δ) end -function JohnsonSU(ξ::T, λ::T, γ::T, δ::T; check_args::Bool = true) where {T<:Real} +function JohnsonSU(ξ::T, λ::T, γ::T, δ::T; check_args::Bool = true) where {T <: Real} @check_args JohnsonSU (λ, λ ≥ zero(λ)) (δ, δ ≥ zero(δ)) return JohnsonSU{T}(ξ, λ, γ, δ) end @@ -43,9 +43,9 @@ JohnsonSU(ξ::Real, λ::Real, γ::Real, δ::Real; check_args::Bool = true) = #### Conversions -Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU) where {T<:Real} = +Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU) where {T <: Real} = JohnsonSU{T}(T(d.ξ), T(d.λ), T(d.γ), T(d.δ)) -Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU{T}) where {T<:Real} = d +Base.convert(::Type{JohnsonSU{T}}, d::JohnsonSU{T}) where {T <: Real} = d #### Parameters @@ -53,23 +53,23 @@ shape(d::JohnsonSU) = d.ξ scale(d::JohnsonSU) = d.λ params(d::JohnsonSU) = (d.ξ, d.λ, d.γ, d.δ) -partype(d::JohnsonSU{T}) where {T<:Real} = T +partype(d::JohnsonSU{T}) where {T <: Real} = T #### Statistics function mean(d::JohnsonSU) a = exp(1 / (2 * d.δ^2)) r = d.γ / d.δ - d.ξ - d.λ * a * sinh(r) + return d.ξ - d.λ * a * sinh(r) end function median(d::JohnsonSU) r = d.γ / d.δ - d.ξ + d.λ * sinh(-r) + return d.ξ + d.λ * sinh(-r) end function var(d::JohnsonSU) a = d.δ^-2 r = d.γ / d.δ - d.λ^2 / 2 * expm1(a) * (exp(a) * cosh(2r) + 1) + return d.λ^2 / 2 * expm1(a) * (exp(a) * cosh(2r) + 1) end #### Evaluation diff --git a/src/univariate/continuous/kolmogorov.jl b/src/univariate/continuous/kolmogorov.jl index 835a2466b8..ad6f130e68 100644 --- a/src/univariate/continuous/kolmogorov.jl +++ b/src/univariate/continuous/kolmogorov.jl @@ -42,7 +42,7 @@ function cdf_raw(d::Kolmogorov, x::Real) f = exp(a) f2 = f * f u = (1 + f * (1 + f2)) - sqrt2π * exp(a / 8) * u / x + return sqrt2π * exp(a / 8) * u / x end function ccdf_raw(d::Kolmogorov, x::Real) @@ -52,11 +52,11 @@ function ccdf_raw(d::Kolmogorov, x::Real) f5 = f2 * f3 f7 = f2 * f5 u = (1 - f3 * (1 - f5 * (1 - f7))) - 2f * u + return 2f * u end 
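# --- editor's sketch, not part of the patch -----------------------------------
# For Kolmogorov, `cdf_raw` converges quickly for small x and `ccdf_raw` for
# large x, so the wrappers below switch between the two series at x = 1. A
# quick consistency check across both branches, assuming `using Distributions`:
using Distributions

d = Kolmogorov()
for x in (0.5, 1.0, 2.0)        # one point per branch plus the switch point
    @assert cdf(d, x) + ccdf(d, x) ≈ 1
end
# ------------------------------------------------------------------------------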
function cdf(d::Kolmogorov, x::Real) - if x <= 0 + return if x <= 0 0 elseif x <= 1 cdf_raw(d, x) @@ -65,7 +65,7 @@ function cdf(d::Kolmogorov, x::Real) end end function ccdf(d::Kolmogorov, x::Real) - if x <= 0 + return if x <= 0 1 elseif x <= 1 1 - cdf_raw(d, x) @@ -82,14 +82,14 @@ function pdf(d::Kolmogorov, x::Real) elseif x <= 1 c = π / (2 * x) s = 0.0 - for i = 1:20 + for i in 1:20 k = ((2i - 1) * c)^2 s += (k - 1) * exp(-k / 2) end return sqrt2π * s / x^2 else s = 0.0 - for i = 1:20 + for i in 1:20 s += (iseven(i) ? -1 : 1) * i^2 * exp(-2(i * x)^2) end return 8 * x * s @@ -107,7 +107,7 @@ logpdf(d::Kolmogorov, x::Real) = log(pdf(d, x)) # Chapter IV.5, pp. 163-165. function rand(rng::AbstractRNG, d::Kolmogorov) t = 0.75 - if rand(rng) < 0.3728329582237386 # cdf(d,t) + return if rand(rng) < 0.3728329582237386 # cdf(d,t) # left interval while true g = rand_trunc_gamma(rng) @@ -163,4 +163,5 @@ function rand_trunc_gamma(rng::AbstractRNG) return g end end + return end diff --git a/src/univariate/continuous/ksdist.jl b/src/univariate/continuous/ksdist.jl index 745e7a7442..eab971eac4 100644 --- a/src/univariate/continuous/ksdist.jl +++ b/src/univariate/continuous/ksdist.jl @@ -82,18 +82,18 @@ function cdf_durbin(d::KSDist, x::Float64) m = 2 * k - 1 H = Matrix{Float64}(undef, m, m) - for i = 1:m, j = 1:m + for i in 1:m, j in 1:m H[i, j] = i - j + 1 >= 0 ? 1 : 0 end r = 1.0 - for i = 1:m + for i in 1:m # (1-h^i) = (1-h)(1+h+...+h^(i-1)) - H[i, 1] = H[m, m-i+1] = ch * r + H[i, 1] = H[m, m - i + 1] = ch * r r += h^i end H[m, 1] += h <= 0.5 ? -h^m : -h^m + (h - ch) - for i = 1:m, j = 1:m - for g = 1:max(i-j+1, 0) + for i in 1:m, j in 1:m + for g in 1:max(i - j + 1, 0) H[i, j] /= g end # we can avoid keeping track of the exponent by dividing by e @@ -102,12 +102,12 @@ function cdf_durbin(d::KSDist, x::Float64) end Q = H^n s = Q[k, k] - s * stirling(n) + return s * stirling(n) end # Miller (1956) approximation function ccdf_miller(d::KSDist, x::Real) - 2 * ccdf(KSOneSided(d.n), x) + return 2 * ccdf(KSOneSided(d.n), x) end ## these functions are used in durbin and pomeranz algorithms @@ -131,7 +131,7 @@ end function stirling(n) if n < 500 s = 1.0 - for i = 1:n + for i in 1:n s *= i / n * ℯ end return s diff --git a/src/univariate/continuous/ksonesided.jl b/src/univariate/continuous/ksonesided.jl index a57d6a5865..6f4474ce92 100644 --- a/src/univariate/continuous/ksonesided.jl +++ b/src/univariate/continuous/ksonesided.jl @@ -25,11 +25,11 @@ function ccdf(d::KSOneSided, x::Float64) end n = d.n s = 0.0 - for j = 0:floor(Int, n-n*x) + for j in 0:floor(Int, n - n * x) p = x + j / n s += pdf(Binomial(n, p), j) / p end - s * x + return s * x end cdf(d::KSOneSided, x::Float64) = 1 - ccdf(d, x) diff --git a/src/univariate/continuous/kumaraswamy.jl b/src/univariate/continuous/kumaraswamy.jl index 83094e5373..eafdf7fc28 100644 --- a/src/univariate/continuous/kumaraswamy.jl +++ b/src/univariate/continuous/kumaraswamy.jl @@ -22,7 +22,7 @@ References - Kumaraswamy, P. (1980). A generalized probability density function for double-bounded random processes. Journal of Hydrology. 46(1-2), 79-88. 
""" -struct Kumaraswamy{T<:Real} <: ContinuousUnivariateDistribution +struct Kumaraswamy{T <: Real} <: ContinuousUnivariateDistribution a::T b::T end diff --git a/src/univariate/continuous/laplace.jl b/src/univariate/continuous/laplace.jl index 5fc63318b0..c054f705eb 100644 --- a/src/univariate/continuous/laplace.jl +++ b/src/univariate/continuous/laplace.jl @@ -22,13 +22,13 @@ External links * [Laplace distribution on Wikipedia](http://en.wikipedia.org/wiki/Laplace_distribution) """ -struct Laplace{T<:Real} <: ContinuousUnivariateDistribution +struct Laplace{T <: Real} <: ContinuousUnivariateDistribution μ::T θ::T Laplace{T}(µ::T, θ::T) where {T} = new{T}(µ, θ) end -function Laplace(μ::T, θ::T; check_args::Bool = true) where {T<:Real} +function Laplace(μ::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Laplace (θ, θ > zero(θ)) return Laplace{T}(μ, θ) end @@ -44,20 +44,20 @@ const Biexponential = Laplace @distr_support Laplace -Inf Inf #### Conversions -function convert(::Type{Laplace{T}}, μ::S, θ::S) where {T<:Real,S<:Real} - Laplace(T(μ), T(θ)) +function convert(::Type{Laplace{T}}, μ::S, θ::S) where {T <: Real, S <: Real} + return Laplace(T(μ), T(θ)) end -function Base.convert(::Type{Laplace{T}}, d::Laplace) where {T<:Real} - Laplace{T}(T(d.μ), T(d.θ)) +function Base.convert(::Type{Laplace{T}}, d::Laplace) where {T <: Real} + return Laplace{T}(T(d.μ), T(d.θ)) end -Base.convert(::Type{Laplace{T}}, d::Laplace{T}) where {T<:Real} = d +Base.convert(::Type{Laplace{T}}, d::Laplace{T}) where {T <: Real} = d #### Parameters location(d::Laplace) = d.μ scale(d::Laplace) = d.θ params(d::Laplace) = (d.μ, d.θ) -@inline partype(d::Laplace{T}) where {T<:Real} = T +@inline partype(d::Laplace{T}) where {T <: Real} = T #### Statistics @@ -68,8 +68,8 @@ mode(d::Laplace) = d.μ var(d::Laplace) = 2d.θ^2 std(d::Laplace) = sqrt2 * d.θ -skewness(d::Laplace{T}) where {T<:Real} = zero(T) -kurtosis(d::Laplace{T}) where {T<:Real} = 3one(T) +skewness(d::Laplace{T}) where {T <: Real} = zero(T) +kurtosis(d::Laplace{T}) where {T <: Real} = 3one(T) entropy(d::Laplace) = log(2d.θ) + 1 @@ -104,20 +104,20 @@ function gradlogpdf(d::Laplace, x::Real) μ, θ = params(d) x == μ && error("Gradient is undefined at the location point") g = 1 / θ - x > μ ? -g : g + return x > μ ? 
-g : g end function mgf(d::Laplace, t::Real) st = d.θ * t - exp(t * d.μ) / ((1 - st) * (1 + st)) + return exp(t * d.μ) / ((1 - st) * (1 + st)) end function cgf(d::Laplace, t) μ, θ = params(d) - t * μ - log1p(-(θ * t)^2) + return t * μ - log1p(-(θ * t)^2) end function cf(d::Laplace, t::Real) st = d.θ * t - cis(t * d.μ) / (1 + st * st) + return cis(t * d.μ) / (1 + st * st) end #### Affine transformations diff --git a/src/univariate/continuous/levy.jl b/src/univariate/continuous/levy.jl index c242abc72f..795bad4e02 100644 --- a/src/univariate/continuous/levy.jl +++ b/src/univariate/continuous/levy.jl @@ -21,13 +21,13 @@ External links * [Lévy distribution on Wikipedia](http://en.wikipedia.org/wiki/Lévy_distribution) """ -struct Levy{T<:Real} <: ContinuousUnivariateDistribution +struct Levy{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T - Levy{T}(μ::T, σ::T) where {T<:Real} = new{T}(μ, σ) + Levy{T}(μ::T, σ::T) where {T <: Real} = new{T}(μ, σ) end -function Levy(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Levy(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Levy (σ, σ > zero(σ)) return Levy{T}(μ, σ) end @@ -42,9 +42,9 @@ Levy(μ::Real = 0.0) = Levy(μ, one(μ); check_args = false) #### Conversions -convert(::Type{Levy{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = Levy(T(μ), T(σ)) -Base.convert(::Type{Levy{T}}, d::Levy) where {T<:Real} = Levy{T}(T(d.μ), T(d.σ)) -Base.convert(::Type{Levy{T}}, d::Levy{T}) where {T<:Real} = d +convert(::Type{Levy{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = Levy(T(μ), T(σ)) +Base.convert(::Type{Levy{T}}, d::Levy) where {T <: Real} = Levy{T}(T(d.μ), T(d.σ)) +Base.convert(::Type{Levy{T}}, d::Levy{T}) where {T <: Real} = d #### Parameters @@ -55,51 +55,51 @@ partype(::Levy{T}) where {T} = T #### Statistics -mean(d::Levy{T}) where {T<:Real} = T(Inf) -var(d::Levy{T}) where {T<:Real} = T(Inf) -skewness(d::Levy{T}) where {T<:Real} = T(NaN) -kurtosis(d::Levy{T}) where {T<:Real} = T(NaN) +mean(d::Levy{T}) where {T <: Real} = T(Inf) +var(d::Levy{T}) where {T <: Real} = T(Inf) +skewness(d::Levy{T}) where {T <: Real} = T(NaN) +kurtosis(d::Levy{T}) where {T <: Real} = T(NaN) mode(d::Levy) = d.σ / 3 + d.μ entropy(d::Levy) = (1 - 3digamma(1) + log(16 * d.σ^2 * π)) / 2 -median(d::Levy{T}) where {T<:Real} = d.μ + d.σ / (2 * T(erfcinv(0.5))^2) +median(d::Levy{T}) where {T <: Real} = d.μ + d.σ / (2 * T(erfcinv(0.5))^2) #### Evaluation -function pdf(d::Levy{T}, x::Real) where {T<:Real} +function pdf(d::Levy{T}, x::Real) where {T <: Real} μ, σ = params(d) if x <= μ return zero(T) end z = x - μ - (sqrt(σ) / sqrt2π) * exp((-σ) / (2z)) / z^(3 // 2) + return (sqrt(σ) / sqrt2π) * exp((-σ) / (2z)) / z^(3 // 2) end -function logpdf(d::Levy{T}, x::Real) where {T<:Real} +function logpdf(d::Levy{T}, x::Real) where {T <: Real} μ, σ = params(d) if x <= μ return T(-Inf) end z = x - μ - (log(σ) - log2π - σ / z - 3log(z)) / 2 + return (log(σ) - log2π - σ / z - 3log(z)) / 2 end -cdf(d::Levy{T}, x::Real) where {T<:Real} = +cdf(d::Levy{T}, x::Real) where {T <: Real} = x <= d.μ ? zero(T) : erfc(sqrt(d.σ / (2(x - d.μ)))) -ccdf(d::Levy{T}, x::Real) where {T<:Real} = +ccdf(d::Levy{T}, x::Real) where {T <: Real} = x <= d.μ ? one(T) : erf(sqrt(d.σ / (2(x - d.μ)))) quantile(d::Levy, p::Real) = d.μ + d.σ / (2 * erfcinv(p)^2) cquantile(d::Levy, p::Real) = d.μ + d.σ / (2 * erfinv(p)^2) -mgf(d::Levy{T}, t::Real) where {T<:Real} = t == zero(t) ? one(T) : T(NaN) +mgf(d::Levy{T}, t::Real) where {T <: Real} = t == zero(t) ? 
one(T) : T(NaN) function cf(d::Levy, t::Real) μ, σ = params(d) - exp(im * μ * t - sqrt(-2im * σ * t)) + return exp(im * μ * t - sqrt(-2im * σ * t)) end diff --git a/src/univariate/continuous/lindley.jl b/src/univariate/continuous/lindley.jl index d47dacc2fa..34c8b3c363 100644 --- a/src/univariate/continuous/lindley.jl +++ b/src/univariate/continuous/lindley.jl @@ -18,7 +18,7 @@ respective mixing weights `p = θ/(1 + θ)` and `1 - p`. [^2]: Ghitany, M. E., Atieh, B., & Nadarajah, S. (2008). Lindley distribution and its application. Mathematics and Computers in Simulation, 78(4), 493–506. """ -struct Lindley{T<:Real} <: ContinuousUnivariateDistribution +struct Lindley{T <: Real} <: ContinuousUnivariateDistribution θ::T Lindley{T}(θ::T) where {T} = new{T}(θ) @@ -149,7 +149,7 @@ function _lambertwm1(x, n = 6) else throw(DomainError(x)) end - for i = 1:n + for i in 1:n β = β / (1 + β) * (1 + log(x / β)) end return β diff --git a/src/univariate/continuous/logistic.jl b/src/univariate/continuous/logistic.jl index 83ecc38c7d..5f0f78e424 100644 --- a/src/univariate/continuous/logistic.jl +++ b/src/univariate/continuous/logistic.jl @@ -23,14 +23,14 @@ External links * [Logistic distribution on Wikipedia](http://en.wikipedia.org/wiki/Logistic_distribution) """ -struct Logistic{T<:Real} <: ContinuousUnivariateDistribution +struct Logistic{T <: Real} <: ContinuousUnivariateDistribution μ::T θ::T Logistic{T}(µ::T, θ::T) where {T} = new{T}(µ, θ) end -function Logistic(μ::T, θ::T; check_args::Bool = true) where {T<:Real} +function Logistic(μ::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Logistic (θ, θ > zero(θ)) return Logistic{T}(μ, θ) end @@ -44,13 +44,13 @@ Logistic(μ::Real = 0.0) = Logistic(μ, one(μ); check_args = false) @distr_support Logistic -Inf Inf #### Conversions -function convert(::Type{Logistic{T}}, μ::S, θ::S) where {T<:Real,S<:Real} - Logistic(T(μ), T(θ)) +function convert(::Type{Logistic{T}}, μ::S, θ::S) where {T <: Real, S <: Real} + return Logistic(T(μ), T(θ)) end -function Base.convert(::Type{Logistic{T}}, d::Logistic) where {T<:Real} - Logistic{T}(T(d.μ), T(d.θ)) +function Base.convert(::Type{Logistic{T}}, d::Logistic) where {T <: Real} + return Logistic{T}(T(d.μ), T(d.θ)) end -Base.convert(::Type{Logistic{T}}, d::Logistic{T}) where {T<:Real} = d +Base.convert(::Type{Logistic{T}}, d::Logistic{T}) where {T <: Real} = d #### Parameters @@ -58,7 +58,7 @@ location(d::Logistic) = d.μ scale(d::Logistic) = d.θ params(d::Logistic) = (d.μ, d.θ) -@inline partype(d::Logistic{T}) where {T<:Real} = T +@inline partype(d::Logistic{T}) where {T <: Real} = T #### Statistics @@ -69,8 +69,8 @@ mode(d::Logistic) = d.μ std(d::Logistic) = π * d.θ / sqrt3 var(d::Logistic) = (π * d.θ)^2 / 3 -skewness(d::Logistic{T}) where {T<:Real} = zero(T) -kurtosis(d::Logistic{T}) where {T<:Real} = T(6) / 5 +skewness(d::Logistic{T}) where {T <: Real} = zero(T) +kurtosis(d::Logistic{T}) where {T <: Real} = T(6) / 5 entropy(d::Logistic) = log(d.θ) + 2 @@ -95,15 +95,15 @@ invlogccdf(d::Logistic, lp::Real) = xval(d, logexpm1(-lp)) function gradlogpdf(d::Logistic, x::Real) e = exp(-zval(d, x)) - ((2e) / (1 + e) - 1) / d.θ + return ((2e) / (1 + e) - 1) / d.θ end mgf(d::Logistic, t::Real) = exp(t * d.μ) / sinc(d.θ * t) function cgf(d::Logistic, t) μ, θ = params(d) - t * μ - log(sinc(θ * t)) + return t * μ - log(sinc(θ * t)) end function cf(d::Logistic, t::Real) a = (π * t) * d.θ - a == zero(a) ? complex(one(a)) : cis(t * d.μ) * (a / sinh(a)) + return a == zero(a) ? 
complex(one(a)) : cis(t * d.μ) * (a / sinh(a)) end diff --git a/src/univariate/continuous/logitnormal.jl b/src/univariate/continuous/logitnormal.jl index 4e74d4af36..67d23d143e 100644 --- a/src/univariate/continuous/logitnormal.jl +++ b/src/univariate/continuous/logitnormal.jl @@ -52,13 +52,13 @@ External links * [Logit normal distribution on Wikipedia](https://en.wikipedia.org/wiki/Logit-normal_distribution) """ -struct LogitNormal{T<:Real} <: ContinuousUnivariateDistribution +struct LogitNormal{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T LogitNormal{T}(μ::T, σ::T) where {T} = new{T}(μ, σ) end -function LogitNormal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function LogitNormal(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args LogitNormal (σ, σ > zero(σ)) return LogitNormal{T}(μ, σ) end @@ -76,19 +76,19 @@ LogitNormal(μ::Real = 0.0) = LogitNormal(μ, one(μ); check_args = false) #### Conversions -convert(::Type{LogitNormal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = +convert(::Type{LogitNormal{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = LogitNormal(T(μ), T(σ)) -function Base.convert(::Type{LogitNormal{T}}, d::LogitNormal) where {T<:Real} - LogitNormal{T}(T(d.μ), T(d.σ)) +function Base.convert(::Type{LogitNormal{T}}, d::LogitNormal) where {T <: Real} + return LogitNormal{T}(T(d.μ), T(d.σ)) end -Base.convert(::Type{LogitNormal{T}}, d::LogitNormal{T}) where {T<:Real} = d +Base.convert(::Type{LogitNormal{T}}, d::LogitNormal{T}) where {T <: Real} = d #### Parameters params(d::LogitNormal) = (d.μ, d.σ) location(d::LogitNormal) = d.μ scale(d::LogitNormal) = d.σ -@inline partype(d::LogitNormal{T}) where {T<:Real} = T +@inline partype(d::LogitNormal{T}) where {T <: Real} = T #### Statistics @@ -114,7 +114,7 @@ end #### Evaluation #TODO check pd and logpdf -function pdf(d::LogitNormal{T}, x::Real) where {T<:Real} +function pdf(d::LogitNormal{T}, x::Real) where {T <: Real} if zero(x) < x < one(x) return normpdf(d.μ, d.σ, logit(x)) / (x * (1 - x)) else @@ -122,7 +122,7 @@ function pdf(d::LogitNormal{T}, x::Real) where {T<:Real} end end -function logpdf(d::LogitNormal{T}, x::Real) where {T<:Real} +function logpdf(d::LogitNormal{T}, x::Real) where {T <: Real} if zero(x) < x < one(x) lx = logit(x) return normlogpdf(d.μ, d.σ, lx) - log(x) - log1p(-x) @@ -131,13 +131,13 @@ function logpdf(d::LogitNormal{T}, x::Real) where {T<:Real} end end -cdf(d::LogitNormal{T}, x::Real) where {T<:Real} = +cdf(d::LogitNormal{T}, x::Real) where {T <: Real} = x ≤ 0 ? zero(T) : x ≥ 1 ? one(T) : normcdf(d.μ, d.σ, logit(x)) -ccdf(d::LogitNormal{T}, x::Real) where {T<:Real} = +ccdf(d::LogitNormal{T}, x::Real) where {T <: Real} = x ≤ 0 ? one(T) : x ≥ 1 ? zero(T) : normccdf(d.μ, d.σ, logit(x)) -logcdf(d::LogitNormal{T}, x::Real) where {T<:Real} = +logcdf(d::LogitNormal{T}, x::Real) where {T <: Real} = x ≤ 0 ? -T(Inf) : x ≥ 1 ? zero(T) : normlogcdf(d.μ, d.σ, logit(x)) -logccdf(d::LogitNormal{T}, x::Real) where {T<:Real} = +logccdf(d::LogitNormal{T}, x::Real) where {T <: Real} = x ≤ 0 ? zero(T) : x ≥ 1 ? 
-T(Inf) : normlogccdf(d.μ, d.σ, logit(x)) quantile(d::LogitNormal, q::Real) = logistic(norminvcdf(d.μ, d.σ, q)) @@ -170,8 +170,8 @@ end ## Fitting -function fit_mle(::Type{<:LogitNormal}, x::AbstractArray{T}) where {T<:Real} +function fit_mle(::Type{<:LogitNormal}, x::AbstractArray{T}) where {T <: Real} lx = logit.(x) μ, σ = mean_and_std(lx) - LogitNormal(μ, σ) + return LogitNormal(μ, σ) end diff --git a/src/univariate/continuous/lognormal.jl b/src/univariate/continuous/lognormal.jl index dc9270b8bf..f64c91e292 100644 --- a/src/univariate/continuous/lognormal.jl +++ b/src/univariate/continuous/lognormal.jl @@ -26,13 +26,13 @@ External links * [Log normal distribution on Wikipedia](http://en.wikipedia.org/wiki/Log-normal_distribution) """ -struct LogNormal{T<:Real} <: ContinuousUnivariateDistribution +struct LogNormal{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T LogNormal{T}(μ::T, σ::T) where {T} = new{T}(μ, σ) end -function LogNormal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function LogNormal(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args LogNormal (σ, σ ≥ zero(σ)) return LogNormal{T}(μ, σ) end @@ -46,10 +46,10 @@ LogNormal(μ::Real = 0.0) = LogNormal(μ, one(μ); check_args = false) @distr_support LogNormal 0.0 Inf #### Conversions -convert(::Type{LogNormal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = LogNormal(T(μ), T(σ)) -Base.convert(::Type{LogNormal{T}}, d::LogNormal) where {T<:Real} = +convert(::Type{LogNormal{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = LogNormal(T(μ), T(σ)) +Base.convert(::Type{LogNormal{T}}, d::LogNormal) where {T <: Real} = LogNormal{T}(T(d.μ), T(d.σ)) -Base.convert(::Type{LogNormal{T}}, d::LogNormal{T}) where {T<:Real} = d +Base.convert(::Type{LogNormal{T}}, d::LogNormal{T}) where {T <: Real} = d #### Parameters @@ -65,18 +65,18 @@ stdlogx(d::LogNormal) = d.σ mean(d::LogNormal) = ((μ, σ) = params(d); exp(μ + σ^2 / 2)) median(d::LogNormal) = exp(d.μ) mode(d::LogNormal) = ((μ, σ) = params(d); exp(μ - σ^2)) -partype(::LogNormal{T}) where {T<:Real} = T +partype(::LogNormal{T}) where {T <: Real} = T function var(d::LogNormal) (μ, σ) = params(d) σ2 = σ^2 - (exp(σ2) - 1) * exp(2μ + σ2) + return (exp(σ2) - 1) * exp(2μ + σ2) end function skewness(d::LogNormal) σ2 = varlogx(d) e = exp(σ2) - (e + 2) * sqrt(e - 1) + return (e + 2) * sqrt(e - 1) end function kurtosis(d::LogNormal) @@ -85,12 +85,12 @@ function kurtosis(d::LogNormal) e2 = e * e e3 = e2 * e e4 = e3 * e - e4 + 2 * e3 + 3 * e2 - 6 + return e4 + 2 * e3 + 3 * e2 - 6 end function entropy(d::LogNormal) (μ, σ) = params(d) - (1 + log(twoπ * σ^2)) / 2 + μ + return (1 + log(twoπ * σ^2)) / 2 + μ end function kldivergence(p::LogNormal, q::LogNormal) @@ -171,8 +171,8 @@ end ## Fitting -function fit_mle(::Type{<:LogNormal}, x::AbstractArray{T}) where {T<:Real} +function fit_mle(::Type{<:LogNormal}, x::AbstractArray{T}) where {T <: Real} lx = log.(x) μ, σ = mean_and_std(lx) - LogNormal(μ, σ) + return LogNormal(μ, σ) end diff --git a/src/univariate/continuous/loguniform.jl b/src/univariate/continuous/loguniform.jl index 3581cf390f..12eeba8ab4 100644 --- a/src/univariate/continuous/loguniform.jl +++ b/src/univariate/continuous/loguniform.jl @@ -10,48 +10,48 @@ External links * [Log uniform distribution on Wikipedia](https://en.wikipedia.org/wiki/Reciprocal_distribution) """ -struct LogUniform{T<:Real} <: ContinuousUnivariateDistribution +struct LogUniform{T <: Real} <: ContinuousUnivariateDistribution a::T b::T - LogUniform{T}(a::T, b::T) where {T<:Real} = new{T}(a, b) + 
LogUniform{T}(a::T, b::T) where {T <: Real} = new{T}(a, b) end -function LogUniform(a::T, b::T; check_args::Bool = true) where {T<:Real} +function LogUniform(a::T, b::T; check_args::Bool = true) where {T <: Real} @check_args LogUniform (0 < a < b) - LogUniform{T}(a, b) + return LogUniform{T}(a, b) end LogUniform(a::Real, b::Real; check_args::Bool = true) = LogUniform(promote(a, b)...; check_args = check_args) -Base.convert(::Type{LogUniform{T}}, d::LogUniform) where {T<:Real} = +Base.convert(::Type{LogUniform{T}}, d::LogUniform) where {T <: Real} = LogUniform{T}(T(d.a), T(d.b)) -Base.convert(::Type{LogUniform{T}}, d::LogUniform{T}) where {T<:Real} = d +Base.convert(::Type{LogUniform{T}}, d::LogUniform{T}) where {T <: Real} = d Base.minimum(d::LogUniform) = d.a Base.maximum(d::LogUniform) = d.b #### Parameters params(d::LogUniform) = (d.a, d.b) -partype(::LogUniform{T}) where {T<:Real} = T +partype(::LogUniform{T}) where {T <: Real} = T #### Statistics function mean(d::LogUniform) a, b = params(d) - (b - a) / log(b / a) + return (b - a) / log(b / a) end function var(d::LogUniform) a, b = params(d) log_ba = log(b / a) - (b^2 - a^2) / (2 * log_ba) - ((b - a) / log_ba)^2 + return (b^2 - a^2) / (2 * log_ba) - ((b - a) / log_ba)^2 end mode(d::LogUniform) = d.a modes(d::LogUniform) = partype(d)[] function entropy(d::LogUniform) a, b = params(d) - log(a * b) / 2 + log(log(b / a)) + return log(a * b) / 2 + log(log(b / a)) end #### Evaluation function pdf(d::LogUniform, x::Real) @@ -68,7 +68,7 @@ logpdf(d::LogUniform, x::Real) = log(pdf(d, x)) function quantile(d::LogUniform, p::Real) p1, a, b = promote(p, params(d)...) # ensure e.g. quantile(LogUniform(1,2), 1f0)::Float32 - exp(p1 * log(b / a)) * a + return exp(p1 * log(b / a)) * a end function kldivergence(p::LogUniform, q::LogUniform) diff --git a/src/univariate/continuous/noncentralbeta.jl b/src/univariate/continuous/noncentralbeta.jl index 5be09c8b07..4c78711aa6 100644 --- a/src/univariate/continuous/noncentralbeta.jl +++ b/src/univariate/continuous/noncentralbeta.jl @@ -3,14 +3,14 @@ *Noncentral Beta distribution* with shape parameters `α > 0` and `β > 0` and noncentrality parameter `λ >= 0`. 
""" -struct NoncentralBeta{T<:Real} <: ContinuousUnivariateDistribution +struct NoncentralBeta{T <: Real} <: ContinuousUnivariateDistribution α::T β::T λ::T NoncentralBeta{T}(α::T, β::T, λ::T) where {T} = new{T}(α, β, λ) end -function NoncentralBeta(α::T, β::T, λ::T; check_args::Bool = true) where {T<:Real} +function NoncentralBeta(α::T, β::T, λ::T; check_args::Bool = true) where {T <: Real} @check_args NoncentralBeta (α, α > zero(α)) (β, β > zero(β)) (λ, λ >= zero(λ)) return NoncentralBeta{T}(α, β, λ) end @@ -24,10 +24,10 @@ NoncentralBeta(α::Integer, β::Integer, λ::Integer; check_args::Bool = true) = #### Conversions -function Base.convert(::Type{NoncentralBeta{T}}, d::NoncentralBeta) where {T<:Real} - NoncentralBeta{T}(T(d.α), T(d.β), T(d.λ)) +function Base.convert(::Type{NoncentralBeta{T}}, d::NoncentralBeta) where {T <: Real} + return NoncentralBeta{T}(T(d.α), T(d.β), T(d.λ)) end -Base.convert(::Type{NoncentralBeta{T}}, d::NoncentralBeta{T}) where {T<:Real} = d +Base.convert(::Type{NoncentralBeta{T}}, d::NoncentralBeta{T}) where {T <: Real} = d ### Parameters @@ -48,12 +48,12 @@ function rand(d::NoncentralBeta) β = d.β a = rand(NoncentralChisq(2d.α, β)) b = rand(Chisq(2β)) - a / (a + b) + return a / (a + b) end function rand(rng::AbstractRNG, d::NoncentralBeta) β = d.β a = rand(rng, NoncentralChisq(2d.α, β)) b = rand(rng, Chisq(2β)) - a / (a + b) + return a / (a + b) end diff --git a/src/univariate/continuous/noncentralchisq.jl b/src/univariate/continuous/noncentralchisq.jl index 3f51df4c2e..497f2775a4 100644 --- a/src/univariate/continuous/noncentralchisq.jl +++ b/src/univariate/continuous/noncentralchisq.jl @@ -23,13 +23,13 @@ External links * [Noncentral chi-squared distribution on Wikipedia](https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution) """ -struct NoncentralChisq{T<:Real} <: ContinuousUnivariateDistribution +struct NoncentralChisq{T <: Real} <: ContinuousUnivariateDistribution ν::T λ::T - NoncentralChisq{T}(ν::T, λ::T) where {T<:Real} = new{T}(ν, λ) + NoncentralChisq{T}(ν::T, λ::T) where {T <: Real} = new{T}(ν, λ) end -function NoncentralChisq(ν::T, λ::T; check_args::Bool = true) where {T<:Real} +function NoncentralChisq(ν::T, λ::T; check_args::Bool = true) where {T <: Real} @check_args NoncentralChisq (ν, ν > zero(ν)) (λ, λ >= zero(λ)) return NoncentralChisq{T}(ν, λ) end @@ -43,18 +43,18 @@ NoncentralChisq(ν::Integer, λ::Integer; check_args::Bool = true) = #### Conversions -function convert(::Type{NoncentralChisq{T}}, ν::S, λ::S) where {T<:Real,S<:Real} - NoncentralChisq(T(ν), T(λ)) +function convert(::Type{NoncentralChisq{T}}, ν::S, λ::S) where {T <: Real, S <: Real} + return NoncentralChisq(T(ν), T(λ)) end -function Base.convert(::Type{NoncentralChisq{T}}, d::NoncentralChisq) where {T<:Real} - NoncentralChisq{T}(T(d.ν), T(d.λ)) +function Base.convert(::Type{NoncentralChisq{T}}, d::NoncentralChisq) where {T <: Real} + return NoncentralChisq{T}(T(d.ν), T(d.λ)) end -Base.convert(::Type{NoncentralChisq{T}}, d::NoncentralChisq{T}) where {T<:Real} = d +Base.convert(::Type{NoncentralChisq{T}}, d::NoncentralChisq{T}) where {T <: Real} = d ### Parameters params(d::NoncentralChisq) = (d.ν, d.λ) -@inline partype(d::NoncentralChisq{T}) where {T<:Real} = T +@inline partype(d::NoncentralChisq{T}) where {T <: Real} = T ### Statistics @@ -65,7 +65,7 @@ skewness(d::NoncentralChisq) = 2sqrt2 * (d.ν + 3d.λ) / sqrt(d.ν + 2d.λ)^3 kurtosis(d::NoncentralChisq) = 12(d.ν + 4d.λ) / (d.ν + 2d.λ)^2 function mgf(d::NoncentralChisq, t::Real) - exp(d.λ * t / (1 - 2t)) * (1 - 2t)^(-d.ν / 
2) + return exp(d.λ * t / (1 - 2t)) * (1 - 2t)^(-d.ν / 2) end function cgf(d::NoncentralChisq, t) ν, λ = params(d) @@ -73,7 +73,7 @@ function cgf(d::NoncentralChisq, t) end function cf(d::NoncentralChisq, t::Real) - cis(d.λ * t / (1 - 2im * t)) * (1 - 2im * t)^(-d.ν / 2) + return cis(d.λ * t / (1 - 2im * t)) * (1 - 2im * t)^(-d.ν / 2) end diff --git a/src/univariate/continuous/noncentralf.jl b/src/univariate/continuous/noncentralf.jl index 2416a1504b..eaa31a207e 100644 --- a/src/univariate/continuous/noncentralf.jl +++ b/src/univariate/continuous/noncentralf.jl @@ -3,14 +3,14 @@ *Noncentral F-distribution* with `ν1 > 0` and `ν2 > 0` degrees of freedom and noncentrality parameter `λ >= 0`. """ -struct NoncentralF{T<:Real} <: ContinuousUnivariateDistribution +struct NoncentralF{T <: Real} <: ContinuousUnivariateDistribution ν1::T ν2::T λ::T NoncentralF{T}(ν1::T, ν2::T, λ::T) where {T} = new{T}(ν1, ν2, λ) end -function NoncentralF(ν1::T, ν2::T, λ::T; check_args::Bool = true) where {T<:Real} +function NoncentralF(ν1::T, ν2::T, λ::T; check_args::Bool = true) where {T <: Real} @check_args NoncentralF (ν1, ν1 > zero(ν1)) (ν2, ν2 > zero(ν2)) (λ, λ >= zero(λ)) return NoncentralF{T}(ν1, ν2, λ) end @@ -24,13 +24,13 @@ NoncentralF(ν1::Integer, ν2::Integer, λ::Integer; check_args::Bool = true) = #### Conversions -function convert(::Type{NoncentralF{T}}, ν1::S, ν2::S, λ::S) where {T<:Real,S<:Real} - NoncentralF(T(ν1), T(ν2), T(λ)) +function convert(::Type{NoncentralF{T}}, ν1::S, ν2::S, λ::S) where {T <: Real, S <: Real} + return NoncentralF(T(ν1), T(ν2), T(λ)) end -function Base.convert(::Type{NoncentralF{T}}, d::NoncentralF) where {T<:Real} - NoncentralF{T}(T(d.ν1), T(d.ν2), T(d.λ)) +function Base.convert(::Type{NoncentralF{T}}, d::NoncentralF) where {T <: Real} + return NoncentralF{T}(T(d.ν1), T(d.ν2), T(d.λ)) end -Base.convert(::Type{NoncentralF{T}}, d::NoncentralF{T}) where {T<:Real} = d +Base.convert(::Type{NoncentralF{T}}, d::NoncentralF{T}) where {T <: Real} = d ### Parameters @@ -39,11 +39,11 @@ partype(::NoncentralF{T}) where {T} = T ### Statistics -function mean(d::NoncentralF{T}) where {T<:Real} - d.ν2 > 2 ? d.ν2 / (d.ν2 - 2) * (d.ν1 + d.λ) / d.ν1 : T(NaN) +function mean(d::NoncentralF{T}) where {T <: Real} + return d.ν2 > 2 ? d.ν2 / (d.ν2 - 2) * (d.ν1 + d.λ) / d.ν1 : T(NaN) end -var(d::NoncentralF{T}) where {T<:Real} = +var(d::NoncentralF{T}) where {T <: Real} = d.ν2 > 4 ? 2d.ν2^2 * ((d.ν1 + d.λ)^2 + (d.ν2 - 2) * (d.ν1 + 2d.λ)) / (d.ν1 * (d.ν2 - 2)^2 * (d.ν2 - 4)) : T(NaN) @@ -56,7 +56,7 @@ var(d::NoncentralF{T}) where {T<:Real} = function rand(rng::AbstractRNG, d::NoncentralF) r1 = rand(rng, NoncentralChisq(d.ν1, d.λ)) / d.ν1 r2 = rand(rng, Chisq(d.ν2)) / d.ν2 - r1 / r2 + return r1 / r2 end # TODO: remove RFunctions dependency once NoncentralChisq has its removed @@ -64,5 +64,5 @@ end function rand(d::NoncentralF) r1 = rand(NoncentralChisq(d.ν1, d.λ)) / d.ν1 r2 = rand(Chisq(d.ν2)) / d.ν2 - r1 / r2 + return r1 / r2 end diff --git a/src/univariate/continuous/noncentralt.jl b/src/univariate/continuous/noncentralt.jl index 5b5d229e04..dc319c7e2d 100644 --- a/src/univariate/continuous/noncentralt.jl +++ b/src/univariate/continuous/noncentralt.jl @@ -3,13 +3,13 @@ *Noncentral Student's t-distribution* with `v > 0` degrees of freedom and noncentrality parameter `λ`. 
""" -struct NoncentralT{T<:Real} <: ContinuousUnivariateDistribution +struct NoncentralT{T <: Real} <: ContinuousUnivariateDistribution ν::T λ::T NoncentralT{T}(ν::T, λ::T) where {T} = new{T}(ν, λ) end -function NoncentralT(ν::T, λ::T; check_args::Bool = true) where {T<:Real} +function NoncentralT(ν::T, λ::T; check_args::Bool = true) where {T <: Real} @check_args NoncentralT (ν, ν > zero(ν)) return NoncentralT{T}(ν, λ) end @@ -22,11 +22,11 @@ NoncentralT(ν::Integer, λ::Integer; check_args::Bool = true) = @distr_support NoncentralT -Inf Inf ### Conversions -convert(::Type{NoncentralT{T}}, ν::S, λ::S) where {T<:Real,S<:Real} = +convert(::Type{NoncentralT{T}}, ν::S, λ::S) where {T <: Real, S <: Real} = NoncentralT(T(ν), T(λ)) -Base.convert(::Type{NoncentralT{T}}, d::NoncentralT) where {T<:Real} = +Base.convert(::Type{NoncentralT{T}}, d::NoncentralT) where {T <: Real} = NoncentralT(T(d.ν), T(d.λ)) -Base.convert(::Type{NoncentralT{T}}, d::NoncentralT{T}) where {T<:Real} = d +Base.convert(::Type{NoncentralT{T}}, d::NoncentralT{T}) where {T <: Real} = d ### Parameters @@ -36,16 +36,16 @@ partype(::NoncentralT{T}) where {T} = T ### Statistics -function mean(d::NoncentralT{T}) where {T<:Real} - if d.ν > 1 +function mean(d::NoncentralT{T}) where {T <: Real} + return if d.ν > 1 isinf(d.ν) ? d.λ : sqrt(d.ν / 2) * d.λ * gamma((d.ν - 1) / 2) / gamma(d.ν / 2) else T(NaN) end end -function var(d::NoncentralT{T}) where {T<:Real} - d.ν > 2 ? d.ν * (1 + d.λ^2) / (d.ν - 2) - mean(d)^2 : T(NaN) +function var(d::NoncentralT{T}) where {T <: Real} + return d.ν > 2 ? d.ν * (1 + d.λ^2) / (d.ν - 2) - mean(d)^2 : T(NaN) end ### Evaluation & Sampling @@ -57,5 +57,5 @@ function rand(rng::AbstractRNG, d::NoncentralT) ν = d.ν z = randn(rng) v = rand(rng, Chisq(ν)) - (z + d.λ) / sqrt(v / ν) + return (z + d.λ) / sqrt(v / ν) end diff --git a/src/univariate/continuous/normal.jl b/src/univariate/continuous/normal.jl index ed20075389..986ddb57cb 100644 --- a/src/univariate/continuous/normal.jl +++ b/src/univariate/continuous/normal.jl @@ -27,13 +27,13 @@ External links * [Normal distribution on Wikipedia](http://en.wikipedia.org/wiki/Normal_distribution) """ -struct Normal{T<:Real} <: ContinuousUnivariateDistribution +struct Normal{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T - Normal{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) + Normal{T}(µ::T, σ::T) where {T <: Real} = new{T}(µ, σ) end -function Normal(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Normal(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Normal (σ, σ >= zero(σ)) return Normal{T}(μ, σ) end @@ -48,16 +48,16 @@ Normal(μ::Real = 0.0) = Normal(μ, one(μ); check_args = false) const Gaussian = Normal # #### Conversions -convert(::Type{Normal{T}}, μ::S, σ::S) where {T<:Real,S<:Real} = Normal(T(μ), T(σ)) -Base.convert(::Type{Normal{T}}, d::Normal) where {T<:Real} = Normal{T}(T(d.μ), T(d.σ)) -Base.convert(::Type{Normal{T}}, d::Normal{T}) where {T<:Real} = d +convert(::Type{Normal{T}}, μ::S, σ::S) where {T <: Real, S <: Real} = Normal(T(μ), T(σ)) +Base.convert(::Type{Normal{T}}, d::Normal) where {T <: Real} = Normal{T}(T(d.μ), T(d.σ)) +Base.convert(::Type{Normal{T}}, d::Normal{T}) where {T <: Real} = d @distr_support Normal -Inf Inf #### Parameters params(d::Normal) = (d.μ, d.σ) -@inline partype(d::Normal{T}) where {T<:Real} = T +@inline partype(d::Normal{T}) where {T <: Real} = T location(d::Normal) = d.μ scale(d::Normal) = d.σ @@ -72,8 +72,8 @@ mode(d::Normal) = d.μ var(d::Normal) = abs2(d.σ) std(d::Normal) = d.σ 
-skewness(d::Normal{T}) where {T<:Real} = zero(T) -kurtosis(d::Normal{T}) where {T<:Real} = zero(T) +skewness(d::Normal{T}) where {T <: Real} = zero(T) +kurtosis(d::Normal{T}) where {T <: Real} = zero(T) entropy(d::Normal) = (log2π + 1) / 2 + log(d.σ) @@ -105,7 +105,7 @@ gradlogpdf(d::Normal, x::Real) = (d.μ - x) / d.σ^2 mgf(d::Normal, t::Real) = exp(t * d.μ + d.σ^2 / 2 * t^2) function cgf(d::Normal, t) μ, σ = params(d) - t * μ + (σ * t)^2 / 2 + return t * μ + (σ * t)^2 / 2 end cf(d::Normal, t::Real) = exp(im * t * d.μ - d.σ^2 / 2 * t^2) @@ -134,7 +134,7 @@ struct NormalStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where {T<:Real} +function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where {T <: Real} n = length(x) # compute s @@ -150,14 +150,14 @@ function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where {T<:Real} @inbounds s2 += abs2(x[i] - m) end - NormalStats(s, m, s2, n) + return NormalStats(s, m, s2, n) end function suffstats( - ::Type{<:Normal}, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Real} + ::Type{<:Normal}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Real} n = length(x) # compute s @@ -176,7 +176,7 @@ function suffstats( @inbounds s2 += w[i] * abs2(x[i] - m) end - NormalStats(s, m, s2, tw) + return NormalStats(s, m, s2, tw) end # Cases where μ or σ is known @@ -191,20 +191,20 @@ struct NormalKnownMuStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where {T<:Real} +function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where {T <: Real} μ = g.μ s2 = zero(T) + zero(μ) for i in eachindex(x) @inbounds s2 += abs2(x[i] - μ) end - NormalKnownMuStats(g.μ, s2, length(x)) + return NormalKnownMuStats(g.μ, s2, length(x)) end function suffstats( - g::NormalKnownMu, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Real} + g::NormalKnownMu, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Real} μ = g.μ s2 = 0.0 * abs2(zero(T) - zero(μ)) tw = 0.0 @@ -213,7 +213,7 @@ function suffstats( @inbounds s2 += abs2(x[i] - μ) * wi tw += wi end - NormalKnownMuStats(g.μ, s2, tw) + return NormalKnownMuStats(g.μ, s2, tw) end struct NormalKnownSigma <: IncompleteDistribution @@ -221,7 +221,7 @@ struct NormalKnownSigma <: IncompleteDistribution function NormalKnownSigma(σ::Float64) σ > 0 || throw(ArgumentError("σ must be a positive value.")) - new(σ) + return new(σ) end end @@ -231,16 +231,16 @@ struct NormalKnownSigmaStats <: SufficientStats tw::Float64 # total sample weight end -function suffstats(g::NormalKnownSigma, x::AbstractArray{T}) where {T<:Real} - NormalKnownSigmaStats(g.σ, sum(x), Float64(length(x))) +function suffstats(g::NormalKnownSigma, x::AbstractArray{T}) where {T <: Real} + return NormalKnownSigmaStats(g.σ, sum(x), Float64(length(x))) end function suffstats( - g::NormalKnownSigma, - x::AbstractArray{T}, - w::AbstractArray{T}, -) where {T<:Real} - NormalKnownSigmaStats(g.σ, dot(x, w), sum(w)) + g::NormalKnownSigma, + x::AbstractArray{T}, + w::AbstractArray{T}, + ) where {T <: Real} + return NormalKnownSigmaStats(g.σ, dot(x, w), sum(w)) end # fit_mle based on sufficient statistics @@ -252,12 +252,12 @@ fit_mle(g::NormalKnownSigma, ss::NormalKnownSigmaStats) = Normal(ss.sx / ss.tw, # generic fit_mle methods function fit_mle( - ::Type{<:Normal}, - x::AbstractArray{T}; - mu::Float64 = NaN, - sigma::Float64 = NaN, -) where {T<:Real} - if isnan(mu) + 
::Type{<:Normal}, + x::AbstractArray{T}; + mu::Float64 = NaN, + sigma::Float64 = NaN, + ) where {T <: Real} + return if isnan(mu) if isnan(sigma) fit_mle(Normal, suffstats(Normal, x)) else @@ -275,13 +275,13 @@ function fit_mle( end function fit_mle( - ::Type{<:Normal}, - x::AbstractArray{T}, - w::AbstractArray{Float64}; - mu::Float64 = NaN, - sigma::Float64 = NaN, -) where {T<:Real} - if isnan(mu) + ::Type{<:Normal}, + x::AbstractArray{T}, + w::AbstractArray{Float64}; + mu::Float64 = NaN, + sigma::Float64 = NaN, + ) where {T <: Real} + return if isnan(mu) if isnan(sigma) fit_mle(Normal, suffstats(Normal, x, w)) else diff --git a/src/univariate/continuous/normalcanon.jl b/src/univariate/continuous/normalcanon.jl index e0fa393a3e..50e019bc78 100644 --- a/src/univariate/continuous/normalcanon.jl +++ b/src/univariate/continuous/normalcanon.jl @@ -6,18 +6,18 @@ Canonical parametrisation of the Normal distribution with canonical parameters ` The two *canonical parameters* of a normal distribution ``\\mathcal{N}(\\mu, \\sigma^2)`` with mean ``\\mu`` and standard deviation ``\\sigma`` are ``\\eta = \\sigma^{-2} \\mu`` and ``\\lambda = \\sigma^{-2}``. """ -struct NormalCanon{T<:Real} <: ContinuousUnivariateDistribution +struct NormalCanon{T <: Real} <: ContinuousUnivariateDistribution η::T # σ^(-2) * μ λ::T # σ^(-2) μ::T # μ function NormalCanon{T}(η, λ; check_args::Bool = true) where {T} @check_args NormalCanon (λ, λ > zero(λ)) - new{T}(η, λ, η / λ) + return new{T}(η, λ, η / λ) end end -NormalCanon(η::T, λ::T; check_args::Bool = true) where {T<:Real} = +NormalCanon(η::T, λ::T; check_args::Bool = true) where {T <: Real} = NormalCanon{typeof(η / λ)}(η, λ; check_args = check_args) NormalCanon(η::Real, λ::Real; check_args::Bool = true) = NormalCanon(promote(η, λ)...; check_args = check_args) @@ -28,11 +28,11 @@ NormalCanon() = NormalCanon{Float64}(0.0, 1.0; check_args = false) @distr_support NormalCanon -Inf Inf #### Type Conversions -convert(::Type{NormalCanon{T}}, η::S, λ::S) where {T<:Real,S<:Real} = +convert(::Type{NormalCanon{T}}, η::S, λ::S) where {T <: Real, S <: Real} = NormalCanon(T(η), T(λ)) -Base.convert(::Type{NormalCanon{T}}, d::NormalCanon) where {T<:Real} = +Base.convert(::Type{NormalCanon{T}}, d::NormalCanon) where {T <: Real} = NormalCanon{T}(T(d.η), T(d.λ); check_args = false) -Base.convert(::Type{NormalCanon{T}}, d::NormalCanon{T}) where {T<:Real} = d +Base.convert(::Type{NormalCanon{T}}, d::NormalCanon{T}) where {T <: Real} = d ## conversion between Normal and NormalCanon @@ -45,7 +45,7 @@ canonform(d::Normal) = convert(NormalCanon, d) #### Parameters params(d::NormalCanon) = (d.η, d.λ) -@inline partype(d::NormalCanon{T}) where {T<:Real} = T +@inline partype(d::NormalCanon{T}) where {T <: Real} = T #### Statistics @@ -53,8 +53,8 @@ mean(d::NormalCanon) = d.μ median(d::NormalCanon) = mean(d) mode(d::NormalCanon) = mean(d) -skewness(d::NormalCanon{T}) where {T<:Real} = zero(T) -kurtosis(d::NormalCanon{T}) where {T<:Real} = zero(T) +skewness(d::NormalCanon{T}) where {T <: Real} = zero(T) +kurtosis(d::NormalCanon{T}) where {T <: Real} = zero(T) var(d::NormalCanon) = 1 / d.λ std(d::NormalCanon) = sqrt(var(d)) diff --git a/src/univariate/continuous/normalinversegaussian.jl b/src/univariate/continuous/normalinversegaussian.jl index 7ef40dff9a..9c884173da 100644 --- a/src/univariate/continuous/normalinversegaussian.jl +++ b/src/univariate/continuous/normalinversegaussian.jl @@ -14,7 +14,7 @@ External links * [Normal-inverse Gaussian distribution on 
Wikipedia](http://en.wikipedia.org/wiki/Normal-inverse_Gaussian_distribution) """ -struct NormalInverseGaussian{T<:Real} <: ContinuousUnivariateDistribution +struct NormalInverseGaussian{T <: Real} <: ContinuousUnivariateDistribution μ::T α::T β::T @@ -23,11 +23,11 @@ struct NormalInverseGaussian{T<:Real} <: ContinuousUnivariateDistribution function NormalInverseGaussian{T}(μ::T, α::T, β::T, δ::T) where {T} γ = sqrt(α^2 - β^2) - new{T}(μ, α, β, δ, γ) + return new{T}(μ, α, β, δ, γ) end end -NormalInverseGaussian(μ::T, α::T, β::T, δ::T) where {T<:Real} = +NormalInverseGaussian(μ::T, α::T, β::T, δ::T) where {T <: Real} = NormalInverseGaussian{T}(μ, α, β, δ) NormalInverseGaussian(μ::Real, α::Real, β::Real, δ::Real) = NormalInverseGaussian(promote(μ, α, β, δ)...) @@ -39,27 +39,27 @@ end #### Conversions function convert( - ::Type{NormalInverseGaussian{T}}, - μ::Real, - α::Real, - β::Real, - δ::Real, -) where {T<:Real} - NormalInverseGaussian(T(μ), T(α), T(β), T(δ)) + ::Type{NormalInverseGaussian{T}}, + μ::Real, + α::Real, + β::Real, + δ::Real, + ) where {T <: Real} + return NormalInverseGaussian(T(μ), T(α), T(β), T(δ)) end function Base.convert( - ::Type{NormalInverseGaussian{T}}, - d::NormalInverseGaussian, -) where {T<:Real} - NormalInverseGaussian{T}(T(d.μ), T(d.α), T(d.β), T(d.δ)) + ::Type{NormalInverseGaussian{T}}, + d::NormalInverseGaussian, + ) where {T <: Real} + return NormalInverseGaussian{T}(T(d.μ), T(d.α), T(d.β), T(d.δ)) end Base.convert( ::Type{NormalInverseGaussian{T}}, d::NormalInverseGaussian{T}, -) where {T<:Real} = d +) where {T <: Real} = d params(d::NormalInverseGaussian) = (d.μ, d.α, d.β, d.δ) -@inline partype(d::NormalInverseGaussian{T}) where {T<:Real} = T +@inline partype(d::NormalInverseGaussian{T}) where {T <: Real} = T mean(d::NormalInverseGaussian) = d.μ + d.δ * d.β / d.γ var(d::NormalInverseGaussian) = d.δ * d.α^2 / d.γ^3 @@ -68,16 +68,16 @@ kurtosis(d::NormalInverseGaussian) = 3 * (1 + 4 * d.β^2 / d.α^2) / (d.δ * d. 
function pdf(d::NormalInverseGaussian, x::Real) μ, α, β, δ = params(d) - α * δ * besselk(1, α * sqrt(δ^2 + (x - μ)^2)) / (π * sqrt(δ^2 + (x - μ)^2)) * - exp(δ * d.γ + β * (x - μ)) + return α * δ * besselk(1, α * sqrt(δ^2 + (x - μ)^2)) / (π * sqrt(δ^2 + (x - μ)^2)) * + exp(δ * d.γ + β * (x - μ)) end function logpdf(d::NormalInverseGaussian, x::Real) μ, α, β, δ = params(d) - log(α * δ) + log(besselk(1, α * sqrt(δ^2 + (x - μ)^2))) - - log(π * sqrt(δ^2 + (x - μ)^2)) + - δ * d.γ + - β * (x - μ) + return log(α * δ) + log(besselk(1, α * sqrt(δ^2 + (x - μ)^2))) - + log(π * sqrt(δ^2 + (x - μ)^2)) + + δ * d.γ + + β * (x - μ) end diff --git a/src/univariate/continuous/pareto.jl b/src/univariate/continuous/pareto.jl index cd8f056dcd..cee7c8bfe7 100644 --- a/src/univariate/continuous/pareto.jl +++ b/src/univariate/continuous/pareto.jl @@ -21,13 +21,13 @@ External links * [Pareto distribution on Wikipedia](http://en.wikipedia.org/wiki/Pareto_distribution) """ -struct Pareto{T<:Real} <: ContinuousUnivariateDistribution +struct Pareto{T <: Real} <: ContinuousUnivariateDistribution α::T θ::T Pareto{T}(α::T, θ::T) where {T} = new{T}(α, θ) end -function Pareto(α::T, θ::T; check_args::Bool = true) where {T<:Real} +function Pareto(α::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Pareto (α, α > zero(α)) (θ, θ > zero(θ)) return Pareto{T}(α, θ) end @@ -42,9 +42,9 @@ Pareto() = Pareto{Float64}(1.0, 1.0) @distr_support Pareto d.θ Inf #### Conversions -convert(::Type{Pareto{T}}, α::Real, θ::Real) where {T<:Real} = Pareto(T(α), T(θ)) -Base.convert(::Type{Pareto{T}}, d::Pareto) where {T<:Real} = Pareto{T}(T(d.α), T(d.θ)) -Base.convert(::Type{Pareto{T}}, d::Pareto{T}) where {T<:Real} = d +convert(::Type{Pareto{T}}, α::Real, θ::Real) where {T <: Real} = Pareto(T(α), T(θ)) +Base.convert(::Type{Pareto{T}}, d::Pareto) where {T <: Real} = Pareto{T}(T(d.α), T(d.θ)) +Base.convert(::Type{Pareto{T}}, d::Pareto{T}) where {T <: Real} = d #### Parameters @@ -52,31 +52,31 @@ shape(d::Pareto) = d.α scale(d::Pareto) = d.θ params(d::Pareto) = (d.α, d.θ) -@inline partype(d::Pareto{T}) where {T<:Real} = T +@inline partype(d::Pareto{T}) where {T <: Real} = T #### Statistics -function mean(d::Pareto{T}) where {T<:Real} +function mean(d::Pareto{T}) where {T <: Real} (α, θ) = params(d) - α > 1 ? α * θ / (α - 1) : T(Inf) + return α > 1 ? α * θ / (α - 1) : T(Inf) end median(d::Pareto) = ((α, θ) = params(d); θ * 2^(1 / α)) mode(d::Pareto) = d.θ -function var(d::Pareto{T}) where {T<:Real} +function var(d::Pareto{T}) where {T <: Real} (α, θ) = params(d) - α > 2 ? (θ^2 * α) / ((α - 1)^2 * (α - 2)) : T(Inf) + return α > 2 ? (θ^2 * α) / ((α - 1)^2 * (α - 2)) : T(Inf) end -function skewness(d::Pareto{T}) where {T<:Real} +function skewness(d::Pareto{T}) where {T <: Real} α = shape(d) - α > 3 ? ((2(1 + α)) / (α - 3)) * sqrt((α - 2) / α) : T(NaN) + return α > 3 ? ((2(1 + α)) / (α - 3)) * sqrt((α - 2) / α) : T(NaN) end -function kurtosis(d::Pareto{T}) where {T<:Real} +function kurtosis(d::Pareto{T}) where {T <: Real} α = shape(d) - α > 4 ? (6(α^3 + α^2 - 6α - 2)) / (α * (α - 3) * (α - 4)) : T(NaN) + return α > 4 ? (6(α^3 + α^2 - 6α - 2)) / (α * (α - 3) * (α - 4)) : T(NaN) end entropy(d::Pareto) = ((α, θ) = params(d); log(θ / α) + 1 / α + 1) @@ -84,14 +84,14 @@ entropy(d::Pareto) = ((α, θ) = params(d); log(θ / α) + 1 / α + 1) #### Evaluation -function pdf(d::Pareto{T}, x::Real) where {T<:Real} +function pdf(d::Pareto{T}, x::Real) where {T <: Real} (α, θ) = params(d) - x >= θ ? α * (θ / x)^α * (1 / x) : zero(T) + return x >= θ ? 
α * (θ / x)^α * (1 / x) : zero(T) end -function logpdf(d::Pareto{T}, x::Real) where {T<:Real} +function logpdf(d::Pareto{T}, x::Real) where {T <: Real} (α, θ) = params(d) - x >= θ ? log(α) + α * log(θ) - (α + 1) * log(x) : -T(Inf) + return x >= θ ? log(α) + α * log(θ) - (α + 1) * log(x) : -T(Inf) end function ccdf(d::Pareto, x::Real) @@ -123,7 +123,7 @@ end ## Fitting -function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where {T<:Real} +function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where {T <: Real} # Based on # https://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation @@ -132,7 +132,7 @@ function fit_mle(::Type{<:Pareto}, x::AbstractArray{T}) where {T<:Real} n = length(x) lθ = log(θ) temp1 = zero(T) - for i = 1:n + for i in 1:n temp1 += log(x[i]) - lθ end α = n / temp1 diff --git a/src/univariate/continuous/pgeneralizedgaussian.jl b/src/univariate/continuous/pgeneralizedgaussian.jl index 0faa11d25d..47e92e8755 100644 --- a/src/univariate/continuous/pgeneralizedgaussian.jl +++ b/src/univariate/continuous/pgeneralizedgaussian.jl @@ -27,15 +27,15 @@ External Links * [Generalized Gaussian on Wikipedia](http://en.wikipedia.org/wiki/Generalized_normal_distribution) * [Reference implementation](https://www.researchgate.net/publication/254282790_Simulation_of_the_p-generalized_Gaussian_distribution) """ -struct PGeneralizedGaussian{T<:Real} <: ContinuousUnivariateDistribution +struct PGeneralizedGaussian{T <: Real} <: ContinuousUnivariateDistribution μ::T α::T p::T - PGeneralizedGaussian{T}(μ::T, α::T, p::T) where {T<:Real} = new{T}(µ, α, p) + PGeneralizedGaussian{T}(μ::T, α::T, p::T) where {T <: Real} = new{T}(µ, α, p) end -function PGeneralizedGaussian(μ::T, α::T, p::T; check_args::Bool = true) where {T<:Real} +function PGeneralizedGaussian(μ::T, α::T, p::T; check_args::Bool = true) where {T <: Real} @check_args PGeneralizedGaussian (α, α > zero(α)) (p, p > zero(p)) return PGeneralizedGaussian{T}(μ, α, p) end @@ -64,19 +64,19 @@ PGeneralizedGaussian() = PGeneralizedGaussian{Float64}(0.0, √2, 2.0) # approxi #### Conversions function Base.convert( - ::Type{PGeneralizedGaussian{T}}, - d::PGeneralizedGaussian, -) where {T<:Real} + ::Type{PGeneralizedGaussian{T}}, + d::PGeneralizedGaussian, + ) where {T <: Real} return PGeneralizedGaussian{T}(T(d.μ), T(d.α), T(d.p)) end -Base.convert(::Type{PGeneralizedGaussian{T}}, d::PGeneralizedGaussian{T}) where {T<:Real} = +Base.convert(::Type{PGeneralizedGaussian{T}}, d::PGeneralizedGaussian{T}) where {T <: Real} = d @distr_support PGeneralizedGaussian -Inf Inf #### Parameters -partype(::PGeneralizedGaussian{T}) where {T<:Real} = T +partype(::PGeneralizedGaussian{T}) where {T <: Real} = T params(d::PGeneralizedGaussian) = (d.μ, d.α, d.p) location(d::PGeneralizedGaussian) = d.μ diff --git a/src/univariate/continuous/rayleigh.jl b/src/univariate/continuous/rayleigh.jl index df84e29778..0fcf8124c6 100644 --- a/src/univariate/continuous/rayleigh.jl +++ b/src/univariate/continuous/rayleigh.jl @@ -23,9 +23,9 @@ External links * [Rayleigh distribution on Wikipedia](http://en.wikipedia.org/wiki/Rayleigh_distribution) """ -struct Rayleigh{T<:Real} <: ContinuousUnivariateDistribution +struct Rayleigh{T <: Real} <: ContinuousUnivariateDistribution σ::T - Rayleigh{T}(σ::T) where {T<:Real} = new{T}(σ) + Rayleigh{T}(σ::T) where {T <: Real} = new{T}(σ) end function Rayleigh(σ::Real; check_args::Bool = true) @@ -40,43 +40,43 @@ Rayleigh() = Rayleigh{Float64}(1.0) #### Conversions -convert(::Type{Rayleigh{T}}, σ::S) where {T<:Real,S<:Real} = 
Rayleigh(T(σ)) -Base.convert(::Type{Rayleigh{T}}, d::Rayleigh) where {T<:Real} = Rayleigh{T}(T(d.σ)) -Base.convert(::Type{Rayleigh{T}}, d::Rayleigh{T}) where {T<:Real} = d +convert(::Type{Rayleigh{T}}, σ::S) where {T <: Real, S <: Real} = Rayleigh(T(σ)) +Base.convert(::Type{Rayleigh{T}}, d::Rayleigh) where {T <: Real} = Rayleigh{T}(T(d.σ)) +Base.convert(::Type{Rayleigh{T}}, d::Rayleigh{T}) where {T <: Real} = d #### Parameters scale(d::Rayleigh) = d.σ params(d::Rayleigh) = (d.σ,) -partype(::Rayleigh{T}) where {T<:Real} = T +partype(::Rayleigh{T}) where {T <: Real} = T #### Statistics mean(d::Rayleigh) = sqrthalfπ * d.σ -median(d::Rayleigh{T}) where {T<:Real} = sqrt2 * sqrt(T(logtwo)) * d.σ # sqrt(log(4)) +median(d::Rayleigh{T}) where {T <: Real} = sqrt2 * sqrt(T(logtwo)) * d.σ # sqrt(log(4)) mode(d::Rayleigh) = d.σ -var(d::Rayleigh{T}) where {T<:Real} = (2 - T(π) / 2) * d.σ^2 -std(d::Rayleigh{T}) where {T<:Real} = sqrt(2 - T(π) / 2) * d.σ +var(d::Rayleigh{T}) where {T <: Real} = (2 - T(π) / 2) * d.σ^2 +std(d::Rayleigh{T}) where {T <: Real} = sqrt(2 - T(π) / 2) * d.σ -skewness(d::Rayleigh{T}) where {T<:Real} = 2 * sqrtπ * (T(π) - 3) / (4 - T(π))^(3 / 2) -kurtosis(d::Rayleigh{T}) where {T<:Real} = -(6 * T(π)^2 - 24 * T(π) + 16) / (4 - T(π))^2 +skewness(d::Rayleigh{T}) where {T <: Real} = 2 * sqrtπ * (T(π) - 3) / (4 - T(π))^(3 / 2) +kurtosis(d::Rayleigh{T}) where {T <: Real} = -(6 * T(π)^2 - 24 * T(π) + 16) / (4 - T(π))^2 -entropy(d::Rayleigh{T}) where {T<:Real} = +entropy(d::Rayleigh{T}) where {T <: Real} = 1 - T(logtwo) / 2 + T(MathConstants.γ) / 2 + log(d.σ) #### Evaluation -function pdf(d::Rayleigh{T}, x::Real) where {T<:Real} +function pdf(d::Rayleigh{T}, x::Real) where {T <: Real} σ2 = d.σ^2 - x > 0 ? (x / σ2) * exp(-(x^2) / (2σ2)) : zero(T) + return x > 0 ? (x / σ2) * exp(-(x^2) / (2σ2)) : zero(T) end -function logpdf(d::Rayleigh{T}, x::Real) where {T<:Real} +function logpdf(d::Rayleigh{T}, x::Real) where {T <: Real} σ2 = d.σ^2 - x > 0 ? log(x / σ2) - (x^2) / (2σ2) : -T(Inf) + return x > 0 ? 
log(x / σ2) - (x^2) / (2σ2) : -T(Inf) end function logccdf(d::Rayleigh, x::Real) @@ -99,7 +99,7 @@ rand(rng::AbstractRNG, d::Rayleigh) = d.σ * sqrt(2 * randexp(rng)) #### Fitting -function fit_mle(::Type{<:Rayleigh}, x::AbstractArray{T}) where {T<:Real} +function fit_mle(::Type{<:Rayleigh}, x::AbstractArray{T}) where {T <: Real} # Compute MLE (and unbiased estimator) of σ^2 s2 = zero(T) for xi in x diff --git a/src/univariate/continuous/rician.jl b/src/univariate/continuous/rician.jl index f79abd872f..d2d1c5f311 100644 --- a/src/univariate/continuous/rician.jl +++ b/src/univariate/continuous/rician.jl @@ -27,13 +27,13 @@ External links: * [Rician distribution on Wikipedia](https://en.wikipedia.org/wiki/Rice_distribution) """ -struct Rician{T<:Real} <: ContinuousUnivariateDistribution +struct Rician{T <: Real} <: ContinuousUnivariateDistribution ν::T σ::T Rician{T}(ν, σ) where {T} = new{T}(ν, σ) end -function Rician(ν::T, σ::T; check_args::Bool = true) where {T<:Real} +function Rician(ν::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Rician (ν, ν ≥ zero(ν)) (σ, σ ≥ zero(σ)) return Rician{T}(ν, σ) end @@ -48,12 +48,12 @@ Rician(ν::Integer, σ::Integer; check_args::Bool = true) = #### Conversions -function convert(::Type{Rician{T}}, ν::Real, σ::Real) where {T<:Real} - Rician(T(ν), T(σ)) +function convert(::Type{Rician{T}}, ν::Real, σ::Real) where {T <: Real} + return Rician(T(ν), T(σ)) end -Base.convert(::Type{Rician{T}}, d::Rician) where {T<:Real} = Rician{T}(T(d.ν), T(d.σ)) -Base.convert(::Type{Rician{T}}, d::Rician{T}) where {T<:Real} = d +Base.convert(::Type{Rician{T}}, d::Rician) where {T <: Real} = Rician{T}(T(d.ν), T(d.σ)) +Base.convert(::Type{Rician{T}}, d::Rician{T}) where {T <: Real} = d #### Parameters @@ -61,7 +61,7 @@ shape(d::Rician) = d.ν^2 / (2 * d.σ^2) scale(d::Rician) = d.ν^2 + 2 * d.σ^2 params(d::Rician) = (d.ν, d.σ) -partype(d::Rician{T}) where {T<:Real} = T +partype(d::Rician{T}) where {T <: Real} = T #### Statistics @@ -74,11 +74,11 @@ var(d::Rician) = 2 * d.σ^2 + d.ν^2 - halfπ * d.σ^2 * _Lhalf(-d.ν^2 / (2 * d function mode(d::Rician) m = mean(d) - _minimize_gss(x -> -pdf(d, x), zero(m), m) + return _minimize_gss(x -> -pdf(d, x), zero(m), m) end # helper: 1D minimization using Golden-section search -function _minimize_gss(f, a, b; tol = 1e-12) +function _minimize_gss(f, a, b; tol = 1.0e-12) ϕ = (√5 + 1) / 2 c = b - (b - a) / ϕ d = a + (b - a) / ϕ @@ -91,7 +91,7 @@ function _minimize_gss(f, a, b; tol = 1e-12) c = b - (b - a) / ϕ d = a + (b - a) / ϕ end - (b + a) / 2 + return (b + a) / 2 end #### PDF/CDF/quantile delegated to NoncentralChisq @@ -135,13 +135,13 @@ end function rand(rng::AbstractRNG, d::Rician) x = randn(rng) * d.σ + d.ν y = randn(rng) * d.σ - hypot(x, y) + return hypot(x, y) end #### Fitting # implementation based on the Koay inversion technique -function fit(::Type{<:Rician}, x::AbstractArray{T}; tol = 1e-12, maxiters = 500) where {T} +function fit(::Type{<:Rician}, x::AbstractArray{T}; tol = 1.0e-12, maxiters = 500) where {T} μ₁ = mean(x) μ₂ = var(x) r = μ₁ / √μ₂ @@ -156,7 +156,7 @@ function fit(::Type{<:Rician}, x::AbstractArray{T}; tol = 1e-12, maxiters = 500) ((2 + θ^2) * besseli(0, θ^2 / 4) + θ^2 * besseli(1, θ^2 / 4))^2 g(θ) = sqrt(ξ(θ) * (1 + r^2) - 2) θ = g(1) - for j = 1:maxiters + for j in 1:maxiters θ⁻ = θ θ = g(θ) abs(θ - θ⁻) < tol && break @@ -165,7 +165,7 @@ function fit(::Type{<:Rician}, x::AbstractArray{T}; tol = 1e-12, maxiters = 500) σ = convert(float(T), sqrt(μ₂ / ξθ)) ν = convert(float(T), sqrt(μ₁^2 + (ξθ - 2) * σ^2)) 
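
A note on the two numerical helpers in this Rician hunk: `mode` minimizes the negative pdf over `(0, mean(d))` with the golden-section search `_minimize_gss`, and `fit` iterates Koay's fixed point `θ = g(θ)` until successive iterates differ by less than `tol`. A minimal standalone sketch of the same golden-section scheme, with a toy function and bracket chosen purely for illustration (nothing below is part of the patch):

```julia
# Golden-section search over a unimodal f on [a, b]; same scheme as `_minimize_gss`.
function gss_minimize(f, a, b; tol = 1.0e-10)
    ϕ = (√5 + 1) / 2
    c, d = b - (b - a) / ϕ, a + (b - a) / ϕ
    while abs(c - d) > tol
        f(c) < f(d) ? (b = d) : (a = c)   # keep the sub-bracket containing the minimum
        c, d = b - (b - a) / ϕ, a + (b - a) / ϕ
    end
    return (a + b) / 2
end

gss_minimize(x -> (x - 1.3)^2 + 0.5, 0.0, 3.0)  # ≈ 1.3
```
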
end - Rician(ν, σ) + return Rician(ν, σ) end # Not implemented: diff --git a/src/univariate/continuous/semicircle.jl b/src/univariate/continuous/semicircle.jl index 7543084ebb..a87a8cf62e 100644 --- a/src/univariate/continuous/semicircle.jl +++ b/src/univariate/continuous/semicircle.jl @@ -18,9 +18,9 @@ External links * [Wigner semicircle distribution on Wikipedia](https://en.wikipedia.org/wiki/Wigner_semicircle_distribution) """ -struct Semicircle{T<:Real} <: ContinuousUnivariateDistribution +struct Semicircle{T <: Real} <: ContinuousUnivariateDistribution r::T - Semicircle{T}(r::T) where {T<:Real} = new{T}(r) + Semicircle{T}(r::T) where {T <: Real} = new{T}(r) end @@ -35,7 +35,7 @@ Semicircle(r::Integer; check_args::Bool = true) = @distr_support Semicircle -d.r +d.r params(d::Semicircle) = (d.r,) -partype(::Semicircle{T}) where {T<:Real} = T +partype(::Semicircle{T}) where {T <: Real} = T mean(d::Semicircle) = zero(d.r) var(d::Semicircle) = d.r^2 / 4 diff --git a/src/univariate/continuous/skewedexponentialpower.jl b/src/univariate/continuous/skewedexponentialpower.jl index c144982867..4e2ae2dedd 100644 --- a/src/univariate/continuous/skewedexponentialpower.jl +++ b/src/univariate/continuous/skewedexponentialpower.jl @@ -29,7 +29,7 @@ location(d) # Get the location parameter, i.e. μ scale(d) # Get the scale parameter, i.e. σ ``` """ -struct SkewedExponentialPower{T<:Real} <: ContinuousUnivariateDistribution +struct SkewedExponentialPower{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T p::T @@ -38,12 +38,12 @@ struct SkewedExponentialPower{T<:Real} <: ContinuousUnivariateDistribution end function SkewedExponentialPower( - µ::T, - σ::T, - p::T, - α::T; - check_args::Bool = true, -) where {T<:Real} + µ::T, + σ::T, + p::T, + α::T; + check_args::Bool = true, + ) where {T <: Real} @check_args SkewedExponentialPower (σ, σ > zero(σ)) (p, p > zero(p)) ( α, zero(α) < α < one(α), @@ -52,12 +52,12 @@ function SkewedExponentialPower( end function SkewedExponentialPower( - μ::Real, - σ::Real, - p::Real = 2, - α::Real = 1 // 2; - check_args::Bool = true, -) + μ::Real, + σ::Real, + p::Real = 2, + α::Real = 1 // 2; + check_args::Bool = true, + ) return SkewedExponentialPower(promote(μ, σ, p, α)...; check_args = check_args) end SkewedExponentialPower(μ::Real = 0) = @@ -67,18 +67,18 @@ SkewedExponentialPower(μ::Real = 0) = ### Conversions function Base.convert( - ::Type{SkewedExponentialPower{T}}, - d::SkewedExponentialPower, -) where {T<:Real} - SkewedExponentialPower{T}(T(d.μ), T(d.σ), T(d.p), T(d.α)) + ::Type{SkewedExponentialPower{T}}, + d::SkewedExponentialPower, + ) where {T <: Real} + return SkewedExponentialPower{T}(T(d.μ), T(d.σ), T(d.p), T(d.α)) end Base.convert( ::Type{SkewedExponentialPower{T}}, d::SkewedExponentialPower{T}, -) where {T<:Real} = d +) where {T <: Real} = d ### Parameters -@inline partype(::SkewedExponentialPower{T}) where {T<:Real} = T +@inline partype(::SkewedExponentialPower{T}) where {T <: Real} = T params(d::SkewedExponentialPower) = (d.μ, d.σ, d.p, d.α) location(d::SkewedExponentialPower) = d.μ @@ -92,7 +92,7 @@ function m_k(d::SkewedExponentialPower, k::Integer) _, σ, p, α = params(d) inv_p = inv(p) return k * (logtwo + inv_p * log(p) + log(σ)) + loggamma((1 + k) * inv_p) - - loggamma(inv_p) + log(abs((-1)^k * α^(1 + k) + (1 - α)^(1 + k))) + loggamma(inv_p) + log(abs((-1)^k * α^(1 + k) + (1 - α)^(1 + k))) end # needed for odd moments in log scale @@ -111,16 +111,16 @@ function logpdf(d::SkewedExponentialPower, x::Real) inv_p = inv(p) return -( logtwo + - log(σ) + - 
loggamma(inv_p) + - ((1 - p) * log(p) + (abs(μ - x) / (2 * σ * a))^p) / p + log(σ) + + loggamma(inv_p) + + ((1 - p) * log(p) + (abs(μ - x) / (2 * σ * a))^p) / p ) end function cdf(d::SkewedExponentialPower, x::Real) μ, σ, p, α = params(d) inv_p = inv(p) - if x <= μ + return if x <= μ α * ccdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * α))^p) else α + (1 - α) * cdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * (1 - α)))^p) @@ -129,7 +129,7 @@ end function logcdf(d::SkewedExponentialPower, x::Real) μ, σ, p, α = params(d) inv_p = inv(p) - if x <= μ + return if x <= μ log(α) + logccdf(Gamma(inv_p), inv_p * (abs((x - μ) / σ) / (2 * α))^p) else log1mexp( @@ -141,7 +141,7 @@ end function quantile(d::SkewedExponentialPower, p::Real) μ, σ, _, α = params(d) inv_p = inv(d.p) - if p <= α + return if p <= α μ - 2 * α * σ * (d.p * quantile(Gamma(inv_p), (α - p) / α))^inv_p else μ + 2 * (1 - α) * σ * (d.p * quantile(Gamma(inv_p), (p - α) / (1 - α)))^inv_p diff --git a/src/univariate/continuous/skewnormal.jl b/src/univariate/continuous/skewnormal.jl index a0756a221d..714008267b 100644 --- a/src/univariate/continuous/skewnormal.jl +++ b/src/univariate/continuous/skewnormal.jl @@ -18,79 +18,77 @@ External links * [Discourse](https://discourse.julialang.org/t/skew-normal-distribution/21549/7) * [SkewDist.jl](https://github.com/STOR-i/SkewDist.jl) -""" -struct SkewNormal{T<:Real} <: ContinuousUnivariateDistribution - ξ::T - ω::T - α::T - SkewNormal{T}(ξ::T, ω::T, α::T) where {T} = new{T}(ξ, ω, α) -end - -function SkewNormal(ξ::T, ω::T, α::T; check_args::Bool = true) where {T<:Real} - @check_args SkewNormal (ω, ω > zero(ω)) - return SkewNormal{T}(ξ, ω, α) -end - -SkewNormal(ξ::Real, ω::Real, α::Real; check_args::Bool = true) = - SkewNormal(promote(ξ, ω, α)...; check_args = check_args) -SkewNormal(ξ::Integer, ω::Integer, α::Integer; check_args::Bool = true) = - SkewNormal(float(ξ), float(ω), float(α); check_args = check_args) -SkewNormal(α::Real = 0.0) = SkewNormal(zero(α), one(α), α; check_args = false) - -@distr_support SkewNormal -Inf Inf - -#### Conversions -convert(::Type{SkewNormal{T}}, ξ::S, ω::S, α::S) where {T<:Real,S<:Real} = - SkewNormal(T(ξ), T(ω), T(α)) -Base.convert(::Type{SkewNormal{T}}, d::SkewNormal) where {T<:Real} = - SkewNormal{T}(T(d.ξ), T(d.ω), T(d.α)) -Base.convert(::Type{SkewNormal{T}}, d::SkewNormal{T}) where {T<:Real} = d - -#### Parameters -params(d::SkewNormal) = (d.ξ, d.ω, d.α) -@inline partype(d::SkewNormal{T}) where {T<:Real} = T - -#### Statistics -delta(d::SkewNormal) = d.α / √(1 + d.α^2) -mean_z(d::SkewNormal) = √(2 / π) * delta(d) -std_z(d::SkewNormal) = 1 - (2 / π) * delta(d)^2 - -mean(d::SkewNormal) = d.ξ + d.ω * mean_z(d) -var(d::SkewNormal) = abs2(d.ω) * (1 - mean_z(d)^2) -std(d::SkewNormal) = √var(d) -skewness(d::SkewNormal) = ((4 - π) / 2) * (mean_z(d)^3 / (1 - mean_z(d)^2)^(3 / 2)) -kurtosis(d::SkewNormal) = - 2 * (π - 3) * ((delta(d) * sqrt(2 / π))^4 / (1 - 2 * (delta(d)^2) / π)^2) - -# no analytic expression for max m_0(d) but accurate numerical approximation -m_0(d::SkewNormal) = - mean_z(d) - (skewness(d) * std_z(d)) / 2 - (sign(d.α) / 2) * exp(-2π / abs(d.α)) -mode(d::SkewNormal) = d.ξ + d.ω * m_0(d) - -#### Evaluation -pdf(d::SkewNormal, x::Real) = - (2 / d.ω) * normpdf((x - d.ξ) / d.ω) * normcdf(d.α * (x - d.ξ) / d.ω) -logpdf(d::SkewNormal, x::Real) = - log(2) - log(d.ω) + normlogpdf((x - d.ξ) / d.ω) + normlogcdf(d.α * (x - d.ξ) / d.ω) -#cdf requires Owen's T function. 
-#cdf/quantile etc - -mgf(d::SkewNormal, t::Real) = - 2 * exp(d.ξ * t + (d.ω^2 * t^2) / 2) * normcdf(d.ω * delta(d) * t) - -cf(d::SkewNormal, t::Real) = - exp(im * t * d.ξ - (d.ω^2 * t^2) / 2) * - (1 + im * erfi((d.ω * delta(d) * t) / (sqrt(2)))) - -#### Sampling -function rand(rng::AbstractRNG, d::SkewNormal) - u0 = randn(rng) - v = randn(rng) - δ = delta(d) - u1 = δ * u0 + √(1 - δ^2) * v - return d.ξ + d.ω * sign(u0) * u1 -end - -## Fitting # to be added see: https://github.com/STOR-i/SkewDist.jl/issues/3 - - +""" +struct SkewNormal{T <: Real} <: ContinuousUnivariateDistribution + ξ::T + ω::T + α::T + SkewNormal{T}(ξ::T, ω::T, α::T) where {T} = new{T}(ξ, ω, α) +end + +function SkewNormal(ξ::T, ω::T, α::T; check_args::Bool = true) where {T <: Real} + @check_args SkewNormal (ω, ω > zero(ω)) + return SkewNormal{T}(ξ, ω, α) +end + +SkewNormal(ξ::Real, ω::Real, α::Real; check_args::Bool = true) = + SkewNormal(promote(ξ, ω, α)...; check_args = check_args) +SkewNormal(ξ::Integer, ω::Integer, α::Integer; check_args::Bool = true) = + SkewNormal(float(ξ), float(ω), float(α); check_args = check_args) +SkewNormal(α::Real = 0.0) = SkewNormal(zero(α), one(α), α; check_args = false) + +@distr_support SkewNormal -Inf Inf + +#### Conversions +convert(::Type{SkewNormal{T}}, ξ::S, ω::S, α::S) where {T <: Real, S <: Real} = + SkewNormal(T(ξ), T(ω), T(α)) +Base.convert(::Type{SkewNormal{T}}, d::SkewNormal) where {T <: Real} = + SkewNormal{T}(T(d.ξ), T(d.ω), T(d.α)) +Base.convert(::Type{SkewNormal{T}}, d::SkewNormal{T}) where {T <: Real} = d + +#### Parameters +params(d::SkewNormal) = (d.ξ, d.ω, d.α) +@inline partype(d::SkewNormal{T}) where {T <: Real} = T + +#### Statistics +delta(d::SkewNormal) = d.α / √(1 + d.α^2) +mean_z(d::SkewNormal) = √(2 / π) * delta(d) +std_z(d::SkewNormal) = 1 - (2 / π) * delta(d)^2 + +mean(d::SkewNormal) = d.ξ + d.ω * mean_z(d) +var(d::SkewNormal) = abs2(d.ω) * (1 - mean_z(d)^2) +std(d::SkewNormal) = √var(d) +skewness(d::SkewNormal) = ((4 - π) / 2) * (mean_z(d)^3 / (1 - mean_z(d)^2)^(3 / 2)) +kurtosis(d::SkewNormal) = + 2 * (π - 3) * ((delta(d) * sqrt(2 / π))^4 / (1 - 2 * (delta(d)^2) / π)^2) + +# no analytic expression for max m_0(d) but accurate numerical approximation +m_0(d::SkewNormal) = + mean_z(d) - (skewness(d) * std_z(d)) / 2 - (sign(d.α) / 2) * exp(-2π / abs(d.α)) +mode(d::SkewNormal) = d.ξ + d.ω * m_0(d) + +#### Evaluation +pdf(d::SkewNormal, x::Real) = + (2 / d.ω) * normpdf((x - d.ξ) / d.ω) * normcdf(d.α * (x - d.ξ) / d.ω) +logpdf(d::SkewNormal, x::Real) = + log(2) - log(d.ω) + normlogpdf((x - d.ξ) / d.ω) + normlogcdf(d.α * (x - d.ξ) / d.ω) +#cdf requires Owen's T function. 
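
The comment above flags a real gap rather than an oversight: the skew-normal cdf is Φ(z) − 2T(z, α) with z = (x − ξ)/ω, where T is Owen's T function, which this package does not currently provide. A rough sketch of how it could be evaluated, using plain quadrature for Owen's T (QuadGK and StatsFuns are assumed external packages, and `skewnormal_cdf` is a hypothetical helper, not the package's API):

```julia
using QuadGK: quadgk
using StatsFuns: normcdf

# Owen's T by direct quadrature: T(h, a) = (1/2π) ∫₀ᵃ exp(-h²(1 + x²)/2) / (1 + x²) dx
owent(h, a) = quadgk(x -> exp(-h^2 * (1 + x^2) / 2) / (1 + x^2), 0, a)[1] / (2π)

# Hypothetical cdf for a skew normal with location ξ, scale ω, shape α
skewnormal_cdf(ξ, ω, α, x) = (z = (x - ξ) / ω; normcdf(z) - 2 * owent(z, α))

skewnormal_cdf(0.0, 1.0, 0.0, 0.3)  # α = 0 recovers the normal cdf, ≈ 0.618
```
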
+#cdf/quantile etc + +mgf(d::SkewNormal, t::Real) = + 2 * exp(d.ξ * t + (d.ω^2 * t^2) / 2) * normcdf(d.ω * delta(d) * t) + +cf(d::SkewNormal, t::Real) = + exp(im * t * d.ξ - (d.ω^2 * t^2) / 2) * + (1 + im * erfi((d.ω * delta(d) * t) / (sqrt(2)))) + +#### Sampling +function rand(rng::AbstractRNG, d::SkewNormal) + u0 = randn(rng) + v = randn(rng) + δ = delta(d) + u1 = δ * u0 + √(1 - δ^2) * v + return d.ξ + d.ω * sign(u0) * u1 +end + +## Fitting # to be added see: https://github.com/STOR-i/SkewDist.jl/issues/3 diff --git a/src/univariate/continuous/studentizedrange.jl b/src/univariate/continuous/studentizedrange.jl index 743a05d32e..0f8af09d2d 100644 --- a/src/univariate/continuous/studentizedrange.jl +++ b/src/univariate/continuous/studentizedrange.jl @@ -27,13 +27,13 @@ External links * [Studentized range distribution on Wikipedia](http://en.wikipedia.org/wiki/Studentized_range_distribution) """ -struct StudentizedRange{T<:Real} <: ContinuousUnivariateDistribution +struct StudentizedRange{T <: Real} <: ContinuousUnivariateDistribution ν::T k::T - StudentizedRange{T}(ν::T, k::T) where {T<:Real} = new{T}(ν, k) + StudentizedRange{T}(ν::T, k::T) where {T <: Real} = new{T}(ν, k) end -function StudentizedRange(ν::T, k::T; check_args::Bool = true) where {T<:Real} +function StudentizedRange(ν::T, k::T; check_args::Bool = true) where {T <: Real} @check_args StudentizedRange (ν, ν > zero(ν)) (k, k > one(k)) return StudentizedRange{T}(ν, k) end @@ -48,18 +48,18 @@ StudentizedRange(ν::Real, k::Real; check_args::Bool = true) = ### Conversions -function convert(::Type{StudentizedRange{T}}, ν::S, k::S) where {T<:Real,S<:Real} - StudentizedRange(T(ν), T(k)) +function convert(::Type{StudentizedRange{T}}, ν::S, k::S) where {T <: Real, S <: Real} + return StudentizedRange(T(ν), T(k)) end -function Base.convert(::Type{StudentizedRange{T}}, d::StudentizedRange) where {T<:Real} - StudentizedRange{T}(T(d.ν), T(d.k)) +function Base.convert(::Type{StudentizedRange{T}}, d::StudentizedRange) where {T <: Real} + return StudentizedRange{T}(T(d.ν), T(d.k)) end -Base.convert(::Type{StudentizedRange{T}}, d::StudentizedRange{T}) where {T<:Real} = d +Base.convert(::Type{StudentizedRange{T}}, d::StudentizedRange{T}) where {T <: Real} = d ### Parameters params(d::StudentizedRange) = (d.ν, d.k) -@inline partype(d::StudentizedRange{T}) where {T<:Real} = T +@inline partype(d::StudentizedRange{T}) where {T <: Real} = T ### Evaluation & Sampling diff --git a/src/univariate/continuous/symtriangular.jl b/src/univariate/continuous/symtriangular.jl index df1217d992..1d013693a0 100644 --- a/src/univariate/continuous/symtriangular.jl +++ b/src/univariate/continuous/symtriangular.jl @@ -17,13 +17,13 @@ location(d) # Get the location parameter, i.e. μ scale(d) # Get the scale parameter, i.e. 
σ ``` """ -struct SymTriangularDist{T<:Real} <: ContinuousUnivariateDistribution +struct SymTriangularDist{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T - SymTriangularDist{T}(µ::T, σ::T) where {T<:Real} = new{T}(µ, σ) + SymTriangularDist{T}(µ::T, σ::T) where {T <: Real} = new{T}(µ, σ) end -function SymTriangularDist(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function SymTriangularDist(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args SymTriangularDist (σ, σ > zero(σ)) return SymTriangularDist{T}(μ, σ) end @@ -38,13 +38,13 @@ SymTriangularDist(μ::Real = 0.0) = SymTriangularDist(μ, one(μ); check_args = #### Conversions -function convert(::Type{SymTriangularDist{T}}, μ::Real, σ::Real) where {T<:Real} - SymTriangularDist(T(μ), T(σ)) +function convert(::Type{SymTriangularDist{T}}, μ::Real, σ::Real) where {T <: Real} + return SymTriangularDist(T(μ), T(σ)) end -function Base.convert(::Type{SymTriangularDist{T}}, d::SymTriangularDist) where {T<:Real} - SymTriangularDist{T}(T(d.μ), T(d.σ)) +function Base.convert(::Type{SymTriangularDist{T}}, d::SymTriangularDist) where {T <: Real} + return SymTriangularDist{T}(T(d.μ), T(d.σ)) end -Base.convert(::Type{SymTriangularDist{T}}, d::SymTriangularDist{T}) where {T<:Real} = d +Base.convert(::Type{SymTriangularDist{T}}, d::SymTriangularDist{T}) where {T <: Real} = d #### Parameters @@ -52,7 +52,7 @@ location(d::SymTriangularDist) = d.μ scale(d::SymTriangularDist) = d.σ params(d::SymTriangularDist) = (d.μ, d.σ) -@inline partype(d::SymTriangularDist{T}) where {T<:Real} = T +@inline partype(d::SymTriangularDist{T}) where {T <: Real} = T #### Statistics @@ -62,8 +62,8 @@ median(d::SymTriangularDist) = d.μ mode(d::SymTriangularDist) = d.μ var(d::SymTriangularDist) = d.σ^2 / 6 -skewness(d::SymTriangularDist{T}) where {T<:Real} = zero(T) -kurtosis(d::SymTriangularDist{T}) where {T<:Real} = T(-3) / 5 +skewness(d::SymTriangularDist{T}) where {T <: Real} = zero(T) +kurtosis(d::SymTriangularDist{T}) where {T <: Real} = T(-3) / 5 entropy(d::SymTriangularDist) = 1 // 2 + log(d.σ) @@ -106,7 +106,7 @@ invlogcdf(d::SymTriangularDist, lp::Real) = lp < loghalf ? xval(d, expm1(1 / 2 * (lp - loghalf))) : xval(d, 1 - sqrt(-2expm1(lp))) function invlogccdf(d::SymTriangularDist, lp::Real) - lp > loghalf ? xval(d, sqrt(-2 * expm1(lp)) - 1) : xval(d, -(expm1((lp - loghalf) / 2))) + return lp > loghalf ? 
xval(d, sqrt(-2 * expm1(lp)) - 1) : xval(d, -(expm1((lp - loghalf) / 2))) end @@ -114,14 +114,14 @@ function mgf(d::SymTriangularDist, t::Real) (μ, σ) = params(d) a = σ * t a == zero(a) && return one(a) - 4 * exp(μ * t) * (sinh(a / 2) / a)^2 + return 4 * exp(μ * t) * (sinh(a / 2) / a)^2 end function cf(d::SymTriangularDist, t::Real) (μ, σ) = params(d) a = σ * t a == zero(a) && return complex(one(a)) - 4 * cis(μ * t) * (sin(a / 2) / a)^2 + return 4 * cis(μ * t) * (sin(a / 2) / a)^2 end diff --git a/src/univariate/continuous/tdist.jl b/src/univariate/continuous/tdist.jl index 90ba4e87e4..58f257e6da 100644 --- a/src/univariate/continuous/tdist.jl +++ b/src/univariate/continuous/tdist.jl @@ -20,9 +20,9 @@ External links [Student's T distribution on Wikipedia](https://en.wikipedia.org/wiki/Student%27s_t-distribution) """ -struct TDist{T<:Real} <: ContinuousUnivariateDistribution +struct TDist{T <: Real} <: ContinuousUnivariateDistribution ν::T - TDist{T}(ν::T) where {T<:Real} = new{T}(ν) + TDist{T}(ν::T) where {T <: Real} = new{T}(ν) end function TDist(ν::Real; check_args::Bool = true) @@ -35,41 +35,41 @@ TDist(ν::Integer; check_args::Bool = true) = TDist(float(ν); check_args = chec @distr_support TDist -Inf Inf #### Conversions -convert(::Type{TDist{T}}, ν::Real) where {T<:Real} = TDist(T(ν)) -Base.convert(::Type{TDist{T}}, d::TDist) where {T<:Real} = TDist{T}(T(d.ν)) -Base.convert(::Type{TDist{T}}, d::TDist{T}) where {T<:Real} = d +convert(::Type{TDist{T}}, ν::Real) where {T <: Real} = TDist(T(ν)) +Base.convert(::Type{TDist{T}}, d::TDist) where {T <: Real} = TDist{T}(T(d.ν)) +Base.convert(::Type{TDist{T}}, d::TDist{T}) where {T <: Real} = d #### Parameters dof(d::TDist) = d.ν params(d::TDist) = (d.ν,) -@inline partype(d::TDist{T}) where {T<:Real} = T +@inline partype(d::TDist{T}) where {T <: Real} = T #### Statistics -mean(d::TDist{T}) where {T<:Real} = d.ν > 1 ? zero(T) : T(NaN) -median(d::TDist{T}) where {T<:Real} = zero(T) -mode(d::TDist{T}) where {T<:Real} = zero(T) +mean(d::TDist{T}) where {T <: Real} = d.ν > 1 ? zero(T) : T(NaN) +median(d::TDist{T}) where {T <: Real} = zero(T) +mode(d::TDist{T}) where {T <: Real} = zero(T) -function var(d::TDist{T}) where {T<:Real} +function var(d::TDist{T}) where {T <: Real} ν = d.ν isinf(ν) && return one(T) - ν > 2 ? ν / (ν - 2) : ν > 1 ? T(Inf) : T(NaN) + return ν > 2 ? ν / (ν - 2) : ν > 1 ? T(Inf) : T(NaN) end -skewness(d::TDist{T}) where {T<:Real} = d.ν > 3 ? zero(T) : T(NaN) +skewness(d::TDist{T}) where {T <: Real} = d.ν > 3 ? zero(T) : T(NaN) -function kurtosis(d::TDist{T}) where {T<:Real} +function kurtosis(d::TDist{T}) where {T <: Real} ν = d.ν - ν > 4 ? 6 / (ν - 4) : ν > 2 ? T(Inf) : T(NaN) + return ν > 4 ? 6 / (ν - 4) : ν > 2 ? T(Inf) : T(NaN) end -function entropy(d::TDist{T}) where {T<:Real} +function entropy(d::TDist{T}) where {T <: Real} isinf(d.ν) && return entropy(Normal(zero(T), one(T))) h = d.ν / 2 h1 = h + 1 // 2 - h1 * (digamma(h1) - digamma(h)) + log(d.ν) / 2 + logbeta(h, 1 // 2) + return h1 * (digamma(h1) - digamma(h)) + log(d.ν) / 2 + logbeta(h, 1 // 2) end @@ -83,13 +83,13 @@ function rand(rng::AbstractRNG, d::TDist) return randn(rng, typeof(z)) / (isinf(ν) ? 
one(z) : z) end -function cf(d::TDist{T}, t::Real) where {T<:Real} +function cf(d::TDist{T}, t::Real) where {T <: Real} isinf(d.ν) && return cf(Normal(zero(T), one(T)), t) t == 0 && return complex(1) h = d.ν / 2 q = d.ν / 4 - complex(2(q * t^2)^q * besselk(h, sqrt(d.ν) * abs(t)) / gamma(h)) + return complex(2(q * t^2)^q * besselk(h, sqrt(d.ν) * abs(t)) / gamma(h)) end -gradlogpdf(d::TDist{T}, x::Real) where {T<:Real} = +gradlogpdf(d::TDist{T}, x::Real) where {T <: Real} = isinf(d.ν) ? gradlogpdf(Normal(zero(T), one(T)), x) : -((d.ν + 1) * x) / (x^2 + d.ν) diff --git a/src/univariate/continuous/triangular.jl b/src/univariate/continuous/triangular.jl index 6df23a11c8..8b123e55b0 100644 --- a/src/univariate/continuous/triangular.jl +++ b/src/univariate/continuous/triangular.jl @@ -27,14 +27,14 @@ External links * [Triangular distribution on Wikipedia](http://en.wikipedia.org/wiki/Triangular_distribution) """ -struct TriangularDist{T<:Real} <: ContinuousUnivariateDistribution +struct TriangularDist{T <: Real} <: ContinuousUnivariateDistribution a::T b::T c::T - TriangularDist{T}(a::T, b::T, c::T) where {T<:Real} = new{T}(a, b, c) + TriangularDist{T}(a::T, b::T, c::T) where {T <: Real} = new{T}(a, b, c) end -function TriangularDist(a::T, b::T, c::T; check_args::Bool = true) where {T<:Real} +function TriangularDist(a::T, b::T, c::T; check_args::Bool = true) where {T <: Real} @check_args TriangularDist (a <= c <= b) return TriangularDist{T}(a, b, c) end @@ -42,7 +42,7 @@ end TriangularDist(a::Real, b::Real, c::Real; check_args::Bool = true) = TriangularDist(promote(a, b, c)...; check_args = check_args) function TriangularDist(a::Integer, b::Integer, c::Integer; check_args::Bool = true) - TriangularDist(float(a), float(b), float(c); check_args = check_args) + return TriangularDist(float(a), float(b), float(c); check_args = check_args) end TriangularDist(a::Real, b::Real) = TriangularDist(a, b, middle(a, b); check_args = false) @@ -50,16 +50,16 @@ TriangularDist(a::Real, b::Real) = TriangularDist(a, b, middle(a, b); check_args @distr_support TriangularDist d.a d.b #### Conversions -convert(::Type{TriangularDist{T}}, a::Real, b::Real, c::Real) where {T<:Real} = +convert(::Type{TriangularDist{T}}, a::Real, b::Real, c::Real) where {T <: Real} = TriangularDist(T(a), T(b), T(c)) -Base.convert(::Type{TriangularDist{T}}, d::TriangularDist) where {T<:Real} = +Base.convert(::Type{TriangularDist{T}}, d::TriangularDist) where {T <: Real} = TriangularDist{T}(T(d.a), T(d.b), T(d.c)) -Base.convert(::Type{TriangularDist{T}}, d::TriangularDist{T}) where {T<:Real} = d +Base.convert(::Type{TriangularDist{T}}, d::TriangularDist{T}) where {T <: Real} = d #### Parameters params(d::TriangularDist) = (d.a, d.b, d.c) -partype(::TriangularDist{T}) where {T<:Real} = T +partype(::TriangularDist{T}) where {T <: Real} = T #### Statistics @@ -71,24 +71,24 @@ mean(d::TriangularDist) = (d.a + d.b + d.c) / 3 function median(d::TriangularDist) (a, b, c) = params(d) m = middle(a, b) - c >= m ? a + sqrt((b - a) * (c - a) / 2) : b - sqrt((b - a) * (b - c) / 2) + return c >= m ? 
a + sqrt((b - a) * (c - a) / 2) : b - sqrt((b - a) * (b - c) / 2) end _pretvar(a::Real, b::Real, c::Real) = a * a + b * b + c * c - a * b - a * c - b * c function var(d::TriangularDist) (a, b, c) = params(d) - _pretvar(a, b, c) / 18 + return _pretvar(a, b, c) / 18 end -function skewness(d::TriangularDist{T}) where {T<:Real} +function skewness(d::TriangularDist{T}) where {T <: Real} (a, b, c) = params(d) - sqrt2 * (a + b - 2c) * (2a - b - c) * (a - 2b + c) / (5 * _pretvar(a, b, c)^(T(3) / 2)) + return sqrt2 * (a + b - 2c) * (2a - b - c) * (a - 2b + c) / (5 * _pretvar(a, b, c)^(T(3) / 2)) end -kurtosis(d::TriangularDist{T}) where {T<:Real} = T(-3) / 5 +kurtosis(d::TriangularDist{T}) where {T <: Real} = T(-3) / 5 -entropy(d::TriangularDist{T}) where {T<:Real} = one(T) / 2 + log((d.b - d.a) / 2) +entropy(d::TriangularDist{T}) where {T <: Real} = one(T) / 2 + log((d.b - d.a) / 2) #### Evaluation @@ -123,7 +123,7 @@ function quantile(d::TriangularDist, p::Real) c_m_a = c - a b_m_a = b - a rl = c_m_a / b_m_a - p <= rl ? a + sqrt(b_m_a * c_m_a * p) : b - sqrt(b_m_a * (b - c) * (1 - p)) + return p <= rl ? a + sqrt(b_m_a * c_m_a * p) : b - sqrt(b_m_a * (b - c) * (1 - p)) end """ @@ -147,7 +147,7 @@ function mgf(d::TriangularDist, t::Real) if c < b # Case: a < c < b return exp(c * t) * - ((c - a) * _phi2((a - c) * t) + (b - c) * _phi2((b - c) * t)) / (b - a) + ((c - a) * _phi2((a - c) * t) + (b - c) * _phi2((b - c) * t)) / (b - a) else # Case: a < c = b return exp(c * t) * _phi2((a - c) * t) @@ -183,8 +183,8 @@ function cf(d::TriangularDist, t::Real) if c < b # Case: a < c < b return cis(c * t) * - ((c - a) * _cisphi2((a - c) * t) + (b - c) * _cisphi2((b - c) * t)) / - (b - a) + ((c - a) * _cisphi2((a - c) * t) + (b - c) * _cisphi2((b - c) * t)) / + (b - a) else # Case: a < c = b return cis(c * t) * _cisphi2((a - c) * t) @@ -205,6 +205,6 @@ function rand(rng::AbstractRNG, d::TriangularDist) (a, b, c) = params(d) b_m_a = b - a u = rand(rng) - b_m_a * u < (c - a) ? d.a + sqrt(u * b_m_a * (c - a)) : - d.b - sqrt((1 - u) * b_m_a * (b - c)) + return b_m_a * u < (c - a) ? 
d.a + sqrt(u * b_m_a * (c - a)) : + d.b - sqrt((1 - u) * b_m_a * (b - c)) end diff --git a/src/univariate/continuous/triweight.jl b/src/univariate/continuous/triweight.jl index a9cb0aaf12..203f0e0589 100644 --- a/src/univariate/continuous/triweight.jl +++ b/src/univariate/continuous/triweight.jl @@ -1,13 +1,13 @@ """ Triweight(μ, σ) """ -struct Triweight{T<:Real} <: ContinuousUnivariateDistribution +struct Triweight{T <: Real} <: ContinuousUnivariateDistribution μ::T σ::T Triweight{T}(µ::T, σ::T) where {T} = new{T}(µ, σ) end -function Triweight(μ::T, σ::T; check_args::Bool = true) where {T<:Real} +function Triweight(μ::T, σ::T; check_args::Bool = true) where {T <: Real} @check_args Triweight (σ, σ > zero(σ)) return Triweight{T}(μ, σ) end @@ -22,17 +22,17 @@ Triweight(μ::Real = 0.0) = Triweight(μ, one(μ); check_args = false) ## Conversions -convert(::Type{Triweight{T}}, μ::Real, σ::Real) where {T<:Real} = Triweight(T(μ), T(σ)) -Base.convert(::Type{Triweight{T}}, d::Triweight) where {T<:Real} = +convert(::Type{Triweight{T}}, μ::Real, σ::Real) where {T <: Real} = Triweight(T(μ), T(σ)) +Base.convert(::Type{Triweight{T}}, d::Triweight) where {T <: Real} = Triweight{T}(T(d.μ), T(d.σ)) -Base.convert(::Type{Triweight{T}}, d::Triweight{T}) where {T<:Real} = d +Base.convert(::Type{Triweight{T}}, d::Triweight{T}) where {T <: Real} = d ## Parameters location(d::Triweight) = d.μ scale(d::Triweight) = d.σ params(d::Triweight) = (d.μ, d.σ) -@inline partype(d::Triweight{T}) where {T<:Real} = T +@inline partype(d::Triweight{T}) where {T <: Real} = T ## Properties @@ -41,38 +41,38 @@ median(d::Triweight) = d.μ mode(d::Triweight) = d.μ var(d::Triweight) = d.σ^2 / 9 -skewness(d::Triweight{T}) where {T<:Real} = zero(T) -kurtosis(d::Triweight{T}) where {T<:Real} = T(1) / 33 - 3 +skewness(d::Triweight{T}) where {T <: Real} = zero(T) +kurtosis(d::Triweight{T}) where {T <: Real} = T(1) / 33 - 3 ## Functions -function pdf(d::Triweight{T}, x::Real) where {T<:Real} +function pdf(d::Triweight{T}, x::Real) where {T <: Real} u = abs(x - d.μ) / d.σ - u >= 1 ? zero(T) : 1.09375 * (1 - u * u)^3 / d.σ + return u >= 1 ? zero(T) : 1.09375 * (1 - u * u)^3 / d.σ end logpdf(d::Triweight, x::Real) = log(pdf(d, x)) -function cdf(d::Triweight{T}, x::Real) where {T<:Real} +function cdf(d::Triweight{T}, x::Real) where {T <: Real} u = (x - d.μ) / d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) + return u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) end -function ccdf(d::Triweight{T}, x::Real) where {T<:Real} +function ccdf(d::Triweight{T}, x::Real) where {T <: Real} u = (d.μ - x) / d.σ - u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) + return u ≤ -1 ? zero(T) : u ≥ 1 ? one(T) : 0.03125 * (1 + u)^4 * @horner(u, 16, -29, 20, -5) end @quantile_newton Triweight -function mgf(d::Triweight{T}, t::Float64) where {T<:Real} +function mgf(d::Triweight{T}, t::Float64) where {T <: Real} a = d.σ * t a2 = a * a - a == 0 ? one(T) : - 105 * exp(d.μ * t) * ((15 / a2 + 1) * cosh(a) - (15 / a2 - 6) / a * sinh(a)) / (a2 * a2) + return a == 0 ? one(T) : + 105 * exp(d.μ * t) * ((15 / a2 + 1) * cosh(a) - (15 / a2 - 6) / a * sinh(a)) / (a2 * a2) end -function cf(d::Triweight{T}, t::Float64) where {T<:Real} +function cf(d::Triweight{T}, t::Float64) where {T <: Real} a = d.σ * t a2 = a * a - a == 0 ? complex(one(T)) : - 105 * cis(d.μ * t) * ((1 - 15 / a2) * cos(a) + (15 / a2 - 6) / a * sin(a)) / (a2 * a2) + return a == 0 ? 
complex(one(T)) : + 105 * cis(d.μ * t) * ((1 - 15 / a2) * cos(a) + (15 / a2 - 6) / a * sin(a)) / (a2 * a2) end diff --git a/src/univariate/continuous/uniform.jl b/src/univariate/continuous/uniform.jl index fa8c599cbc..8cd93929fb 100644 --- a/src/univariate/continuous/uniform.jl +++ b/src/univariate/continuous/uniform.jl @@ -23,13 +23,13 @@ External links * [Uniform distribution (continuous) on Wikipedia](http://en.wikipedia.org/wiki/Uniform_distribution_(continuous)) """ -struct Uniform{T<:Real} <: ContinuousUnivariateDistribution +struct Uniform{T <: Real} <: ContinuousUnivariateDistribution a::T b::T - Uniform{T}(a::Real, b::Real) where {T<:Real} = new{T}(a, b) + Uniform{T}(a::Real, b::Real) where {T <: Real} = new{T}(a, b) end -function Uniform(a::T, b::T; check_args::Bool = true) where {T<:Real} +function Uniform(a::T, b::T; check_args::Bool = true) where {T <: Real} @check_args Uniform (a < b) return Uniform{T}(a, b) end @@ -43,14 +43,14 @@ Uniform() = Uniform{Float64}(0.0, 1.0) @distr_support Uniform d.a d.b #### Conversions -convert(::Type{Uniform{T}}, a::Real, b::Real) where {T<:Real} = Uniform(T(a), T(b)) -Base.convert(::Type{Uniform{T}}, d::Uniform) where {T<:Real} = Uniform{T}(T(d.a), T(d.b)) -Base.convert(::Type{Uniform{T}}, d::Uniform{T}) where {T<:Real} = d +convert(::Type{Uniform{T}}, a::Real, b::Real) where {T <: Real} = Uniform(T(a), T(b)) +Base.convert(::Type{Uniform{T}}, d::Uniform) where {T <: Real} = Uniform{T}(T(d.a), T(d.b)) +Base.convert(::Type{Uniform{T}}, d::Uniform{T}) where {T <: Real} = d #### Parameters params(d::Uniform) = (d.a, d.b) -partype(::Uniform{T}) where {T<:Real} = T +partype(::Uniform{T}) where {T <: Real} = T location(d::Uniform) = d.a scale(d::Uniform) = d.b - d.a @@ -65,8 +65,8 @@ modes(d::Uniform) = Float64[] var(d::Uniform) = (w = d.b - d.a; w^2 / 12) -skewness(d::Uniform{T}) where {T<:Real} = zero(T) -kurtosis(d::Uniform{T}) where {T<:Real} = -6 / 5 * one(T) +skewness(d::Uniform{T}) where {T <: Real} = zero(T) +kurtosis(d::Uniform{T}) where {T <: Real} = -6 / 5 * one(T) entropy(d::Uniform) = log(d.b - d.a) @@ -105,7 +105,7 @@ function mgf(d::Uniform, t::Real) u = (b - a) * t / 2 u == zero(u) && return one(u) v = (a + b) * t / 2 - exp(v) * (sinh(u) / u) + return exp(v) * (sinh(u) / u) end function cgf_uniform_around_zero_kernel(x) # taylor series of (exp(x) - x - 1) / x @@ -114,14 +114,14 @@ function cgf_uniform_around_zero_kernel(x) a1 = inv(T(6)) a2 = inv(T(24)) a3 = inv(T(120)) - x * @evalpoly(x, a0, a1, a2, a3) + return x * @evalpoly(x, a0, a1, a2, a3) end function cgf(d::Uniform, t) # log((exp(t*b) - exp(t*a))/ (t*(b-a))) a, b = params(d) x = t * (b - a) - if abs(x) <= sqrt(eps(float(one(x)))) + return if abs(x) <= sqrt(eps(float(one(x)))) cgf_around_zero(d, t) else cgf_away_from_zero(d, t) @@ -130,12 +130,12 @@ end function cgf_around_zero(d::Uniform, t) a, b = params(d) x = t * (b - a) - t * a + log1p(cgf_uniform_around_zero_kernel(x)) + return t * a + log1p(cgf_uniform_around_zero_kernel(x)) end function cgf_away_from_zero(d::Uniform, t) a, b = params(d) x = t * (b - a) - logsubexp(t * b, t * a) - log(abs(x)) + return logsubexp(t * b, t * a) - log(abs(x)) end function cf(d::Uniform, t::Real) @@ -143,7 +143,7 @@ function cf(d::Uniform, t::Real) u = (b - a) * t / 2 u == zero(u) && return complex(one(u)) v = (a + b) * t / 2 - cis(v) * (sin(u) / u) + return cis(v) * (sin(u) / u) end #### Affine transformations @@ -161,7 +161,7 @@ _rand!(rng::AbstractRNG, d::Uniform, A::AbstractArray{<:Real}) = #### Fitting -function fit_mle(::Type{T}, 
x::AbstractArray{<:Real}) where {T<:Uniform} +function fit_mle(::Type{T}, x::AbstractArray{<:Real}) where {T <: Uniform} if isempty(x) throw(ArgumentError("x cannot be empty.")) end diff --git a/src/univariate/continuous/vonmises.jl b/src/univariate/continuous/vonmises.jl index cb4adfb643..fa47485eaf 100644 --- a/src/univariate/continuous/vonmises.jl +++ b/src/univariate/continuous/vonmises.jl @@ -18,13 +18,13 @@ External links * [von Mises distribution on Wikipedia](http://en.wikipedia.org/wiki/Von_Mises_distribution) """ -struct VonMises{T<:Real} <: ContinuousUnivariateDistribution +struct VonMises{T <: Real} <: ContinuousUnivariateDistribution μ::T # mean κ::T # concentration I0κx::T # I0(κ) * exp(-κ), where I0 is the modified Bessel function of order 0 end -function VonMises(μ::T, κ::T; check_args::Bool = true) where {T<:Real} +function VonMises(μ::T, κ::T; check_args::Bool = true) where {T <: Real} @check_args VonMises (κ, κ > zero(κ)) return VonMises{T}(μ, κ, besselix(zero(T), κ)) end @@ -42,15 +42,15 @@ show(io::IO, d::VonMises) = show(io, d, (:μ, :κ)) #### Conversions -convert(::Type{VonMises{T}}, μ::Real, κ::Real) where {T<:Real} = VonMises(T(μ), T(κ)) -Base.convert(::Type{VonMises{T}}, d::VonMises) where {T<:Real} = +convert(::Type{VonMises{T}}, μ::Real, κ::Real) where {T <: Real} = VonMises(T(μ), T(κ)) +Base.convert(::Type{VonMises{T}}, d::VonMises) where {T <: Real} = VonMises{T}(T(d.μ), T(d.κ), T(d.I0κx)) -Base.convert(::Type{VonMises{T}}, d::VonMises{T}) where {T<:Real} = d +Base.convert(::Type{VonMises{T}}, d::VonMises{T}) where {T <: Real} = d #### Parameters params(d::VonMises) = (d.μ, d.κ) -partype(::VonMises{T}) where {T<:Real} = T +partype(::VonMises{T}) where {T <: Real} = T #### Statistics @@ -69,15 +69,15 @@ cf(d::VonMises, t::Real) = (besselix(abs(t), d.κ) / d.I0κx) * cis(t * d.μ) #### Evaluations #pdf(d::VonMises, x::Real) = exp(d.κ * (cos(x - d.μ) - 1)) / (twoπ * d.I0κx) -pdf(d::VonMises{T}, x::Real) where {T<:Real} = +pdf(d::VonMises{T}, x::Real) where {T <: Real} = minimum(d) ≤ x ≤ maximum(d) ? exp(d.κ * (cos(x - d.μ) - 1)) / (twoπ * d.I0κx) : zero(T) -logpdf(d::VonMises{T}, x::Real) where {T<:Real} = +logpdf(d::VonMises{T}, x::Real) where {T <: Real} = minimum(d) ≤ x ≤ maximum(d) ? d.κ * (cos(x - d.μ) - 1) - log(d.I0κx) - log2π : -T(Inf) function cdf(d::VonMises, x::Real) # handle `±Inf` for which `sin` can't be evaluated z = clamp(x, extrema(d)...) 
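
One hunk back, in uniform.jl, the cgf code is worth a second look: for log((e^{tb} − e^{ta})/(t(b − a))) the small-|x| branch rewrites the quantity as t·a + log1p((eˣ − x − 1)/x) with x = t(b − a), and `cgf_uniform_around_zero_kernel` is the degree-4 Taylor expansion x(1/2 + x/6 + x²/24 + x³/120) of that ratio, avoiding the catastrophic cancellation of the naive formula near t = 0. An illustrative check that the two internal branches agree where both are accurate (these are non-exported helpers, so their names may change between versions):

```julia
using Distributions

d = Uniform(-0.5, 2.0)
t = 1.0e-2   # x = t * (b - a) = 0.025: small enough for the series, large enough for logsubexp
Distributions.cgf_around_zero(d, t) ≈ Distributions.cgf_away_from_zero(d, t)  # true
```
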
- return _vmcdf(d.κ, d.I0κx, z - d.μ, 1e-15) + return _vmcdf(d.κ, d.I0κx, z - d.μ, 1.0e-15) end function _vmcdf(κ::Real, I0κx::Real, x::Real, tol::Real) diff --git a/src/univariate/continuous/weibull.jl b/src/univariate/continuous/weibull.jl index c07a901245..976227c731 100644 --- a/src/univariate/continuous/weibull.jl +++ b/src/univariate/continuous/weibull.jl @@ -23,16 +23,16 @@ External links * [Weibull distribution on Wikipedia](http://en.wikipedia.org/wiki/Weibull_distribution) """ -struct Weibull{T<:Real} <: ContinuousUnivariateDistribution +struct Weibull{T <: Real} <: ContinuousUnivariateDistribution α::T # shape θ::T # scale - function Weibull{T}(α::T, θ::T) where {T<:Real} - new{T}(α, θ) + function Weibull{T}(α::T, θ::T) where {T <: Real} + return new{T}(α, θ) end end -function Weibull(α::T, θ::T; check_args::Bool = true) where {T<:Real} +function Weibull(α::T, θ::T; check_args::Bool = true) where {T <: Real} @check_args Weibull (α, α > zero(α)) (θ, θ > zero(θ)) return Weibull{T}(α, θ) end @@ -47,9 +47,9 @@ Weibull(α::Real = 1.0) = Weibull(α, one(α); check_args = false) #### Conversions -convert(::Type{Weibull{T}}, α::Real, θ::Real) where {T<:Real} = Weibull(T(α), T(θ)) -Base.convert(::Type{Weibull{T}}, d::Weibull) where {T<:Real} = Weibull{T}(T(d.α), T(d.θ)) -Base.convert(::Type{Weibull{T}}, d::Weibull{T}) where {T<:Real} = d +convert(::Type{Weibull{T}}, α::Real, θ::Real) where {T <: Real} = Weibull(T(α), T(θ)) +Base.convert(::Type{Weibull{T}}, d::Weibull) where {T <: Real} = Weibull{T}(T(d.α), T(d.θ)) +Base.convert(::Type{Weibull{T}}, d::Weibull{T}) where {T <: Real} = d #### Parameters @@ -57,14 +57,14 @@ shape(d::Weibull) = d.α scale(d::Weibull) = d.θ params(d::Weibull) = (d.α, d.θ) -partype(::Weibull{T}) where {T<:Real} = T +partype(::Weibull{T}) where {T <: Real} = T #### Statistics mean(d::Weibull) = d.θ * gamma(1 + 1 / d.α) median(d::Weibull) = d.θ * logtwo^(1 / d.α) -mode(d::Weibull{T}) where {T<:Real} = d.α > 1 ? (iα = 1 / d.α; d.θ * (1 - iα)^iα) : zero(T) +mode(d::Weibull{T}) where {T <: Real} = d.α > 1 ? (iα = 1 / d.α; d.θ * (1 - iα)^iα) : zero(T) var(d::Weibull) = d.θ^2 * gamma(1 + 2 / d.α) - mean(d)^2 @@ -73,7 +73,7 @@ function skewness(d::Weibull) σ2 = var(d) σ = sqrt(σ2) r = μ / σ - gamma(1 + 3 / d.α) * (d.θ / σ)^3 - 3r - r^3 + return gamma(1 + 3 / d.α) * (d.θ / σ)^3 - 3r - r^3 end function kurtosis(d::Weibull) @@ -84,12 +84,12 @@ function kurtosis(d::Weibull) r = μ / σ r2 = r^2 r4 = r2^2 - (θ / σ)^4 * gamma(1 + 4 / α) - 4γ * r - 6r2 - r4 - 3 + return (θ / σ)^4 * gamma(1 + 4 / α) - 4γ * r - 6r2 - r4 - 3 end function entropy(d::Weibull) α, θ = params(d) - 0.5772156649015328606 * (1 - 1 / α) + log(θ / α) + 1 + return 0.5772156649015328606 * (1 - 1 / α) + log(θ / α) + 1 end @@ -99,14 +99,14 @@ function pdf(d::Weibull, x::Real) α, θ = params(d) z = abs(x) / θ res = (α / θ) * z^(α - 1) * exp(-z^α) - x < 0 || isinf(x) ? zero(res) : res + return x < 0 || isinf(x) ? zero(res) : res end function logpdf(d::Weibull, x::Real) α, θ = params(d) z = abs(x) / θ res = log(α / θ) + xlogy(α - 1, z) - z^α - x < 0 || isinf(x) ? oftype(res, -Inf) : res + return x < 0 || isinf(x) ? 
oftype(res, -Inf) : res end zval(d::Weibull, x::Real) = (max(x, 0) / d.θ)^d.α @@ -122,8 +122,8 @@ cquantile(d::Weibull, p::Real) = xval(d, -log(p)) invlogcdf(d::Weibull, lp::Real) = xval(d, -log1mexp(lp)) invlogccdf(d::Weibull, lp::Real) = xval(d, -lp) -function gradlogpdf(d::Weibull{T}, x::Real) where {T<:Real} - if insupport(Weibull, x) +function gradlogpdf(d::Weibull{T}, x::Real) where {T <: Real} + return if insupport(Weibull, x) α, θ = params(d) (α - 1) / x - α * x^(α - 1) / (θ^α) else @@ -145,12 +145,12 @@ rand(rng::AbstractRNG, d::Weibull) = xval(d, randexp(rng)) Compute the maximum likelihood estimate of the [`Weibull`](@ref) distribution with Newton's method. """ function fit_mle( - ::Type{<:Weibull}, - x::AbstractArray{<:Real}; - alpha0::Real = 1, - maxiter::Int = 1000, - tol::Real = 1e-16, -) + ::Type{<:Weibull}, + x::AbstractArray{<:Real}; + alpha0::Real = 1, + maxiter::Int = 1000, + tol::Real = 1.0e-16, + ) N = 0 diff --git a/src/univariate/discrete/bernoulli.jl b/src/univariate/discrete/bernoulli.jl index 0559136fe2..158f88ff15 100644 --- a/src/univariate/discrete/bernoulli.jl +++ b/src/univariate/discrete/bernoulli.jl @@ -24,10 +24,10 @@ External links: * [Bernoulli distribution on Wikipedia](http://en.wikipedia.org/wiki/Bernoulli_distribution) """ -struct Bernoulli{T<:Real} <: DiscreteUnivariateDistribution +struct Bernoulli{T <: Real} <: DiscreteUnivariateDistribution p::T - Bernoulli{T}(p::Real) where {T<:Real} = new{T}(p) + Bernoulli{T}(p::Real) where {T <: Real} = new{T}(p) end function Bernoulli(p::Real; check_args::Bool = true) @@ -44,9 +44,9 @@ Bernoulli() = Bernoulli{Float64}(0.5) Base.eltype(::Type{<:Bernoulli}) = Bool #### Conversions -convert(::Type{Bernoulli{T}}, p::Real) where {T<:Real} = Bernoulli(T(p)) -Base.convert(::Type{Bernoulli{T}}, d::Bernoulli) where {T<:Real} = Bernoulli{T}(T(d.p)) -Base.convert(::Type{Bernoulli{T}}, d::Bernoulli{T}) where {T<:Real} = d +convert(::Type{Bernoulli{T}}, p::Real) where {T <: Real} = Bernoulli(T(p)) +Base.convert(::Type{Bernoulli{T}}, d::Bernoulli) where {T <: Real} = Bernoulli{T}(T(d.p)) +Base.convert(::Type{Bernoulli{T}}, d::Bernoulli{T}) where {T <: Real} = d #### Parameters @@ -69,7 +69,7 @@ mode(d::Bernoulli) = ifelse(succprob(d) > 1 / 2, 1, 0) function modes(d::Bernoulli) p = succprob(d) - p < 1 / 2 ? [0] : p > 1 / 2 ? [1] : [0, 1] + return p < 1 / 2 ? [0] : p > 1 / 2 ? [1] : [0, 1] end median(d::Bernoulli) = ifelse(succprob(d) <= 1 / 2, 0, 1) @@ -77,7 +77,7 @@ median(d::Bernoulli) = ifelse(succprob(d) <= 1 / 2, 0, 1) function entropy(d::Bernoulli) p0 = failprob(d) p1 = succprob(d) - (p0 == 0 || p0 == 1) ? zero(d.p) : -(p0 * log(p0) + p1 * log(p1)) + return (p0 == 0 || p0 == 1) ? zero(d.p) : -(p0 * log(p0) + p1 * log(p1)) end #### Evaluation @@ -93,18 +93,18 @@ cdf(d::Bernoulli, x::Int) = x < 0 ? zero(d.p) : x < 1 ? failprob(d) : one(d.p) ccdf(d::Bernoulli, x::Bool) = x ? zero(d.p) : succprob(d) ccdf(d::Bernoulli, x::Int) = x < 0 ? one(d.p) : x < 1 ? succprob(d) : zero(d.p) -function quantile(d::Bernoulli{T}, p::Real) where {T<:Real} - 0 <= p <= 1 ? (p <= failprob(d) ? zero(T) : one(T)) : T(NaN) +function quantile(d::Bernoulli{T}, p::Real) where {T <: Real} + return 0 <= p <= 1 ? (p <= failprob(d) ? zero(T) : one(T)) : T(NaN) end -function cquantile(d::Bernoulli{T}, p::Real) where {T<:Real} - 0 <= p <= 1 ? (p >= succprob(d) ? zero(T) : one(T)) : T(NaN) +function cquantile(d::Bernoulli{T}, p::Real) where {T <: Real} + return 0 <= p <= 1 ? (p >= succprob(d) ? 
zero(T) : one(T)) : T(NaN) end mgf(d::Bernoulli, t::Real) = failprob(d) + succprob(d) * exp(t) function cgf(d::Bernoulli, t) p, = params(d) # log(1-p+p*exp(t)) - logaddexp(log1p(-p), t + log(p)) + return logaddexp(log1p(-p), t + log(p)) end cf(d::Bernoulli, t::Real) = failprob(d) + succprob(d) * cis(t) @@ -115,14 +115,14 @@ rand(rng::AbstractRNG, d::Bernoulli) = rand(rng) <= succprob(d) #### MLE fitting -struct BernoulliStats{C<:Real} <: SufficientStats +struct BernoulliStats{C <: Real} <: SufficientStats cnt0::C cnt1::C end BernoulliStats(c0::Real, c1::Real) = BernoulliStats(promote(c0, c1)...) -fit_mle(::Type{T}, ss::BernoulliStats) where {T<:Bernoulli} = +fit_mle(::Type{T}, ss::BernoulliStats) where {T <: Bernoulli} = T(ss.cnt1 / (ss.cnt0 + ss.cnt1)) function suffstats(::Type{<:Bernoulli}, x::AbstractArray{<:Integer}) @@ -136,14 +136,14 @@ function suffstats(::Type{<:Bernoulli}, x::AbstractArray{<:Integer}) throw(DomainError(xi, "samples must be 0 or 1")) end end - BernoulliStats(c0, c1) + return BernoulliStats(c0, c1) end function suffstats( - ::Type{<:Bernoulli}, - x::AbstractArray{<:Integer}, - w::AbstractArray{<:Real}, -) + ::Type{<:Bernoulli}, + x::AbstractArray{<:Integer}, + w::AbstractArray{<:Real}, + ) length(x) == length(w) || throw(DimensionMismatch("inconsistent argument dimensions")) z = zero(eltype(w)) c0 = c1 = z + z # possibly widened and different from `z`, e.g., if `z = true` @@ -156,5 +156,5 @@ function suffstats( throw(DomainError(xi, "samples must be 0 or 1")) end end - BernoulliStats(c0, c1) + return BernoulliStats(c0, c1) end diff --git a/src/univariate/discrete/bernoullilogit.jl b/src/univariate/discrete/bernoullilogit.jl index 37ec5de1df..65010917cd 100644 --- a/src/univariate/discrete/bernoullilogit.jl +++ b/src/univariate/discrete/bernoullilogit.jl @@ -16,7 +16,7 @@ External links: See also [`Bernoulli`](@ref) """ -struct BernoulliLogit{T<:Real} <: DiscreteUnivariateDistribution +struct BernoulliLogit{T <: Real} <: DiscreteUnivariateDistribution logitp::T end @@ -27,9 +27,9 @@ BernoulliLogit() = BernoulliLogit(0.0) Base.eltype(::Type{<:BernoulliLogit}) = Bool #### Conversions -Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit) where {T<:Real} = +Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit) where {T <: Real} = BernoulliLogit{T}(T(d.logitp)) -Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit{T}) where {T<:Real} = d +Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit{T}) where {T <: Real} = d #### Parameters @@ -57,18 +57,18 @@ mode(d::BernoulliLogit) = d.logitp > 0 ? 1 : 0 function modes(d::BernoulliLogit) logitp = d.logitp z = zero(logitp) - logitp < z ? [false] : (logitp > z ? [true] : [false, true]) + return logitp < z ? [false] : (logitp > z ? [true] : [false, true]) end median(d::BernoulliLogit) = d.logitp > 0 function entropy(d::BernoulliLogit) logitp = d.logitp - (logitp == -Inf || logitp == Inf) ? float(zero(logitp)) : - ( - logitp > 0 ? -(succprob(d) * logitp + logfailprob(d)) : - -(logsuccprob(d) - failprob(d) * logitp) - ) + return (logitp == -Inf || logitp == Inf) ? float(zero(logitp)) : + ( + logitp > 0 ? -(succprob(d) * logitp + logfailprob(d)) : + -(logsuccprob(d) - failprob(d) * logitp) + ) end #### Evaluation @@ -99,17 +99,17 @@ logccdf(d::BernoulliLogit, x::Int) = function quantile(d::BernoulliLogit, p::Real) T = float(partype(d)) - 0 <= p <= 1 ? (p <= failprob(d) ? zero(T) : one(T)) : T(NaN) + return 0 <= p <= 1 ? (p <= failprob(d) ? 
zero(T) : one(T)) : T(NaN) end function cquantile(d::BernoulliLogit, p::Real) T = float(partype(d)) - 0 <= p <= 1 ? (p >= succprob(d) ? zero(T) : one(T)) : T(NaN) + return 0 <= p <= 1 ? (p >= succprob(d) ? zero(T) : one(T)) : T(NaN) end mgf(d::BernoulliLogit, t::Real) = failprob(d) + exp(t + logsuccprob(d)) function cgf(d::BernoulliLogit, t) # log(1-p+p*exp(t)) = logaddexp(log(1-p), t + log(p)) - logaddexp(logfailprob(d), t + logsuccprob(d)) + return logaddexp(logfailprob(d), t + logsuccprob(d)) end cf(d::BernoulliLogit, t::Real) = failprob(d) + succprob(d) * cis(t) diff --git a/src/univariate/discrete/betabinomial.jl b/src/univariate/discrete/betabinomial.jl index 1a42b1eec4..017b6cab3a 100644 --- a/src/univariate/discrete/betabinomial.jl +++ b/src/univariate/discrete/betabinomial.jl @@ -18,15 +18,15 @@ External links: * [Beta-binomial distribution on Wikipedia](https://en.wikipedia.org/wiki/Beta-binomial_distribution) """ -struct BetaBinomial{T<:Real} <: DiscreteUnivariateDistribution +struct BetaBinomial{T <: Real} <: DiscreteUnivariateDistribution n::Int α::T β::T - BetaBinomial{T}(n::Integer, α::T, β::T) where {T<:Real} = new{T}(n, α, β) + BetaBinomial{T}(n::Integer, α::T, β::T) where {T <: Real} = new{T}(n, α, β) end -function BetaBinomial(n::Integer, α::T, β::T; check_args::Bool = true) where {T<:Real} +function BetaBinomial(n::Integer, α::T, β::T; check_args::Bool = true) where {T <: Real} @check_args BetaBinomial (n, n >= zero(n)) (α, α > zero(α)) (β, β > zero(β)) return BetaBinomial{T}(n, α, β) end @@ -39,13 +39,13 @@ BetaBinomial(n::Integer, α::Integer, β::Integer; check_args::Bool = true) = @distr_support BetaBinomial 0 d.n #### Conversions -function convert(::Type{BetaBinomial{T}}, n::Int, α::S, β::S) where {T<:Real,S<:Real} - BetaBinomial(n, T(α), T(β)) +function convert(::Type{BetaBinomial{T}}, n::Int, α::S, β::S) where {T <: Real, S <: Real} + return BetaBinomial(n, T(α), T(β)) end -function Base.convert(::Type{BetaBinomial{T}}, d::BetaBinomial) where {T<:Real} - BetaBinomial{T}(d.n, T(d.α), T(d.β)) +function Base.convert(::Type{BetaBinomial{T}}, d::BetaBinomial) where {T <: Real} + return BetaBinomial{T}(d.n, T(d.α), T(d.β)) end -Base.convert(::Type{BetaBinomial{T}}, d::BetaBinomial{T}) where {T<:Real} = d +Base.convert(::Type{BetaBinomial{T}}, d::BetaBinomial{T}) where {T <: Real} = d #### Parameters diff --git a/src/univariate/discrete/binomial.jl b/src/univariate/discrete/binomial.jl index f15174543c..0ee0d0649a 100644 --- a/src/univariate/discrete/binomial.jl +++ b/src/univariate/discrete/binomial.jl @@ -22,11 +22,11 @@ External links: * [Binomial distribution on Wikipedia](http://en.wikipedia.org/wiki/Binomial_distribution) """ -struct Binomial{T<:Real} <: DiscreteUnivariateDistribution +struct Binomial{T <: Real} <: DiscreteUnivariateDistribution n::Int p::T - Binomial{T}(n, p) where {T<:Real} = new{T}(n, p) + Binomial{T}(n, p) where {T <: Real} = new{T}(n, p) end function Binomial(n::Integer, p::Real; check_args::Bool = true) @@ -38,7 +38,7 @@ Binomial(n::Integer, p::Integer; check_args::Bool = true) = Binomial(n, float(p); check_args = check_args) function Binomial(n::Integer; check_args::Bool = true) @check_args Binomial (n, n >= zero(n)) - Binomial{Float64}(n, 0.5) + return Binomial{Float64}(n, 0.5) end Binomial() = Binomial{Float64}(1, 0.5) @@ -46,13 +46,13 @@ Binomial() = Binomial{Float64}(1, 0.5) #### Conversions -function convert(::Type{Binomial{T}}, n::Int, p::Real) where {T<:Real} +function convert(::Type{Binomial{T}}, n::Int, p::Real) where {T <: Real} 
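
Stepping back to the Bernoulli hunks above: both variants route the cgf log(1 − p + p·eᵗ) through `logaddexp(log1p(-p), t + log(p))`, which stays finite where the naive expression overflows. A quick standalone illustration (`logaddexp` comes from LogExpFunctions.jl, re-exported by StatsFuns, and is assumed available):

```julia
using LogExpFunctions: logaddexp

p, t = 0.3, 2.0
naive = log(1 - p + p * exp(t))            # fine at moderate t ...
stable = logaddexp(log1p(-p), t + log(p))  # ... and matches it: both ≈ 1.0704
naive ≈ stable                             # true

# At extreme t the naive form overflows to Inf; the stable form does not:
logaddexp(log1p(-p), 1000.0 + log(p))      # ≈ 998.796, i.e. 1000 + log(p)
```
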
return Binomial(n, T(p)) end -function Base.convert(::Type{Binomial{T}}, d::Binomial) where {T<:Real} +function Base.convert(::Type{Binomial{T}}, d::Binomial) where {T <: Real} return Binomial{T}(d.n, T(d.p)) end -Base.convert(::Type{Binomial{T}}, d::Binomial{T}) where {T<:Real} = d +Base.convert(::Type{Binomial{T}}, d::Binomial{T}) where {T <: Real} = d #### Parameters @@ -61,16 +61,16 @@ succprob(d::Binomial) = d.p failprob(d::Binomial{T}) where {T} = one(T) - d.p params(d::Binomial) = (d.n, d.p) -@inline partype(::Binomial{T}) where {T<:Real} = T +@inline partype(::Binomial{T}) where {T <: Real} = T #### Properties mean(d::Binomial) = ntrials(d) * succprob(d) var(d::Binomial) = ntrials(d) * succprob(d) * failprob(d) -function mode(d::Binomial{T}) where {T<:Real} +function mode(d::Binomial{T}) where {T <: Real} (n, p) = params(d) - n > 0 ? floor(Int, (n + 1) * d.p) : zero(T) + return n > 0 ? floor(Int, (n + 1) * d.p) : zero(T) end modes(d::Binomial) = Int[mode(d)] @@ -88,7 +88,7 @@ function median(dist::Binomial) floor_mean = floor(Int, dist_mean) difference = dist_mean - floor_mean - if difference <= bound + return if difference <= bound # The only case where the median satisfies |median - mean| <= 1 - bound with equality # is p = 1/2 and n odd # However, in that case we also want to return floor(mean) @@ -108,13 +108,13 @@ end function skewness(d::Binomial) n, p1 = params(d) p0 = 1 - p1 - (p0 - p1) / sqrt(n * p0 * p1) + return (p0 - p1) / sqrt(n * p0 * p1) end function kurtosis(d::Binomial) n, p = params(d) u = p * (1 - p) - (1 - 6u) / (n * u) + return (1 - 6u) / (n * u) end function entropy(d::Binomial; approx::Bool = false) @@ -127,7 +127,7 @@ function entropy(d::Binomial; approx::Bool = false) lg = log(p1 / p0) lp = n * log(p0) s = exp(lp) * lp - for k = 1:n + for k in 1:n lp += log((n - k + 1) / k) + lg s += exp(lp) * lp end @@ -171,27 +171,27 @@ function rand(rng::AbstractRNG, d::Binomial) else y = rand(rng, BinomialTPESampler(n, r)) end - p <= 0.5 ? y : n - y + return p <= 0.5 ? 
y : n - y end function mgf(d::Binomial, t::Real) n, p = params(d) - (one(p) - p + p * exp(t))^n + return (one(p) - p + p * exp(t))^n end function cgf(d::Binomial, t) n, p = params(d) - n * cgf(Bernoulli{typeof(p)}(p), t) + return n * cgf(Bernoulli{typeof(p)}(p), t) end function cf(d::Binomial, t::Real) n, p = params(d) - (one(p) - p + p * cis(t))^n + return (one(p) - p + p * cis(t))^n end #### Fit model -struct BinomialStats{N<:Real} <: SufficientStats +struct BinomialStats{N <: Real} <: SufficientStats ns::N # the total number of successes ne::N # the number of experiments n::Int # the number of trials in each experiment @@ -206,15 +206,15 @@ function suffstats(::Type{<:Binomial}, n::Integer, x::AbstractArray{<:Integer}) 0 <= xi <= n || throw(DomainError(xi, "samples must be between 0 and $n")) ns += xi end - BinomialStats(ns, length(x), n) + return BinomialStats(ns, length(x), n) end function suffstats( - ::Type{<:Binomial}, - n::Integer, - x::AbstractArray{<:Integer}, - w::AbstractArray{<:Real}, -) + ::Type{<:Binomial}, + n::Integer, + x::AbstractArray{<:Integer}, + w::AbstractArray{<:Real}, + ) z = zero(eltype(x)) * zero(eltype(w)) ns = ne = z + z # possibly widened and different from `z`, e.g., if `z = true` for (xi, wi) in zip(x, w) @@ -222,29 +222,29 @@ function suffstats( ns += xi * wi ne += wi end - BinomialStats(ns, ne, n) + return BinomialStats(ns, ne, n) end -const BinomData = Tuple{Int,AbstractArray} +const BinomData = Tuple{Int, AbstractArray} -suffstats(::Type{T}, data::BinomData) where {T<:Binomial} = suffstats(T, data...) -suffstats(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = +suffstats(::Type{T}, data::BinomData) where {T <: Binomial} = suffstats(T, data...) +suffstats(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T <: Binomial} = suffstats(T, data..., w) -fit_mle(::Type{T}, ss::BinomialStats) where {T<:Binomial} = T(ss.n, ss.ns / (ss.ne * ss.n)) +fit_mle(::Type{T}, ss::BinomialStats) where {T <: Binomial} = T(ss.n, ss.ns / (ss.ne * ss.n)) -fit_mle(::Type{T}, n::Integer, x::AbstractArray{<:Integer}) where {T<:Binomial} = +fit_mle(::Type{T}, n::Integer, x::AbstractArray{<:Integer}) where {T <: Binomial} = fit_mle(T, suffstats(T, n, x)) fit_mle( ::Type{T}, n::Integer, x::AbstractArray{<:Integer}, w::AbstractArray{<:Real}, -) where {T<:Binomial} = fit_mle(T, suffstats(T, n, x, w)) -fit_mle(::Type{T}, data::BinomData) where {T<:Binomial} = fit_mle(T, suffstats(T, data)) -fit_mle(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = +) where {T <: Binomial} = fit_mle(T, suffstats(T, n, x, w)) +fit_mle(::Type{T}, data::BinomData) where {T <: Binomial} = fit_mle(T, suffstats(T, data)) +fit_mle(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T <: Binomial} = fit_mle(T, suffstats(T, data, w)) -fit(::Type{T}, data::BinomData) where {T<:Binomial} = fit_mle(T, data) -fit(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T<:Binomial} = +fit(::Type{T}, data::BinomData) where {T <: Binomial} = fit_mle(T, data) +fit(::Type{T}, data::BinomData, w::AbstractArray{<:Real}) where {T <: Binomial} = fit_mle(T, data, w) diff --git a/src/univariate/discrete/categorical.jl b/src/univariate/discrete/categorical.jl index e6cf29e772..87913cee80 100644 --- a/src/univariate/discrete/categorical.jl +++ b/src/univariate/discrete/categorical.jl @@ -24,23 +24,23 @@ External links: * [Categorical distribution on Wikipedia](http://en.wikipedia.org/wiki/Categorical_distribution) """ -const 
Categorical{P<:Real,Ps<:AbstractVector{P}} = - DiscreteNonParametric{Int,P,Base.OneTo{Int},Ps} +const Categorical{P <: Real, Ps <: AbstractVector{P}} = + DiscreteNonParametric{Int, P, Base.OneTo{Int}, Ps} -function Categorical{P,Ps}( - p::Ps; - check_args::Bool = true, -) where {P<:Real,Ps<:AbstractVector{P}} +function Categorical{P, Ps}( + p::Ps; + check_args::Bool = true, + ) where {P <: Real, Ps <: AbstractVector{P}} @check_args Categorical (p, isprobvec(p), "vector p is not a probability vector") - return Categorical{P,Ps}(Base.OneTo(length(p)), p; check_args = check_args) + return Categorical{P, Ps}(Base.OneTo(length(p)), p; check_args = check_args) end -Categorical(p::AbstractVector{P}; check_args::Bool = true) where {P<:Real} = - Categorical{P,typeof(p)}(p; check_args = check_args) +Categorical(p::AbstractVector{P}; check_args::Bool = true) where {P <: Real} = + Categorical{P, typeof(p)}(p; check_args = check_args) function Categorical(k::Integer; check_args::Bool = true) @check_args Categorical (k, k >= 1, "at least one category is required") - return Categorical{Float64,Vector{Float64}}( + return Categorical{Float64, Vector{Float64}}( Base.OneTo(k), fill(1 / k, k); check_args = false, @@ -53,15 +53,15 @@ Categorical(probabilities::Real...; check_args::Bool = true) = ### Conversions convert( - ::Type{Categorical{P,Ps}}, + ::Type{Categorical{P, Ps}}, x::AbstractVector{<:Real}, -) where {P<:Real,Ps<:AbstractVector{P}} = Categorical{P,Ps}(Ps(x)) +) where {P <: Real, Ps <: AbstractVector{P}} = Categorical{P, Ps}(Ps(x)) ### Parameters ncategories(d::Categorical) = support(d).stop -params(d::Categorical{P,Ps}) where {P<:Real,Ps<:AbstractVector{P}} = (probs(d),) -partype(::Categorical{T}) where {T<:Real} = T +params(d::Categorical{P, Ps}) where {P <: Real, Ps <: AbstractVector{P}} = (probs(d),) +partype(::Categorical{T}) where {T <: Real} = T function Base.isapprox(c1::Categorical, c2::Categorical; kwargs...) # support are of type Base.OneTo, so comparing the cardinality of the support @@ -69,12 +69,12 @@ function Base.isapprox(c1::Categorical, c2::Categorical; kwargs...) # we explicitly redefine the method for `DiscreteNonParametric` which also compares # the support since `isapprox(::OneTo, ::OneTo)` is broken on Julia 1.6 (issue #1675) return length(support(c1)) == length(support(c2)) && - isapprox(probs(c1), probs(c2); kwargs...) + isapprox(probs(c1), probs(c2); kwargs...) end ### Statistics -function median(d::Categorical{T}) where {T<:Real} +function median(d::Categorical{T}) where {T <: Real} k = ncategories(d) p = probs(d) cp = zero(T) @@ -83,7 +83,7 @@ function median(d::Categorical{T}) where {T<:Real} i += 1 @inbounds cp += p[i] end - i + return i end ### Evaluation @@ -100,23 +100,23 @@ function pdf(d::Categorical, x::Real) return insupport(d, x) ? 
ps[round(Int, x)] : zero(eltype(ps)) end -function _pdf!(r::AbstractArray{<:Real}, d::Categorical{T}, rgn::UnitRange) where {T<:Real} +function _pdf!(r::AbstractArray{<:Real}, d::Categorical{T}, rgn::UnitRange) where {T <: Real} vfirst = round(Int, first(rgn)) vlast = round(Int, last(rgn)) vl = max(vfirst, 1) vr = min(vlast, ncategories(d)) p = probs(d) if vl > vfirst - for i = 1:(vl-vfirst) + for i in 1:(vl - vfirst) r[i] = zero(T) end end fm1 = vfirst - 1 - for v = vl:vr - r[v-fm1] = p[v] + for v in vl:vr + r[v - fm1] = p[v] end if vr < vlast - for i = (vr-vfirst+2):length(rgn) + for i in (vr - vfirst + 2):length(rgn) r[i] = zero(T) end end @@ -126,7 +126,7 @@ end # sampling -sampler(d::Categorical{P,Ps}) where {P<:Real,Ps<:AbstractVector{P}} = AliasTable(probs(d)) +sampler(d::Categorical{P, Ps}) where {P <: Real, Ps <: AbstractVector{P}} = AliasTable(probs(d)) ### sufficient statistics @@ -135,45 +135,45 @@ struct CategoricalStats <: SufficientStats h::Vector{Float64} end -function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}) where {T<:Integer} - for i = 1:length(x) +function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}) where {T <: Integer} + for i in 1:length(x) @inbounds xi = x[i] h[xi] += 1.0 # cannot use @inbounds, as no guarantee that x[i] is in bound end - h + return h end function add_categorical_counts!( - h::Vector{Float64}, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Integer} + h::Vector{Float64}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Integer} n = length(x) if n != length(w) throw(DimensionMismatch("Inconsistent array lengths.")) end - for i = 1:n + for i in 1:n @inbounds xi = x[i] @inbounds wi = w[i] h[xi] += wi # cannot use @inbounds, as no guarantee that x[i] is in bound end - h + return h end -function suffstats(::Type{<:Categorical}, k::Int, x::AbstractArray{T}) where {T<:Integer} - CategoricalStats(add_categorical_counts!(zeros(k), x)) +function suffstats(::Type{<:Categorical}, k::Int, x::AbstractArray{T}) where {T <: Integer} + return CategoricalStats(add_categorical_counts!(zeros(k), x)) end function suffstats( - ::Type{<:Categorical}, - k::Int, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Integer} - CategoricalStats(add_categorical_counts!(zeros(k), x, w)) + ::Type{<:Categorical}, + k::Int, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Integer} + return CategoricalStats(add_categorical_counts!(zeros(k), x, w)) end -const CategoricalData = Tuple{Int,AbstractArray} +const CategoricalData = Tuple{Int, AbstractArray} suffstats(::Type{<:Categorical}, data::CategoricalData) = suffstats(Categorical, data...) 
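# (added annotation, not part of the formatter output) A minimal usage sketch
# of the sufficient-statistics path defined above; the sample data is made up.
using Distributions
x = [1, 2, 2, 3, 3, 3]            # observations coded as category indices 1:3
d = fit_mle(Categorical, 3, x)    # counts accumulated via add_categorical_counts!
probs(d)                          # ≈ [1/6, 1/3, 1/2]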
suffstats(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = @@ -182,33 +182,33 @@ suffstats(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64 # Model fitting function fit_mle(::Type{<:Categorical}, ss::CategoricalStats) - Categorical(normalize!(ss.h, 1)) + return Categorical(normalize!(ss.h, 1)) end -function fit_mle(::Type{<:Categorical}, k::Integer, x::AbstractArray{T}) where {T<:Integer} - Categorical(normalize!(add_categorical_counts!(zeros(k), x), 1), check_args = false) +function fit_mle(::Type{<:Categorical}, k::Integer, x::AbstractArray{T}) where {T <: Integer} + return Categorical(normalize!(add_categorical_counts!(zeros(k), x), 1), check_args = false) end function fit_mle( - ::Type{<:Categorical}, - k::Integer, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Integer} - Categorical(normalize!(add_categorical_counts!(zeros(k), x, w), 1), check_args = false) + ::Type{<:Categorical}, + k::Integer, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Integer} + return Categorical(normalize!(add_categorical_counts!(zeros(k), x, w), 1), check_args = false) end fit_mle(::Type{<:Categorical}, data::CategoricalData) = fit_mle(Categorical, data...) fit_mle(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = fit_mle(Categorical, data..., w) -fit_mle(::Type{<:Categorical}, x::AbstractArray{T}) where {T<:Integer} = +fit_mle(::Type{<:Categorical}, x::AbstractArray{T}) where {T <: Integer} = fit_mle(Categorical, maximum(x), x) fit_mle( ::Type{<:Categorical}, x::AbstractArray{T}, w::AbstractArray{Float64}, -) where {T<:Integer} = fit_mle(Categorical, maximum(x), x, w) +) where {T <: Integer} = fit_mle(Categorical, maximum(x), x, w) fit(::Type{<:Categorical}, data::CategoricalData) = fit_mle(Categorical, data) fit(::Type{<:Categorical}, data::CategoricalData, w::AbstractArray{Float64}) = diff --git a/src/univariate/discrete/discretenonparametric.jl b/src/univariate/discrete/discretenonparametric.jl index dcd8ee7ca1..e0496ebaf0 100644 --- a/src/univariate/discrete/discretenonparametric.jl +++ b/src/univariate/discrete/discretenonparametric.jl @@ -17,17 +17,17 @@ External links * [Probability mass function on Wikipedia](http://en.wikipedia.org/wiki/Probability_mass_function) """ -struct DiscreteNonParametric{T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} <: - DiscreteUnivariateDistribution +struct DiscreteNonParametric{T <: Real, P <: Real, Ts <: AbstractVector{T}, Ps <: AbstractVector{P}} <: + DiscreteUnivariateDistribution support::Ts p::Ps - function DiscreteNonParametric{T,P,Ts,Ps}( - xs::Ts, - ps::Ps; - check_args::Bool = true, - ) where {T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractVector{P}} - check_args || return new{T,P,Ts,Ps}(xs, ps) + function DiscreteNonParametric{T, P, Ts, Ps}( + xs::Ts, + ps::Ps; + check_args::Bool = true, + ) where {T <: Real, P <: Real, Ts <: AbstractVector{T}, Ps <: AbstractVector{P}} + check_args || return new{T, P, Ts, Ps}(xs, ps) @check_args( DiscreteNonParametric, ( @@ -38,7 +38,7 @@ struct DiscreteNonParametric{T<:Real,P<:Real,Ts<:AbstractVector{T},Ps<:AbstractV (xs, allunique(xs), "support must contain only unique elements"), ) sort_order = sortperm(xs) - new{T,P,Ts,Ps}(xs[sort_order], ps[sort_order]) + return new{T, P, Ts, Ps}(xs[sort_order], ps[sort_order]) end end @@ -46,24 +46,24 @@ DiscreteNonParametric( vs::AbstractVector{T}, ps::AbstractVector{P}; check_args::Bool = true, -) where {T<:Real,P<:Real} = - 
DiscreteNonParametric{T,P,typeof(vs),typeof(ps)}(vs, ps; check_args = check_args) +) where {T <: Real, P <: Real} = + DiscreteNonParametric{T, P, typeof(vs), typeof(ps)}(vs, ps; check_args = check_args) Base.eltype(::Type{<:DiscreteNonParametric{T}}) where {T} = T # Conversion convert( - ::Type{DiscreteNonParametric{T,P,Ts,Ps}}, + ::Type{DiscreteNonParametric{T, P, Ts, Ps}}, d::DiscreteNonParametric, -) where {T,P,Ts,Ps} = DiscreteNonParametric{T,P,Ts,Ps}( +) where {T, P, Ts, Ps} = DiscreteNonParametric{T, P, Ts, Ps}( convert(Ts, support(d)), convert(Ps, probs(d)), check_args = false, ) Base.convert( - ::Type{DiscreteNonParametric{T,P,Ts,Ps}}, - d::DiscreteNonParametric{T,P,Ts,Ps}, -) where {T,P,Ts,Ps} = d + ::Type{DiscreteNonParametric{T, P, Ts, Ps}}, + d::DiscreteNonParametric{T, P, Ts, Ps}, +) where {T, P, Ts, Ps} = d # Accessors params(d::DiscreteNonParametric) = (d.support, d.p) @@ -86,8 +86,8 @@ function Base.isapprox(c1::DiscreteNonParametric, c2::DiscreteNonParametric; kwa support_c1 = support(c1) support_c2 = support(c2) return length(support_c1) == length(support_c2) && - isapprox(support_c1, support_c2; kwargs...) && - isapprox(probs(c1), probs(c2); kwargs...) + isapprox(support_c1, support_c2; kwargs...) && + isapprox(probs(c1), probs(c2); kwargs...) end # Sampling @@ -100,7 +100,7 @@ function rand(rng::AbstractRNG, d::DiscreteNonParametric) cp = p[1] i = 1 while cp <= draw && i < n - @inbounds cp += p[i+=1] + @inbounds cp += p[i += 1] end return x[i] end @@ -140,11 +140,11 @@ function cdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i = 1:stop_idx + @inbounds for i in 1:stop_idx s += ps[i] end else - @inbounds for i = (stop_idx+1):n + @inbounds for i in (stop_idx + 1):n s += ps[i] end s = 1 - s @@ -166,12 +166,12 @@ function ccdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i = 1:stop_idx + @inbounds for i in 1:stop_idx s += ps[i] end s = 1 - s else - @inbounds for i = (stop_idx+1):n + @inbounds for i in (stop_idx + 1):n s += ps[i] end end @@ -190,7 +190,7 @@ function quantile(d::DiscreteNonParametric, q::Real) i += 1 @inbounds cp += p[i] end - x[i] + return x[i] end minimum(d::DiscreteNonParametric) = first(support(d)) @@ -235,35 +235,35 @@ function mgf(d::DiscreteNonParametric, t::Real) x = support(d) p = probs(d) s = zero(Float64) - for i = 1:length(x) + for i in 1:length(x) s += p[i] * exp(t * x[i]) end - s + return s end function cf(d::DiscreteNonParametric, t::Real) x = support(d) p = probs(d) s = zero(Complex{Float64}) - for i = 1:length(x) + for i in 1:length(x) s += p[i] * cis(t * x[i]) end - s + return s end # Sufficient statistics struct DiscreteNonParametricStats{ - T<:Real, - W<:Real, - Ts<:AbstractVector{T}, - Ws<:AbstractVector{W}, -} <: SufficientStats + T <: Real, + W <: Real, + Ts <: AbstractVector{T}, + Ws <: AbstractVector{W}, + } <: SufficientStats support::Ts freq::Ws end -function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where {T<:Real} +function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where {T <: Real} N = length(x) N == 0 && return DiscreteNonParametricStats(T[], Float64[]) @@ -277,7 +277,7 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { ps[1] += 1.0 xprev = x[1] - @inbounds for i = 2:N + @inbounds for i in 2:N xi = x[i] if xi != xprev n += 1 @@ -289,16 +289,16 @@ function 
suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { resize!(vs, n) resize!(ps, n) - DiscreteNonParametricStats(vs, ps) + return DiscreteNonParametricStats(vs, ps) end function suffstats( - ::Type{<:DiscreteNonParametric}, - x::AbstractArray{T}, - w::AbstractArray{W}; - check_args::Bool = true, -) where {T<:Real,W<:Real} + ::Type{<:DiscreteNonParametric}, + x::AbstractArray{T}, + w::AbstractArray{W}; + check_args::Bool = true, + ) where {T <: Real, W <: Real} @check_args DiscreteNonParametric (length(x) == length(w)) @@ -317,7 +317,7 @@ function suffstats( ps[1] += w[1] xprev = x[1] - @inbounds for i = 2:N + @inbounds for i in 2:N xi = x[i] wi = w[i] if xi != xprev @@ -330,7 +330,7 @@ function suffstats( resize!(vs, n) resize!(ps, n) - DiscreteNonParametricStats(vs, ps) + return DiscreteNonParametricStats(vs, ps) end @@ -338,8 +338,8 @@ end fit_mle( ::Type{<:DiscreteNonParametric}, - ss::DiscreteNonParametricStats{T,W,Ts,Ws}, -) where {T,W,Ts,Ws} = DiscreteNonParametric{T,W,Ts,Ws}( + ss::DiscreteNonParametricStats{T, W, Ts, Ws}, +) where {T, W, Ts, Ws} = DiscreteNonParametric{T, W, Ts, Ws}( ss.support, normalize!(copy(ss.freq), 1), check_args = false, diff --git a/src/univariate/discrete/discreteuniform.jl b/src/univariate/discrete/discreteuniform.jl index 98414c2a30..02d4543427 100644 --- a/src/univariate/discrete/discreteuniform.jl +++ b/src/univariate/discrete/discreteuniform.jl @@ -28,7 +28,7 @@ struct DiscreteUniform <: DiscreteUnivariateDistribution function DiscreteUniform(a::Real, b::Real; check_args::Bool = true) @check_args DiscreteUniform (a <= b) - new(a, b, 1 / (b - a + 1)) + return new(a, b, 1 / (b - a + 1)) end DiscreteUniform(b::Real; check_args::Bool = true) = DiscreteUniform(0, b; check_args = check_args) @@ -62,7 +62,7 @@ skewness(d::DiscreteUniform) = 0.0 function kurtosis(d::DiscreteUniform) n2 = span(d)^2 - -1.2 * (n2 + 1.0) / (n2 - 1.0) + return -1.2 * (n2 + 1.0) / (n2 - 1.0) end entropy(d::DiscreteUniform) = log(span(d)) diff --git a/src/univariate/discrete/geometric.jl b/src/univariate/discrete/geometric.jl index e36db7e810..9689256e22 100644 --- a/src/univariate/discrete/geometric.jl +++ b/src/univariate/discrete/geometric.jl @@ -21,11 +21,11 @@ External links * [Geometric distribution on Wikipedia](http://en.wikipedia.org/wiki/Geometric_distribution) """ -struct Geometric{T<:Real} <: DiscreteUnivariateDistribution +struct Geometric{T <: Real} <: DiscreteUnivariateDistribution p::T - function Geometric{T}(p::T) where {T<:Real} - new{T}(p) + function Geometric{T}(p::T) where {T <: Real} + return new{T}(p) end end @@ -39,16 +39,16 @@ Geometric() = Geometric{Float64}(0.5) @distr_support Geometric 0 Inf ### Conversions -convert(::Type{Geometric{T}}, p::Real) where {T<:Real} = Geometric(T(p)) -Base.convert(::Type{Geometric{T}}, d::Geometric) where {T<:Real} = Geometric{T}(T(d.p)) -Base.convert(::Type{Geometric{T}}, d::Geometric{T}) where {T<:Real} = d +convert(::Type{Geometric{T}}, p::Real) where {T <: Real} = Geometric(T(p)) +Base.convert(::Type{Geometric{T}}, d::Geometric) where {T <: Real} = Geometric{T}(T(d.p)) +Base.convert(::Type{Geometric{T}}, d::Geometric{T}) where {T <: Real} = d ### Parameters succprob(d::Geometric) = d.p failprob(d::Geometric) = 1 - d.p params(d::Geometric) = (d.p,) -partype(::Geometric{T}) where {T<:Real} = T +partype(::Geometric{T}) where {T <: Real} = T ### Statistics @@ -57,7 +57,7 @@ mean(d::Geometric) = failprob(d) / succprob(d) median(d::Geometric) = -fld(logtwo, log1p(-d.p)) - 1 -mode(d::Geometric{T}) where 
{T<:Real} = zero(T) +mode(d::Geometric{T}) where {T <: Real} = zero(T) var(d::Geometric) = (1 - d.p) / abs2(d.p) @@ -83,20 +83,20 @@ end ### Evaluations function logpdf(d::Geometric, x::Real) - insupport(d, x) ? log(d.p) + log1p(-d.p) * x : log(zero(d.p)) + return insupport(d, x) ? log(d.p) + log1p(-d.p) * x : log(zero(d.p)) end function cdf(d::Geometric, x::Int) p = succprob(d) n = max(x + 1, 0) - p < 1 / 2 ? -expm1(log1p(-p) * n) : 1 - (1 - p)^n + return p < 1 / 2 ? -expm1(log1p(-p) * n) : 1 - (1 - p)^n end ccdf(d::Geometric, x::Real) = ccdf_int(d, x) function ccdf(d::Geometric, x::Int) p = succprob(d) n = max(x + 1, 0) - p < 1 / 2 ? exp(log1p(-p) * n) : (1 - p)^n + return p < 1 / 2 ? exp(log1p(-p) * n) : (1 - p)^n end logcdf(d::Geometric, x::Real) = logcdf_int(d, x) @@ -111,7 +111,7 @@ cquantile(d::Geometric, p::Real) = invlogccdf(d, log(p)) invlogcdf(d::Geometric, lp::Real) = invlogccdf(d, log1mexp(lp)) -function invlogccdf(d::Geometric{T}, lp::Real) where {T<:Real} +function invlogccdf(d::Geometric{T}, lp::Real) where {T <: Real} if (lp > zero(d.p)) || isnan(lp) return T(NaN) elseif isinf(lp) @@ -119,18 +119,18 @@ function invlogccdf(d::Geometric{T}, lp::Real) where {T<:Real} elseif lp == zero(d.p) return zero(T) end - max(ceil(lp / log1p(-d.p)) - 1, zero(T)) + return max(ceil(lp / log1p(-d.p)) - 1, zero(T)) end function laplace_transform(d::Geometric, t) p = succprob(d) - p / (p - (1 - p) * expm1(-t)) + return p / (p - (1 - p) * expm1(-t)) end mgf(d::Geometric, t::Real) = laplace_transform(d, -t) function cgf(d::Geometric, t) p = succprob(d) # log(p / (1 - (1-p) * exp(t))) - log(p) - log1mexp(t + log1p(-p)) + return log(p) - log1mexp(t + log1p(-p)) end cf(d::Geometric, t::Real) = laplace_transform(d, -t * im) @@ -138,7 +138,7 @@ cf(d::Geometric, t::Real) = laplace_transform(d, -t * im) # Inlining is required to hoist the d.p == 1//2 check when generating in bulk @inline function rand(rng::AbstractRNG, d::Geometric) - if d.p == 1 // 2 + return if d.p == 1 // 2 leading_zeros(rand(rng, UInt)) # This branch is a performance optimization else floor(Int, -randexp(rng) / log1p(-d.p)) @@ -154,26 +154,26 @@ struct GeometricStats <: SufficientStats GeometricStats(sx::Real, tw::Real) = new(sx, tw) end -suffstats(::Type{<:Geometric}, x::AbstractArray{T}) where {T<:Integer} = +suffstats(::Type{<:Geometric}, x::AbstractArray{T}) where {T <: Integer} = GeometricStats(sum(x), length(x)) function suffstats( - ::Type{<:Geometric}, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Integer} + ::Type{<:Geometric}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Integer} n = length(x) if length(w) != n throw(DimensionMismatch("Inconsistent argument dimensions.")) end sx = 0.0 tw = 0.0 - for i = 1:n + for i in 1:n wi = w[i] sx += wi * x[i] tw += wi end - GeometricStats(sx, tw) + return GeometricStats(sx, tw) end fit_mle(::Type{<:Geometric}, ss::GeometricStats) = Geometric(1 / (ss.sx / ss.tw + 1)) diff --git a/src/univariate/discrete/hypergeometric.jl b/src/univariate/discrete/hypergeometric.jl index 55959a9b36..73358a5fe6 100644 --- a/src/univariate/discrete/hypergeometric.jl +++ b/src/univariate/discrete/hypergeometric.jl @@ -31,7 +31,7 @@ struct Hypergeometric <: DiscreteUnivariateDistribution (nf, nf >= zero(nf)), zero(n) <= n <= ns + nf, ) - new(ns, nf, n) + return new(ns, nf, n) end end @@ -52,12 +52,12 @@ mean(d::Hypergeometric) = d.n * d.ns / (d.ns + d.nf) function var(d::Hypergeometric) N = d.ns + d.nf p = d.ns / N - d.n * p * (1.0 - p) * (N - d.n) / (N - 1.0) + 
return d.n * p * (1.0 - p) * (N - d.n) / (N - 1.0) end mode(d::Hypergeometric) = floor(Int, (d.n + 1) * (d.ns + 1) / (d.ns + d.nf + 2)) function modes(d::Hypergeometric) - if (d.ns == d.nf) && mod(d.n, 2) == 1 + return if (d.ns == d.nf) && mod(d.n, 2) == 1 [(d.n - 1) / 2, (d.n + 1) / 2] else [mode(d)] @@ -77,7 +77,7 @@ function kurtosis(d::Hypergeometric) (N - 1) * N^2 * (N * (N + 1) - 6 * ns * (N - ns) - 6 * n * (N - n)) + 6 * n * ns * (nf) * (N - n) * (5 * N - 6) b = (n * ns * (N - ns) * (N - n) * (N - 2) * (N - 3)) - a / b + return a / b end entropy(d::Hypergeometric) = entropy(map(Base.Fix1(pdf, d), support(d))) diff --git a/src/univariate/discrete/negativebinomial.jl b/src/univariate/discrete/negativebinomial.jl index c957ff6785..557931b56d 100644 --- a/src/univariate/discrete/negativebinomial.jl +++ b/src/univariate/discrete/negativebinomial.jl @@ -26,16 +26,16 @@ External links: * [Negative binomial distribution on Wolfram](https://reference.wolfram.com/language/ref/NegativeBinomialDistribution.html) """ -struct NegativeBinomial{T<:Real} <: DiscreteUnivariateDistribution +struct NegativeBinomial{T <: Real} <: DiscreteUnivariateDistribution r::T p::T - function NegativeBinomial{T}(r::T, p::T) where {T<:Real} + function NegativeBinomial{T}(r::T, p::T) where {T <: Real} return new{T}(r, p) end end -function NegativeBinomial(r::T, p::T; check_args::Bool = true) where {T<:Real} +function NegativeBinomial(r::T, p::T; check_args::Bool = true) where {T <: Real} @check_args NegativeBinomial (r, r > zero(r)) (p, zero(p) < p <= one(p)) return NegativeBinomial{T}(r, p) end @@ -52,13 +52,13 @@ NegativeBinomial() = NegativeBinomial{Float64}(1.0, 0.5) #### Conversions -function convert(::Type{NegativeBinomial{T}}, r::Real, p::Real) where {T<:Real} +function convert(::Type{NegativeBinomial{T}}, r::Real, p::Real) where {T <: Real} return NegativeBinomial(T(r), T(p)) end -function Base.convert(::Type{NegativeBinomial{T}}, d::NegativeBinomial) where {T<:Real} +function Base.convert(::Type{NegativeBinomial{T}}, d::NegativeBinomial) where {T <: Real} return NegativeBinomial{T}(T(d.r), T(d.p)) end -Base.convert(::Type{NegativeBinomial{T}}, d::NegativeBinomial{T}) where {T<:Real} = d +Base.convert(::Type{NegativeBinomial{T}}, d::NegativeBinomial{T}) where {T <: Real} = d #### Parameters @@ -94,7 +94,7 @@ function kldivergence(p::NegativeBinomial, q::NegativeBinomial; kwargs...) # this case. Hence we fall back to the numerical approximation. 
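    # (added note) `invoke` forces dispatch to the generic method for two
    # discrete univariate distributions, which approximates the divergence by
    # summing pdf-weighted log-ratios over the support, rather than
    # re-entering this specialized method.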
return invoke( kldivergence, - Tuple{UnivariateDistribution{Discrete},UnivariateDistribution{Discrete}}, + Tuple{UnivariateDistribution{Discrete}, UnivariateDistribution{Discrete}}, p, q; kwargs..., @@ -146,6 +146,6 @@ end mgf(d::NegativeBinomial, t::Real) = laplace_transform(d, -t) function cgf(d::NegativeBinomial, t) r, p = params(d) - r * cgf(Geometric{typeof(p)}(p), t) + return r * cgf(Geometric{typeof(p)}(p), t) end cf(d::NegativeBinomial, t::Real) = laplace_transform(d, -t * im) diff --git a/src/univariate/discrete/noncentralhypergeometric.jl b/src/univariate/discrete/noncentralhypergeometric.jl index 10c92a16c1..543d11e15a 100644 --- a/src/univariate/discrete/noncentralhypergeometric.jl +++ b/src/univariate/discrete/noncentralhypergeometric.jl @@ -1,7 +1,7 @@ # Noncentral hypergeometric distribution # TODO: this distribution needs clean-up and testing -abstract type NoncentralHypergeometric{T<:Real} <: DiscreteUnivariateDistribution end +abstract type NoncentralHypergeometric{T <: Real} <: DiscreteUnivariateDistribution end ### handling support @@ -9,8 +9,8 @@ abstract type NoncentralHypergeometric{T<:Real} <: DiscreteUnivariateDistributio # Functions -function quantile(d::NoncentralHypergeometric{T}, q::Real) where {T<:Real} - if !(zero(q) <= q <= one(q)) +function quantile(d::NoncentralHypergeometric{T}, q::Real) where {T <: Real} + return if !(zero(q) <= q <= one(q)) T(NaN) else range = support(d) @@ -29,23 +29,23 @@ function quantile(d::NoncentralHypergeometric{T}, q::Real) where {T<:Real} end params(d::NoncentralHypergeometric) = (d.ns, d.nf, d.n, d.ω) -@inline partype(d::NoncentralHypergeometric{T}) where {T<:Real} = T +@inline partype(d::NoncentralHypergeometric{T}) where {T <: Real} = T ## Fisher's noncentral hypergeometric distribution -struct FisherNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} +struct FisherNoncentralHypergeometric{T <: Real} <: NoncentralHypergeometric{T} ns::Int # number of successes in population nf::Int # number of failures in population n::Int # sample size ω::T # odds ratio function FisherNoncentralHypergeometric{T}( - ns::Real, - nf::Real, - n::Real, - ω::T; - check_args::Bool = true, - ) where {T} + ns::Real, + nf::Real, + n::Real, + ω::T; + check_args::Bool = true, + ) where {T} @check_args( FisherNoncentralHypergeometric, (ns, ns >= zero(ns)), @@ -53,7 +53,7 @@ struct FisherNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} zero(n) < n < ns + nf, (ω, ω > zero(ω)), ) - new{T}(ns, nf, n, ω) + return new{T}(ns, nf, n, ω) end end @@ -80,17 +80,17 @@ convert( nf::Real, n::Real, ω::Real, -) where {T<:Real} = FisherNoncentralHypergeometric(ns, nf, n, T(ω)) +) where {T <: Real} = FisherNoncentralHypergeometric(ns, nf, n, T(ω)) function Base.convert( - ::Type{FisherNoncentralHypergeometric{T}}, - d::FisherNoncentralHypergeometric, -) where {T<:Real} - FisherNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) + ::Type{FisherNoncentralHypergeometric{T}}, + d::FisherNoncentralHypergeometric, + ) where {T <: Real} + return FisherNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) end Base.convert( ::Type{FisherNoncentralHypergeometric{T}}, d::FisherNoncentralHypergeometric{T}, -) where {T<:Real} = d +) where {T <: Real} = d function _mode(d::FisherNoncentralHypergeometric) A = d.ω - 1 @@ -121,7 +121,7 @@ function pdf(d::FisherNoncentralHypergeometric, k::Integer) s = one(ω) fᵢ = one(ω) fₖ = one(ω) - for i = (η+1):u + for i in (η + 1):u rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ @@ -137,7 +137,7 
@@ function pdf(d::FisherNoncentralHypergeometric, k::Integer) end end fᵢ = one(ω) - for i = (η-1):-1:l + for i in (η - 1):-1:l rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ @@ -171,7 +171,7 @@ function cdf(d::FisherNoncentralHypergeometric, k::Integer) s = one(ω) fᵢ = one(ω) Fₖ = k >= η ? one(ω) : zero(ω) - for i = (η+1):u + for i in (η + 1):u rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ @@ -186,7 +186,7 @@ function cdf(d::FisherNoncentralHypergeometric, k::Integer) end end fᵢ = one(ω) - for i = (η-1):-1:l + for i in (η - 1):-1:l rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ @@ -212,7 +212,7 @@ function _expectation(f, d::FisherNoncentralHypergeometric) s = one(ω) m = f(η) * s fᵢ = one(ω) - for i = (η+1):u + for i in (η + 1):u rᵢ = (d.ns - i + 1) * ω / (i * (d.nf - d.n + i)) * (d.n - i + 1) fᵢ *= rᵢ @@ -226,7 +226,7 @@ function _expectation(f, d::FisherNoncentralHypergeometric) m += f(i) * fᵢ end fᵢ = one(ω) - for i = (η-1):-1:l + for i in (η - 1):-1:l rᵢ₊ = (d.ns - i) * ω / ((i + 1) * (d.nf - d.n + i + 1)) * (d.n - i) fᵢ /= rᵢ₊ @@ -252,19 +252,19 @@ end ## Wallenius' noncentral hypergeometric distribution -struct WalleniusNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} +struct WalleniusNoncentralHypergeometric{T <: Real} <: NoncentralHypergeometric{T} ns::Int # number of successes in population nf::Int # number of failures in population n::Int # sample size ω::T # odds ratio function WalleniusNoncentralHypergeometric{T}( - ns::Real, - nf::Real, - n::Real, - ω::T; - check_args::Bool = true, - ) where {T} + ns::Real, + nf::Real, + n::Real, + ω::T; + check_args::Bool = true, + ) where {T} @check_args( WalleniusNoncentralHypergeometric, (ns, ns >= zero(ns)), @@ -272,7 +272,7 @@ struct WalleniusNoncentralHypergeometric{T<:Real} <: NoncentralHypergeometric{T} zero(n) < n < ns + nf, (ω, ω > zero(ω)), ) - new{T}(ns, nf, n, ω) + return new{T}(ns, nf, n, ω) end end @@ -299,17 +299,17 @@ convert( nf::Real, n::Real, ω::Real, -) where {T<:Real} = WalleniusNoncentralHypergeometric(ns, nf, n, T(ω)) +) where {T <: Real} = WalleniusNoncentralHypergeometric(ns, nf, n, T(ω)) function Base.convert( - ::Type{WalleniusNoncentralHypergeometric{T}}, - d::WalleniusNoncentralHypergeometric, -) where {T<:Real} - WalleniusNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) + ::Type{WalleniusNoncentralHypergeometric{T}}, + d::WalleniusNoncentralHypergeometric, + ) where {T <: Real} + return WalleniusNoncentralHypergeometric{T}(d.ns, d.nf, d.n, T(d.ω)) end Base.convert( ::Type{WalleniusNoncentralHypergeometric{T}}, d::WalleniusNoncentralHypergeometric{T}, -) where {T<:Real} = d +) where {T <: Real} = d # Properties function _discretenonparametric(d::WalleniusNoncentralHypergeometric) @@ -329,7 +329,7 @@ function logpdf(d::WalleniusNoncentralHypergeometric, k::Real) f(t) = (1 - t^(d.ω / D))^k * (1 - t^(1 / D))^(d.n - k) I, _ = quadgk(f, 0, 1) return -log(d.ns + 1) - logbeta(d.ns - k + 1, k + 1) - log(d.nf + 1) - - logbeta(d.nf - d.n + k + 1, d.n - k + 1) + log(I) + logbeta(d.nf - d.n + k + 1, d.n - k + 1) + log(I) else return log(zero(k)) end diff --git a/src/univariate/discrete/poisson.jl b/src/univariate/discrete/poisson.jl index 3f1ddd83b9..a2e285502f 100644 --- a/src/univariate/discrete/poisson.jl +++ b/src/univariate/discrete/poisson.jl @@ -20,10 +20,10 @@ External links: * [Poisson distribution on Wikipedia](http://en.wikipedia.org/wiki/Poisson_distribution) """ -struct Poisson{T<:Real} <: 
DiscreteUnivariateDistribution +struct Poisson{T <: Real} <: DiscreteUnivariateDistribution λ::T - Poisson{T}(λ::Real) where {T<:Real} = new{T}(λ) + Poisson{T}(λ::Real) where {T <: Real} = new{T}(λ) end function Poisson(λ::Real; check_args::Bool = true) @@ -37,9 +37,9 @@ Poisson() = Poisson{Float64}(1.0) @distr_support Poisson 0 (d.λ == zero(typeof(d.λ)) ? 0 : Inf) #### Conversions -convert(::Type{Poisson{T}}, λ::S) where {T<:Real,S<:Real} = Poisson(T(λ)) -Base.convert(::Type{Poisson{T}}, d::Poisson) where {T<:Real} = Poisson{T}(T(d.λ)) -Base.convert(::Type{Poisson{T}}, d::Poisson{T}) where {T<:Real} = d +convert(::Type{Poisson{T}}, λ::S) where {T <: Real, S <: Real} = Poisson(T(λ)) +Base.convert(::Type{Poisson{T}}, d::Poisson) where {T <: Real} = Poisson{T}(T(d.λ)) +Base.convert(::Type{Poisson{T}}, d::Poisson{T}) where {T <: Real} = d ### Parameters @@ -56,7 +56,7 @@ mode(d::Poisson) = floor(Int, d.λ) function modes(d::Poisson) λ = d.λ - isinteger(λ) ? [round(Int, λ) - 1, round(Int, λ)] : [floor(Int, λ)] + return isinteger(λ) ? [round(Int, λ) - 1, round(Int, λ)] : [floor(Int, λ)] end var(d::Poisson) = d.λ @@ -65,21 +65,21 @@ skewness(d::Poisson) = one(typeof(d.λ)) / sqrt(d.λ) kurtosis(d::Poisson) = one(typeof(d.λ)) / d.λ -function entropy(d::Poisson{T}) where {T<:Real} +function entropy(d::Poisson{T}) where {T <: Real} λ = rate(d) if λ == zero(T) return zero(T) elseif λ < 50 s = zero(T) λk = one(T) - for k = 1:100 + for k in 1:100 λk *= λ s += λk * loggamma(k + 1) / gamma(k + 1) end return λ * (1 - log(λ)) + exp(-λ) * s else return log(2 * pi * ℯ * λ) / 2 - (1 / (12 * λ)) - (1 / (24 * λ * λ)) - - (19 / (360 * λ * λ * λ)) + (19 / (360 * λ * λ * λ)) end end @@ -114,14 +114,14 @@ struct PoissonStats <: SufficientStats tw::Float64 # total sample weight end -suffstats(::Type{<:Poisson}, x::AbstractArray{T}) where {T<:Integer} = +suffstats(::Type{<:Poisson}, x::AbstractArray{T}) where {T <: Integer} = PoissonStats(sum(x), length(x)) function suffstats( - ::Type{<:Poisson}, - x::AbstractArray{T}, - w::AbstractArray{Float64}, -) where {T<:Integer} + ::Type{<:Poisson}, + x::AbstractArray{T}, + w::AbstractArray{Float64}, + ) where {T <: Integer} n = length(x) n == length(w) || throw(DimensionMismatch("Inconsistent array lengths.")) sx = 0.0 @@ -131,7 +131,7 @@ function suffstats( @inbounds sx += x[i] * wi tw += wi end - PoissonStats(sx, tw) + return PoissonStats(sx, tw) end fit_mle(::Type{<:Poisson}, ss::PoissonStats) = Poisson(ss.sx / ss.tw) diff --git a/src/univariate/discrete/poissonbinomial.jl b/src/univariate/discrete/poissonbinomial.jl index 259a46a57e..58f1364945 100644 --- a/src/univariate/discrete/poissonbinomial.jl +++ b/src/univariate/discrete/poissonbinomial.jl @@ -23,15 +23,15 @@ External links: * [Poisson-binomial distribution on Wikipedia](http://en.wikipedia.org/wiki/Poisson_binomial_distribution) """ -mutable struct PoissonBinomial{T<:Real,P<:AbstractVector{T}} <: - DiscreteUnivariateDistribution +mutable struct PoissonBinomial{T <: Real, P <: AbstractVector{T}} <: + DiscreteUnivariateDistribution p::P - pmf::Union{Nothing,Vector{T}} # lazy computation of the probability mass function + pmf::Union{Nothing, Vector{T}} # lazy computation of the probability mass function function PoissonBinomial{T}( - p::AbstractVector{T}; - check_args::Bool = true, - ) where {T<:Real} + p::AbstractVector{T}; + check_args::Bool = true, + ) where {T <: Real} @check_args( PoissonBinomial, ( @@ -40,11 +40,11 @@ mutable struct PoissonBinomial{T<:Real,P<:AbstractVector{T}} <: "p must be a vector of success 
probabilities", ), ) - return new{T,typeof(p)}(p, nothing) + return new{T, typeof(p)}(p, nothing) end end -function PoissonBinomial(p::AbstractVector{T}; check_args::Bool = true) where {T<:Real} +function PoissonBinomial(p::AbstractVector{T}; check_args::Bool = true) where {T <: Real} return PoissonBinomial{T}(p; check_args = check_args) end @@ -68,10 +68,10 @@ end #### Conversions -function PoissonBinomial(::Type{PoissonBinomial{T}}, p::AbstractVector{S}) where {T,S} +function PoissonBinomial(::Type{PoissonBinomial{T}}, p::AbstractVector{S}) where {T, S} return PoissonBinomial(AbstractVector{T}(p)) end -function PoissonBinomial(::Type{PoissonBinomial{T}}, d::PoissonBinomial{S}) where {T,S} +function PoissonBinomial(::Type{PoissonBinomial{T}}, d::PoissonBinomial{S}) where {T, S} return PoissonBinomial(AbstractVector{T}(d.p), check_args = false) end @@ -108,7 +108,7 @@ function kurtosis(d::PoissonBinomial{T}) where {T} v += p[i] * (one(T) - p[i]) s += p[i] * (one(T) - p[i]) * (one(T) - T(6) * (one(T) - p[i]) * p[i]) end - s / v / v + return s / v / v end entropy(d::PoissonBinomial) = entropy(d.pmf) @@ -122,14 +122,14 @@ quantile(d::PoissonBinomial, x::Float64) = quantile(Categorical(d.pmf), x) - 1 function mgf(d::PoissonBinomial, t::Real) expm1_t = expm1(t) - mapreduce(*, succprob(d)) do p + return mapreduce(*, succprob(d)) do p 1 + p * expm1_t end end function cf(d::PoissonBinomial, t::Real) cis_t = cis(t) - mapreduce(*, succprob(d)) do p + return mapreduce(*, succprob(d)) do p 1 - p + p * cis_t end end @@ -159,8 +159,8 @@ function poissonbinomial_pdf(p) S[1] = 1 @inbounds for (col, p_col) in enumerate(p) q_col = 1 - p_col - for row = col:(-1):1 - S[row+1] = q_col * S[row+1] + p_col * S[row] + for row in col:(-1):1 + S[row + 1] = q_col * S[row + 1] + p_col * S[row] end S[1] *= q_col end @@ -174,28 +174,28 @@ end # On computing the distribution function for the Poisson binomial # distribution. Computational Statistics and Data Analysis, 59, 41–51. 
# -function poissonbinomial_pdf_fft(p::AbstractArray{T}) where {T<:Real} +function poissonbinomial_pdf_fft(p::AbstractArray{T}) where {T <: Real} n = length(p) ω = 2 * one(T) / (n + 1) x = Vector{Complex{T}}(undef, n + 1) lmax = ceil(Int, n / 2) x[1] = one(T) / (n + 1) - for l = 1:lmax + for l in 1:lmax logz = zero(T) argz = zero(T) - for j = 1:n + for j in 1:n zjl = 1 - p[j] + p[j] * cospi(ω * l) + im * p[j] * sinpi(ω * l) logz += log(abs(zjl)) argz += atan(imag(zjl), real(zjl)) end dl = exp(logz) - x[l+1] = dl * cos(argz) / (n + 1) + dl * sin(argz) * im / (n + 1) + x[l + 1] = dl * cos(argz) / (n + 1) + dl * sin(argz) * im / (n + 1) if n + 1 - l > l - x[n+1-l+1] = conj(x[l+1]) + x[n + 1 - l + 1] = conj(x[l + 1]) end end - [max(0, real(xi)) for xi in _dft(x)] + return [max(0, real(xi)) for xi in _dft(x)] end # A simple implementation of a DFT to avoid introducing a dependency @@ -203,8 +203,8 @@ end function _dft(x::Vector{T}) where {T} n = length(x) y = zeros(complex(float(T)), n) - @inbounds for j = 0:(n-1), k = 0:(n-1) - y[k+1] += x[j+1] * cis(-π * float(T)(2 * mod(j * k, n)) / n) + @inbounds for j in 0:(n - 1), k in 0:(n - 1) + y[k + 1] += x[j + 1] * cis(-π * float(T)(2 * mod(j * k, n)) / n) end return y end @@ -226,29 +226,29 @@ sampler(d::PoissonBinomial) = PoissBinAliasSampler(d) function poissonbinomial_pdf_partialderivatives(p::AbstractVector{<:Real}) n = length(p) A = zeros(eltype(p), n, n + 1) - @inbounds for j = 1:n + @inbounds for j in 1:n A[j, end] = 1 end @inbounds for (i, pi) in enumerate(p) qi = 1 - pi - for k = (n-i+1):n + for k in (n - i + 1):n kp1 = k + 1 - for j = 1:(i-1) + for j in 1:(i - 1) A[j, k] = pi * A[j, k] + qi * A[j, kp1] end - for j = (i+1):n + for j in (i + 1):n A[j, k] = pi * A[j, k] + qi * A[j, kp1] end end - for j = 1:(i-1) + for j in 1:(i - 1) A[j, end] *= pi end - for j = (i+1):n + for j in (i + 1):n A[j, end] *= pi end end - @inbounds for j = 1:n, i = 1:n - A[i, j] -= A[i, j+1] + @inbounds for j in 1:n, i in 1:n + A[i, j] -= A[i, j + 1] end return A end diff --git a/src/univariate/discrete/skellam.jl b/src/univariate/discrete/skellam.jl index 64aa9acb7b..c9c6463329 100644 --- a/src/univariate/discrete/skellam.jl +++ b/src/univariate/discrete/skellam.jl @@ -22,17 +22,17 @@ External links: * [Skellam distribution on Wikipedia](http://en.wikipedia.org/wiki/Skellam_distribution) """ -struct Skellam{T<:Real} <: DiscreteUnivariateDistribution +struct Skellam{T <: Real} <: DiscreteUnivariateDistribution μ1::T μ2::T - function Skellam{T}(μ1::T, μ2::T) where {T<:Real} + function Skellam{T}(μ1::T, μ2::T) where {T <: Real} return new{T}(μ1, μ2) end end -function Skellam(μ1::T, μ2::T; check_args::Bool = true) where {T<:Real} +function Skellam(μ1::T, μ2::T; check_args::Bool = true) where {T <: Real} @check_args Skellam (μ1, μ1 > zero(μ1)) (μ2, μ2 > zero(μ2)) return Skellam{T}(μ1, μ2) end @@ -43,7 +43,7 @@ Skellam(μ1::Integer, μ2::Integer; check_args::Bool = true) = Skellam(float(μ1), float(μ2); check_args = check_args) function Skellam(μ::Real; check_args::Bool = true) @check_args Skellam (μ, μ > zero(μ)) - Skellam(μ, μ; check_args = false) + return Skellam(μ, μ; check_args = false) end Skellam() = Skellam{Float64}(1.0, 1.0) @@ -51,9 +51,9 @@ Skellam() = Skellam{Float64}(1.0, 1.0) #### Conversions -convert(::Type{Skellam{T}}, μ1::S, μ2::S) where {T<:Real,S<:Real} = Skellam(T(μ1), T(μ2)) -Base.convert(::Type{Skellam{T}}, d::Skellam) where {T<:Real} = Skellam{T}(T(d.μ1), T(d.μ2)) -Base.convert(::Type{Skellam{T}}, d::Skellam{T}) where {T<:Real} = d 
+convert(::Type{Skellam{T}}, μ1::S, μ2::S) where {T <: Real, S <: Real} = Skellam(T(μ1), T(μ2)) +Base.convert(::Type{Skellam{T}}, d::Skellam) where {T <: Real} = Skellam{T}(T(d.μ1), T(d.μ2)) +Base.convert(::Type{Skellam{T}}, d::Skellam{T}) where {T <: Real} = d #### Parameters @@ -78,8 +78,8 @@ function logpdf(d::Skellam, x::Real) μ1, μ2 = params(d) if insupport(d, x) return -(μ1 + μ2) + - (x / 2) * log(μ1 / μ2) + - log(besseli(x, 2 * sqrt(μ1) * sqrt(μ2))) + (x / 2) * log(μ1 / μ2) + + log(besseli(x, 2 * sqrt(μ1) * sqrt(μ2))) else return one(x) / 2 * log(zero(μ1 / μ2)) end @@ -87,12 +87,12 @@ end function mgf(d::Skellam, t::Real) μ1, μ2 = params(d) - exp(μ1 * (exp(t) - 1) + μ2 * (exp(-t) - 1)) + return exp(μ1 * (exp(t) - 1) + μ2 * (exp(-t) - 1)) end function cf(d::Skellam, t::Real) μ1, μ2 = params(d) - exp(μ1 * (cis(t) - 1) + μ2 * (cis(-t) - 1)) + return exp(μ1 * (cis(t) - 1) + μ2 * (cis(-t) - 1)) end """ diff --git a/src/univariate/discrete/soliton.jl b/src/univariate/discrete/soliton.jl index db9fce7609..f360f6ac72 100644 --- a/src/univariate/discrete/soliton.jl +++ b/src/univariate/discrete/soliton.jl @@ -38,12 +38,12 @@ struct Soliton <: DiscreteUnivariateDistribution 0 < δ < 1 || throw(DomainError(δ, "Expected 0 < δ < 1.")) 0 < M <= K || throw(DomainError(M, "Expected 0 < M <= K.")) 0 <= atol < 1 || throw(DomainError(atol, "Expected 0 <= atol < 1.")) - PDF = [soliton_τ(K, M, δ, i) + soliton_ρ(K, i) for i = 1:K] + PDF = [soliton_τ(K, M, δ, i) + soliton_ρ(K, i) for i in 1:K] PDF ./= sum(PDF) - degrees = [i for i = 1:K if PDF[i] > atol] + degrees = [i for i in 1:K if PDF[i] > atol] CDF = cumsum([PDF[i] for i in degrees]) CDF ./= CDF[end] - new(K, M, δ, atol, degrees, CDF) + return new(K, M, δ, atol, degrees, CDF) end end @@ -92,7 +92,7 @@ function pdf(Ω::Soliton, i::Real) (j > length(Ω.degrees) || Ω.degrees[j] != i) && return zero(eltype(Ω.CDF)) rv = Ω.CDF[j] if j > 1 - rv -= Ω.CDF[j-1] + rv -= Ω.CDF[j - 1] end return rv end @@ -103,7 +103,7 @@ function cdf(Ω::Soliton, i::Integer) i > Ω.degrees[end] && return 1.0 j = searchsortedfirst(Ω.degrees, i) Ω.degrees[j] == i && return Ω.CDF[j] - return Ω.CDF[j-1] + return Ω.CDF[j - 1] end Statistics.mean(Ω::Soliton) = sum(i -> i * pdf(Ω, i), Ω.degrees) diff --git a/src/univariate/locationscale.jl b/src/univariate/locationscale.jl index 7d3ff8ca16..e48b916890 100644 --- a/src/univariate/locationscale.jl +++ b/src/univariate/locationscale.jl @@ -33,34 +33,34 @@ d = μ + σ * ρ # Create location-scale transformed distribution params(d) # Get the parameters, i.e. (μ, σ, ρ) ``` """ -struct AffineDistribution{T<:Real,S<:ValueSupport,D<:UnivariateDistribution{S}} <: - UnivariateDistribution{S} +struct AffineDistribution{T <: Real, S <: ValueSupport, D <: UnivariateDistribution{S}} <: + UnivariateDistribution{S} μ::T σ::T ρ::D # TODO: Remove? 
It is not used in Distributions anymore - function AffineDistribution{T,S,D}( - μ::T, - σ::T, - ρ::D; - check_args::Bool = true, - ) where {T<:Real,S<:ValueSupport,D<:UnivariateDistribution{S}} + function AffineDistribution{T, S, D}( + μ::T, + σ::T, + ρ::D; + check_args::Bool = true, + ) where {T <: Real, S <: ValueSupport, D <: UnivariateDistribution{S}} @check_args AffineDistribution (σ, !iszero(σ)) - new{T,S,D}(μ, σ, ρ) + return new{T, S, D}(μ, σ, ρ) end - function AffineDistribution{T}(μ::T, σ::T, ρ::UnivariateDistribution) where {T<:Real} + function AffineDistribution{T}(μ::T, σ::T, ρ::UnivariateDistribution) where {T <: Real} D = typeof(ρ) S = value_support(D) - return new{T,S,D}(μ, σ, ρ) + return new{T, S, D}(μ, σ, ρ) end end function AffineDistribution( - μ::T, - σ::T, - ρ::UnivariateDistribution; - check_args::Bool = true, -) where {T<:Real} + μ::T, + σ::T, + ρ::UnivariateDistribution; + check_args::Bool = true, + ) where {T <: Real} @check_args AffineDistribution (σ, !iszero(σ)) # μ and σ act on both random numbers and parameter-like quantities like mean # hence do not promote: but take care in eltype and partype @@ -68,16 +68,16 @@ function AffineDistribution( end function AffineDistribution( - μ::Real, - σ::Real, - ρ::UnivariateDistribution; - check_args::Bool = true, -) + μ::Real, + σ::Real, + ρ::UnivariateDistribution; + check_args::Bool = true, + ) return AffineDistribution(promote(μ, σ)..., ρ; check_args = check_args) end # aliases -const LocationScale{T,S,D} = AffineDistribution{T,S,D} +const LocationScale{T, S, D} = AffineDistribution{T, S, D} function LocationScale(μ::Real, σ::Real, ρ::UnivariateDistribution; check_args::Bool = true) Base.depwarn("`LocationScale` is deprecated. Use `+` and `*` instead", :LocationScale) # preparation for future PR where I remove σ > 0 check @@ -85,12 +85,12 @@ function LocationScale(μ::Real, σ::Real, ρ::UnivariateDistribution; check_arg return AffineDistribution(μ, σ, ρ; check_args = false) end -const ContinuousAffineDistribution{T<:Real,D<:ContinuousUnivariateDistribution} = - AffineDistribution{T,Continuous,D} -const DiscreteAffineDistribution{T<:Real,D<:DiscreteUnivariateDistribution} = - AffineDistribution{T,Discrete,D} +const ContinuousAffineDistribution{T <: Real, D <: ContinuousUnivariateDistribution} = + AffineDistribution{T, Continuous, D} +const DiscreteAffineDistribution{T <: Real, D <: DiscreteUnivariateDistribution} = + AffineDistribution{T, Discrete, D} -Base.eltype(::Type{<:AffineDistribution{T,S,D}}) where {T,S,D} = promote_type(eltype(D), T) +Base.eltype(::Type{<:AffineDistribution{T, S, D}}) where {T, S, D} = promote_type(eltype(D), T) minimum(d::AffineDistribution) = d.σ > 0 ? 
d.μ + d.σ * minimum(d.ρ) : d.μ + d.σ * maximum(d.ρ) @@ -117,11 +117,11 @@ convert( μ::Real, σ::Real, ρ::D, -) where {T<:Real,D<:UnivariateDistribution} = AffineDistribution(T(μ), T(σ), ρ) -function Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution) where {T<:Real} - AffineDistribution{T}(T(d.μ), T(d.σ), d.ρ) +) where {T <: Real, D <: UnivariateDistribution} = AffineDistribution(T(μ), T(σ), ρ) +function Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution) where {T <: Real} + return AffineDistribution{T}(T(d.μ), T(d.σ), d.ρ) end -Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution{T}) where {T<:Real} = d +Base.convert(::Type{AffineDistribution{T}}, d::AffineDistribution{T}) where {T <: Real} = d #### Parameters diff --git a/src/univariate/orderstatistic.jl b/src/univariate/orderstatistic.jl index 36e067fd0c..05ed09c3c2 100644 --- a/src/univariate/orderstatistic.jl +++ b/src/univariate/orderstatistic.jl @@ -39,19 +39,19 @@ OrderStatistic(DiscreteUniform(10), 10, 10) # distribution of the sample maximu OrderStatistic(Gamma(1, 1), 11, 5) # distribution of the sample median ``` """ -struct OrderStatistic{D<:UnivariateDistribution,S<:ValueSupport} <: - UnivariateDistribution{S} +struct OrderStatistic{D <: UnivariateDistribution, S <: ValueSupport} <: + UnivariateDistribution{S} dist::D n::Int rank::Int function OrderStatistic( - dist::UnivariateDistribution, - n::Int, - rank::Int; - check_args::Bool = true, - ) + dist::UnivariateDistribution, + n::Int, + rank::Int; + check_args::Bool = true, + ) @check_args(OrderStatistic, 1 ≤ rank ≤ n) - return new{typeof(dist),value_support(typeof(dist))}(dist, n, rank) + return new{typeof(dist), value_support(typeof(dist))}(dist, n, rank) end end diff --git a/src/univariates.jl b/src/univariates.jl index a8212310e5..8b98e956f2 100644 --- a/src/univariates.jl +++ b/src/univariates.jl @@ -1,6 +1,6 @@ #### Domain && Support -struct RealInterval{T<:Real} +struct RealInterval{T <: Real} lb::T ub::T end @@ -12,15 +12,15 @@ Base.maximum(r::RealInterval) = r.ub Base.extrema(r::RealInterval) = (r.lb, r.ub) Base.in(x::Real, r::RealInterval) = r.lb <= x <= r.ub -isbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = +isbounded(d::Union{D, Type{D}}) where {D <: UnivariateDistribution} = isupperbounded(d) && islowerbounded(d) -islowerbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = minimum(d) > -Inf -isupperbounded(d::Union{D,Type{D}}) where {D<:UnivariateDistribution} = maximum(d) < +Inf +islowerbounded(d::Union{D, Type{D}}) where {D <: UnivariateDistribution} = minimum(d) > -Inf +isupperbounded(d::Union{D, Type{D}}) where {D <: UnivariateDistribution} = maximum(d) < +Inf -hasfinitesupport(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = +hasfinitesupport(d::Union{D, Type{D}}) where {D <: DiscreteUnivariateDistribution} = isbounded(d) -hasfinitesupport(d::Union{D,Type{D}}) where {D<:ContinuousUnivariateDistribution} = false +hasfinitesupport(d::Union{D, Type{D}}) where {D <: ContinuousUnivariateDistribution} = false Base.:(==)(r1::RealInterval, r2::RealInterval) = r1.lb == r2.lb && r1.ub == r2.ub @@ -92,32 +92,32 @@ Generic fallback methods are provided, but it is often the case that `insupport` done more efficiently, and a specialized `insupport` is thus desirable. You should also override this function if the support is composed of multiple disjoint intervals. 
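For example (illustrative):

```julia
insupport(Normal(), 2.5)    # true: the support of a normal distribution is all of ℝ
insupport(Poisson(3), 2.5)  # false: only nonnegative integers are in the support
```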
""" -insupport{D<:UnivariateDistribution}(d::Union{D,Type{D}}, x::Any) +insupport{D <: UnivariateDistribution}(d::Union{D, Type{D}}, x::Any) function insupport!( - r::AbstractArray, - d::Union{D,Type{D}}, - X::AbstractArray, -) where {D<:UnivariateDistribution} + r::AbstractArray, + d::Union{D, Type{D}}, + X::AbstractArray, + ) where {D <: UnivariateDistribution} length(r) == length(X) || throw(DimensionMismatch("Inconsistent array dimensions.")) - for i = 1:length(X) + for i in 1:length(X) @inbounds r[i] = insupport(d, X[i]) end return r end -insupport(d::Union{D,Type{D}}, X::AbstractArray) where {D<:UnivariateDistribution} = +insupport(d::Union{D, Type{D}}, X::AbstractArray) where {D <: UnivariateDistribution} = insupport!(BitArray(undef, size(X)), d, X) -insupport(d::Union{D,Type{D}}, x::Real) where {D<:ContinuousUnivariateDistribution} = +insupport(d::Union{D, Type{D}}, x::Real) where {D <: ContinuousUnivariateDistribution} = minimum(d) <= x <= maximum(d) -insupport(d::Union{D,Type{D}}, x::Real) where {D<:DiscreteUnivariateDistribution} = +insupport(d::Union{D, Type{D}}, x::Real) where {D <: DiscreteUnivariateDistribution} = isinteger(x) && minimum(d) <= x <= maximum(d) -support(d::Union{D,Type{D}}) where {D<:ContinuousUnivariateDistribution} = +support(d::Union{D, Type{D}}) where {D <: ContinuousUnivariateDistribution} = RealInterval(minimum(d), maximum(d)) -support(d::Union{D,Type{D}}) where {D<:DiscreteUnivariateDistribution} = +support(d::Union{D, Type{D}}) where {D <: DiscreteUnivariateDistribution} = round(Int, minimum(d)):round(Int, maximum(d)) # Type used for dispatch on finite support @@ -130,13 +130,15 @@ macro distr_support(D, lb, ub) D_has_constantbounds = (isa(ub, Number) || ub == :Inf) && (isa(lb, Number) || lb == :(-Inf)) - paramdecl = D_has_constantbounds ? :(d::Union{$D,Type{<:$D}}) : :(d::$D) + paramdecl = D_has_constantbounds ? :(d::Union{$D, Type{<:$D}}) : :(d::$D) # overall - esc(quote - Base.minimum($(paramdecl)) = $lb - Base.maximum($(paramdecl)) = $ub - end) + return esc( + quote + Base.minimum($(paramdecl)) = $lb + Base.maximum($(paramdecl)) = $ub + end + ) end @@ -321,7 +323,7 @@ See also: [`logpdf`](@ref). pdf(d::UnivariateDistribution, x::Real) = exp(logpdf(d, x)) # extract value from array of zero dimension -pdf(d::UnivariateDistribution, x::AbstractArray{<:Real,0}) = pdf(d, first(x)) +pdf(d::UnivariateDistribution, x::AbstractArray{<:Real, 0}) = pdf(d, first(x)) """ logpdf(d::UnivariateDistribution, x::Real) @@ -333,7 +335,7 @@ See also: [`pdf`](@ref). 
logpdf(d::UnivariateDistribution, x::Real) # extract value from array of zero dimension -logpdf(d::UnivariateDistribution, x::AbstractArray{<:Real,0}) = logpdf(d, first(x)) +logpdf(d::UnivariateDistribution, x::AbstractArray{<:Real, 0}) = logpdf(d, first(x)) # loglikelihood for `Real` Base.@propagate_inbounds loglikelihood(d::UnivariateDistribution, x::Real) = logpdf(d, x) @@ -434,10 +436,10 @@ gradlogpdf(d::ContinuousUnivariateDistribution, x::Real) = function _pdf_fill_outside!( - r::AbstractArray, - d::DiscreteUnivariateDistribution, - X::UnitRange, -) + r::AbstractArray, + d::DiscreteUnivariateDistribution, + X::UnitRange, + ) vl = vfirst = first(X) vr = vlast = last(X) n = vlast - vfirst + 1 @@ -456,20 +458,20 @@ function _pdf_fill_outside!( # fill left part if vl > vfirst - for i = 1:(vl-vfirst) + for i in 1:(vl - vfirst) r[i] = 0.0 end end # fill central part: with non-zero pdf fm1 = vfirst - 1 - for v = vl:vr - r[v-fm1] = pdf(d, v) + for v in vl:vr + r[v - fm1] = pdf(d, v) end # fill right part if vr < vlast - for i = (vr-vfirst+2):n + for i in (vr - vfirst + 2):n r[i] = 0.0 end end @@ -481,8 +483,8 @@ function _pdf!(r::AbstractArray{<:Real}, d::DiscreteUnivariateDistribution, X::U # fill central part: with non-zero pdf fm1 = vfirst - 1 - for v = vl:vr - r[v-fm1] = pdf(d, v) + for v in vl:vr + r[v - fm1] = pdf(d, v) end return r end @@ -491,19 +493,19 @@ end abstract type RecursiveProbabilityEvaluator end function _pdf!( - r::AbstractArray, - d::DiscreteUnivariateDistribution, - X::UnitRange, - rpe::RecursiveProbabilityEvaluator, -) + r::AbstractArray, + d::DiscreteUnivariateDistribution, + X::UnitRange, + rpe::RecursiveProbabilityEvaluator, + ) vl, vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 - r[vl-fm1] = pv = pdf(d, vl) - for v = (vl+1):vr - r[v-fm1] = pv = nextpdf(rpe, pv, v) + r[vl - fm1] = pv = pdf(d, vl) + for v in (vl + 1):vr + r[v - fm1] = pv = nextpdf(rpe, pv, v) end end @@ -583,14 +585,14 @@ function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") result = - if isfinite(minimum_d) && - !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) - x < minimum_d ? zero(c) : c - else - c = 1 - sum(Base.Fix1(pdf, d), (min(x+1, maximum_d)):maximum_d) - x >= maximum_d ? one(c) : c - end + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) + x < minimum_d ? zero(c) : c + else + c = 1 - sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? one(c) : c + end return result end @@ -600,14 +602,14 @@ function integerunitrange_ccdf(d::DiscreteUnivariateDistribution, x::Integer) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") result = - if isfinite(minimum_d) && - !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) - x < minimum_d ? one(c) : c - else - c = sum(Base.Fix1(pdf, d), (min(x+1, maximum_d)):maximum_d) - x >= maximum_d ? zero(c) : c - end + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) + x < minimum_d ? one(c) : c + else + c = sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? 
zero(c) : c + end return result end @@ -617,14 +619,14 @@ function integerunitrange_logcdf(d::DiscreteUnivariateDistribution, x::Integer) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") result = - if isfinite(minimum_d) && - !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d))) - x < minimum_d ? oftype(c, -Inf) : c - else - c = log1mexp(logsumexp(logpdf(d, y) for y = (min(x+1, maximum_d)):maximum_d)) - x >= maximum_d ? zero(c) : c - end + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = logsumexp(logpdf(d, y) for y in minimum_d:(max(x, minimum_d))) + x < minimum_d ? oftype(c, -Inf) : c + else + c = log1mexp(logsumexp(logpdf(d, y) for y in (min(x + 1, maximum_d)):maximum_d)) + x >= maximum_d ? zero(c) : c + end return result end @@ -634,14 +636,14 @@ function integerunitrange_logccdf(d::DiscreteUnivariateDistribution, x::Integer) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") result = - if isfinite(minimum_d) && - !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) - c = log1mexp(logsumexp(logpdf(d, y) for y = minimum_d:(max(x, minimum_d)))) - x < minimum_d ? zero(c) : c - else - c = logsumexp(logpdf(d, y) for y = (min(x+1, maximum_d)):maximum_d) - x >= maximum_d ? oftype(c, -Inf) : c - end + if isfinite(minimum_d) && + !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) + c = log1mexp(logsumexp(logpdf(d, y) for y in minimum_d:(max(x, minimum_d)))) + x < minimum_d ? zero(c) : c + else + c = logsumexp(logpdf(d, y) for y in (min(x + 1, maximum_d)):maximum_d) + x >= maximum_d ? oftype(c, -Inf) : c + end return result end diff --git a/src/utils.jl b/src/utils.jl index eaef6ec36e..f56b136435 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -87,7 +87,7 @@ If `check` is `false`, the checks are skipped. 
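An illustrative sketch:

```julia
σ = -1.0
f() = σ > 0 || throw(DomainError(σ, "σ must be positive"))
check_args(f, true)   # runs the checks and throws
check_args(f, false)  # returns `nothing`; the checks are skipped
```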
""" function check_args(f::F, check::Bool) where {F} check && f() - nothing + return nothing end ##### Utility functions @@ -114,9 +114,9 @@ end # because X == X' keeps failing due to floating point nonsense function isApproxSymmmetric(a::AbstractMatrix{Float64}) tmp = true - for j = 2:size(a, 1) - for i = 1:(j-1) - tmp &= abs(a[i, j] - a[j, i]) < 1e-8 + for j in 2:size(a, 1) + for i in 1:(j - 1) + tmp &= abs(a[i, j] - a[j, i]) < 1.0e-8 end end return tmp @@ -139,21 +139,21 @@ false ``` """ function ispossemdef( - X::AbstractMatrix, - k::Int; - atol::Real = 0.0, - rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), -) + X::AbstractMatrix, + k::Int; + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), + ) _check_rank_range(k, minimum(size(X))) ishermitian(X) || return false dp, dz, dn = eigsigns(Hermitian(X), atol, rtol) return dn == 0 && dp == k end function ispossemdef( - X::AbstractMatrix; - atol::Real = 0.0, - rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), -) + X::AbstractMatrix; + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), + ) ishermitian(X) || return false dp, dz, dn = eigsigns(Hermitian(X), atol, rtol) return dn == 0 @@ -161,21 +161,21 @@ end function _check_rank_range(k::Int, n::Int) 0 <= k <= n || throw(ArgumentError("rank must be between 0 and $(n) (inclusive)")) - nothing + return nothing end # return counts of the number of positive, zero, and negative eigenvalues function eigsigns( - X::AbstractMatrix, - atol::Real = 0.0, - rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), -) + X::AbstractMatrix, + atol::Real = 0.0, + rtol::Real = (minimum(size(X)) * eps(real(float(one(eltype(X)))))) * iszero(atol), + ) eigs = eigvals(X) - eigsigns(eigs, atol, rtol) + return eigsigns(eigs, atol, rtol) end function eigsigns(eigs::Vector{<:Real}, atol::Real, rtol::Real) tol = max(atol, rtol * eigs[end]) - eigsigns(eigs, tol) + return eigsigns(eigs, tol) end function eigsigns(eigs::Vector{<:Real}, tol::Real) dp = count(x -> tol < x, eigs) # number of positive eigenvalues diff --git a/test/censored.jl b/test/censored.jl index 8e21c40442..ec6a92403a 100644 --- a/test/censored.jl +++ b/test/censored.jl @@ -112,7 +112,7 @@ end @test !insupport(d, -1.1) @test !insupport(d, 2.1) @test sprint(show, "text/plain", d) == - "Censored($(Normal(0.0, 1.0)); lower=-1, upper=2)" + "Censored($(Normal(0.0, 1.0)); lower=-1, upper=2)" d = Censored(Cauchy(0, 1), nothing, 2) @test d isa Censored @@ -151,31 +151,31 @@ end @test !insupport(d, 10) @test censored(Censored(Normal(), 1, nothing), nothing, 2) == - Censored(Normal(), 1, 2) + Censored(Normal(), 1, 2) @test censored(Censored(Normal(), nothing, 1), -1, nothing) == - Censored(Normal(), -1, 1) + Censored(Normal(), -1, 1) @test censored(Censored(Normal(), 1, 2), 1.5, 2.5) == Censored(Normal(), 1.5, 2.0) @test censored(Censored(Normal(), 1, 3), 1.5, 2.5) == Censored(Normal(), 1.5, 2.5) @test censored(Censored(Normal(), 1, 2), 0.5, 2.5) == Censored(Normal(), 1.0, 2.0) @test censored(Censored(Normal(), 1, 2), 0.5, 1.5) == Censored(Normal(), 1.0, 1.5) @test censored(Censored(Normal(), nothing, 1), nothing, 1) == - Censored(Normal(), nothing, 1) + Censored(Normal(), nothing, 1) @test censored(Censored(Normal(), nothing, 1), nothing, 2) == - Censored(Normal(), nothing, 1) + Censored(Normal(), nothing, 1) @test censored(Censored(Normal(), nothing, 1), nothing, 1.5) 
== - Censored(Normal(), nothing, 1) + Censored(Normal(), nothing, 1) @test censored(Censored(Normal(), nothing, 1.5), nothing, 1) == - Censored(Normal(), nothing, 1) + Censored(Normal(), nothing, 1) @test censored(Censored(Normal(), 1, nothing), 1, nothing) == - Censored(Normal(), 1, nothing) + Censored(Normal(), 1, nothing) @test censored(Censored(Normal(), 1, nothing), 2, nothing) == - Censored(Normal(), 2, nothing) + Censored(Normal(), 2, nothing) @test censored(Censored(Normal(), 1, nothing), 1.5, nothing) == - Censored(Normal(), 1.5, nothing) + Censored(Normal(), 1.5, nothing) @test censored(Censored(Normal(), 1.5, nothing), 1, nothing) == - Censored(Normal(), 1.5, nothing) + Censored(Normal(), 1.5, nothing) end @testset "Uniform" begin @@ -193,9 +193,9 @@ end (-Inf, Inf), ] @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for ( - lower, - upper, - ) in bounds + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) @@ -211,10 +211,10 @@ end @test u == upper end @testset for f in [cdf, logcdf, ccdf, logccdf] - @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1e-8 - @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1e-8 - @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1e-8 - @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1e-8 + @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1.0e-8 + @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1.0e-8 + @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1.0e-8 + @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1.0e-8 @test @inferred(f(d, 5)) ≈ f(dmix, 5) end @testset for f in [mean, var] @@ -223,7 +223,7 @@ end @test @inferred(median(d)) ≈ clamp(median(d0), l, u) @inferred quantile(d, 0.5) @test Base.Fix1(quantile, d).(0:0.01:1) ≈ - clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with # Normal(μ, 0), they are infinite if lower === nothing || !isfinite(lower) @@ -247,7 +247,7 @@ end @test @inferred(loglikelihood(d, x)) ≈ sum(x -> logpdf(d, x), x) @test loglikelihood(d, [x; -1]) == -Inf # entropy - @test @inferred(entropy(d)) ≈ mean(x -> -logpdf(d, x), x) atol = 1e-1 + @test @inferred(entropy(d)) ≈ mean(x -> -logpdf(d, x), x) atol = 1.0e-1 end end @@ -255,18 +255,18 @@ end d0 = Normal() bounds = [(nothing, 0.2), (-0.1, nothing), (-0.1, 0.2)] @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? 
"nothing" : upper)" for ( - lower, - upper, - ) in bounds + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) l, u = extrema(d) @testset for f in [cdf, logcdf, ccdf, logccdf] - @test f(d, l) ≈ f(dmix, l) atol = 1e-8 - @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1e-8 - @test f(d, u) ≈ f(dmix, u) atol = 1e-8 - @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1e-8 + @test f(d, l) ≈ f(dmix, l) atol = 1.0e-8 + @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1.0e-8 + @test f(d, u) ≈ f(dmix, u) atol = 1.0e-8 + @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1.0e-8 @test f(d, 5) ≈ f(dmix, 5) end @testset for f in [mean, var] @@ -274,7 +274,7 @@ end end @test median(d) ≈ clamp(median(d0), l, u) @test Base.Fix1(quantile, d).(0:0.01:1) ≈ - clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with # Normal(μ, 0), they are infinite if lower === nothing @@ -297,7 +297,7 @@ end # loglikelihood @test loglikelihood(d, x) ≈ sum(x -> logpdf(d, x), x) # entropy - @test entropy(d) ≈ mean(x -> -logpdf(d, x), x) atol = 1e-1 + @test entropy(d) ≈ mean(x -> -logpdf(d, x), x) atol = 1.0e-1 end end @@ -316,19 +316,19 @@ end (-Inf, Inf), ] @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper)" for ( - lower, - upper, - ) in bounds + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) l, u = extrema(d) @testset for f in [pdf, logpdf, cdf, logcdf, ccdf, logccdf] - @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1e-8 - @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1e-8 - @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1e-8 - @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1e-8 + @test @inferred(f(d, l)) ≈ f(dmix, l) atol = 1.0e-8 + @test @inferred(f(d, l - 0.1)) ≈ f(dmix, l - 0.1) atol = 1.0e-8 + @test @inferred(f(d, u)) ≈ f(dmix, u) atol = 1.0e-8 + @test @inferred(f(d, u + 0.1)) ≈ f(dmix, u + 0.1) atol = 1.0e-8 @test @inferred(f(d, 5)) ≈ f(dmix, 5) end @testset for f in [mean, var] @@ -337,7 +337,7 @@ end @test @inferred(median(d)) ≈ clamp(median(d0), l, u) @inferred quantile(d, 0.5) @test Base.Fix1(quantile, d).(0:0.01:1) ≈ - clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) + clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u) # rand x = rand(d, 10_000) @test all(x -> insupport(d, x), x) @@ -347,10 +347,10 @@ end μ = @inferred mean(d) xall = unique(x) @test μ ≈ sum(x -> pdf(d, x) * x, xall) - @test mean(x) ≈ μ atol = 1e-1 + @test mean(x) ≈ μ atol = 1.0e-1 v = @inferred var(d) @test v ≈ sum(x -> pdf(d, x) * abs2(x - μ), xall) - @test std(x) ≈ sqrt(v) atol = 1e-1 + @test std(x) ≈ sqrt(v) atol = 1.0e-1 # entropy @test @inferred(entropy(d)) ≈ sum(x -> pdf(d, x) * -logpdf(d, x), xall) end @@ -360,43 +360,43 @@ end d0 = Poisson(20) bounds = [(nothing, 12), (2, nothing), (2, 12), (8, nothing)] @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? 
"nothing" : upper)" for ( - lower, - upper, - ) in bounds + lower, + upper, + ) in bounds d = censored(d0, lower, upper) dmix = _as_mixture(d) @test extrema(d) == extrema(dmix) l, u = extrema(d) @testset for f in [pdf, logpdf, cdf, logcdf, ccdf, logccdf] - @test f(d, l) ≈ f(dmix, l) atol = 1e-8 - @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1e-8 - @test f(d, u) ≈ f(dmix, u) atol = 1e-8 - @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1e-8 + @test f(d, l) ≈ f(dmix, l) atol = 1.0e-8 + @test f(d, l - 0.1) ≈ f(dmix, l - 0.1) atol = 1.0e-8 + @test f(d, u) ≈ f(dmix, u) atol = 1.0e-8 + @test f(d, u + 0.1) ≈ f(dmix, u + 0.1) atol = 1.0e-8 @test f(d, 5) ≈ f(dmix, 5) end @test median(d) ≈ clamp(median(d0), l, u) @test Base.Fix1(quantile, d).(0:0.01:0.99) ≈ - clamp.(Base.Fix1(quantile, d0).(0:0.01:0.99), l, u) + clamp.(Base.Fix1(quantile, d0).(0:0.01:0.99), l, u) x = rand(d, 100) @test loglikelihood(d, x) ≈ loglikelihood(dmix, x) # rand x = rand(d, 10_000) @test all(x -> insupport(d, x), x) # mean, std - @test mean(x) ≈ mean(x) atol = 1e-1 - @test std(x) ≈ std(x) atol = 1e-1 + @test mean(x) ≈ mean(x) atol = 1.0e-1 + @test std(x) ≈ std(x) atol = 1.0e-1 end end @testset "mixed types are still type-inferrible" begin bounds = [(nothing, 8), (2, nothing), (2, 8)] @testset "lower = $(lower === nothing ? "nothing" : lower), upper = $(upper === nothing ? "nothing" : upper), uncensored partype=$T0, partype=$T" for ( - lower, - upper, - ) in bounds, - T in (Int, Float32, Float64), - T0 in (Int, Float32, Float64) + lower, + upper, + ) in bounds, + T in (Int, Float32, Float64), + T0 in (Int, Float32, Float64) d0 = Uniform(T0(0), T0(10)) d = censored( diff --git a/test/cholesky/lkjcholesky.jl b/test/cholesky/lkjcholesky.jl index 723545e232..56e2d0030d 100644 --- a/test/cholesky/lkjcholesky.jl +++ b/test/cholesky/lkjcholesky.jl @@ -18,15 +18,15 @@ using FiniteDifferences marginal = Distributions._marginal(dmat) ndraws = length(xs) zs = Array{eltype(d)}(undef, p, p, ndraws) - for k = 1:ndraws + for k in 1:ndraws zs[:, :, k] = Matrix(xs[k]) end @testset "LKJCholesky marginal moments" begin @test mean(zs; dims = 3)[:, :, 1] ≈ I atol = 0.1 @test var(zs; dims = 3)[:, :, 1] ≈ var(marginal) * (ones(p, p) - I) atol = 0.1 - @testset for n = 2:5 - for i = 1:p, j = 1:(i-1) + @testset for n in 2:5 + for i in 1:p, j in 1:(i - 1) @test moment(zs[i, j, :], n) ≈ moment(rand(marginal, ndraws), n) atol = 0.1 end @@ -35,8 +35,8 @@ using FiniteDifferences @testset "LKJCholesky marginal KS test" begin α = 0.01 - L = sum(1:(p-1)) - for i = 1:p, j = 1:(i-1) + L = sum(1:(p - 1)) + for i in 1:p, j in 1:(i - 1) @test pvalue_kolmogorovsmirnoff(zs[i, j, :], marginal) >= α / L / nkstests end end @@ -48,14 +48,14 @@ using FiniteDifferences J = jacobian(central_fdm(5, 1), cholesky_vec_to_corr_vec, stricttril_to_vec(L))[1] return logabsdet(J)[1] end - stricttril_to_vec(L) = [L[i, j] for i in axes(L, 1) for j = 1:(i-1)] + stricttril_to_vec(L) = [L[i, j] for i in axes(L, 1) for j in 1:(i - 1)] function vec_to_stricttril(l) n = length(l) p = Int((1 + sqrt(8n + 1)) / 2) L = similar(l, p, p) fill!(L, 0) k = 1 - for i = 1:p, j = 1:(i-1) + for i in 1:p, j in 1:(i - 1) L[i, j] = l[k] k += 1 end @@ -64,7 +64,7 @@ using FiniteDifferences function cholesky_vec_to_corr_vec(l) L = vec_to_stricttril(l) for i in axes(L, 1) - w = view(L, i, 1:(i-1)) + w = view(L, i, 1:(i - 1)) wnorm = norm(w) if wnorm > 1 w ./= wnorm @@ -159,7 +159,7 @@ using FiniteDifferences ) === T(-Inf) end z = rand(LKJ(40, 1)) - z .+= exp(Symmetric(randn(size(z)))) .* 1e-8 + z .+= 
exp(Symmetric(randn(size(z)))) .* 1.0e-8 x = cholesky(z) @test !insupport(LKJCholesky(4, 2), x) end @@ -223,14 +223,14 @@ using FiniteDifferences test_draws(d, xs; nkstests = nkstests) F2 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs2 = [deepcopy(F2) for _ = 1:(10^4)] + xs2 = [deepcopy(F2) for _ in 1:(10^4)] xs2[1] = cholesky(exp(Symmetric(randn(rng, p + 1, p + 1)))) rand!(rng, d, xs2) test_draws(d, xs2; nkstests = nkstests) # non-allocating F3 = cholesky(exp(Symmetric(randn(rng, p, p)))) - xs3 = [deepcopy(F3) for _ = 1:(10^4)] + xs3 = [deepcopy(F3) for _ in 1:(10^4)] rand!(rng, d, xs3) test_draws(d, xs3; check_uplo = uplo == 'U', nkstests = nkstests) end diff --git a/test/convolution.jl b/test/convolution.jl index dfa057a1be..8d2c2b9e6b 100644 --- a/test/convolution.jl +++ b/test/convolution.jl @@ -74,7 +74,7 @@ using Test [prevfloat(0.0), 0.0, nextfloat(0.0), 1.0], fill(1 // 4, 4), ) - d10 = DiscreteNonParametric((1//10):(1//10):1, fill(1 // 10, 10)) + d10 = DiscreteNonParametric((1 // 10):(1 // 10):1, fill(1 // 10, 10)) d_int_simple = @inferred(convolve(d1, d2)) @test d_int_simple isa DiscreteNonParametric @@ -82,7 +82,7 @@ using Test @test probs(d_int_simple) == [0.25, 0.5, 0.25] d_rat = convolve(d10, d10) - @test support(d_rat) == (1//5):(1//10):2 + @test support(d_rat) == (1 // 5):(1 // 10):2 @test probs(d_rat) == [ 1 // 100, 1 // 50, @@ -116,7 +116,7 @@ using Test 2.0, ] @test probs(d_float_supp) == - [1 // 16, 1 // 8, 3 // 16, 1 // 8, 1 // 16, 3 // 8, 1 // 16] + [1 // 16, 1 // 8, 3 // 16, 1 // 8, 1 // 16, 3 // 8, 1 // 16] end end diff --git a/test/density_interface.jl b/test/density_interface.jl index d4791f1a44..0cb1cafdfb 100644 --- a/test/density_interface.jl +++ b/test/density_interface.jl @@ -16,10 +16,10 @@ for di_func in (logdensityof, densityof) @test_throws ArgumentError di_func( d_uv_continuous, - [rand(d_uv_continuous) for i = 1:3], + [rand(d_uv_continuous) for i in 1:3], ) - @test_throws ArgumentError di_func(d_mv, hcat([rand(d_mv) for i = 1:3]...)) - @test_throws ArgumentError di_func(d_av, [rand(d_av) for i = 1:3]) + @test_throws ArgumentError di_func(d_mv, hcat([rand(d_mv) for i in 1:3]...)) + @test_throws ArgumentError di_func(d_av, [rand(d_av) for i in 1:3]) end end end diff --git a/test/eachvariate.jl b/test/eachvariate.jl index 03c1489cdd..60cd1f68d3 100644 --- a/test/eachvariate.jl +++ b/test/eachvariate.jl @@ -12,7 +12,7 @@ function FiniteDifferences.to_vec(xs::Distributions.EachVariate{V}) where {V} end # MWE in #1817 -struct FooEachvariate <: Sampleable{Multivariate,Continuous} end +struct FooEachvariate <: Sampleable{Multivariate, Continuous} end Base.length(::FooEachvariate) = 3 Base.eltype(::FooEachvariate) = Float64 function Distributions._rand!(rng::AbstractRNG, ::FooEachvariate, x::AbstractVector{<:Real}) diff --git a/test/edgecases.jl b/test/edgecases.jl index 1ed465e832..7a58016775 100644 --- a/test/edgecases.jl +++ b/test/edgecases.jl @@ -24,6 +24,6 @@ using Test, Distributions, StatsBase @test isapprox( fnecdf(y), [0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975], - atol = 1e-3, + atol = 1.0e-3, ) end diff --git a/test/edgeworth.jl b/test/edgeworth.jl index 5da5cbd6b9..6b5a7427a2 100644 --- a/test/edgeworth.jl +++ b/test/edgeworth.jl @@ -14,7 +14,7 @@ dg_s = Gamma(10, 1) dg_m = Gamma(10, 0.1) dg_za = Gamma(10, 1 / std(dg_s)) -for i = 0.01:0.01:0.99 +for i in 0.01:0.01:0.99 q = quantile(dg_s, i) @test isapprox(quantile(d_s, i), q, atol = 0.02) diff --git a/test/fit.jl b/test/fit.jl index 2a51e8ad09..0575cb36b0 100644 --- a/test/fit.jl +++ 
b/test/fit.jl @@ -128,7 +128,7 @@ end w = func[1](n0) ss = suffstats(Categorical, (3, x)) - h = Float64[count(v -> v == i, x) for i = 1:3] + h = Float64[count(v -> v == i, x) for i in 1:3] @test isa(ss, Distributions.CategoricalStats) @test ss.h ≈ h @@ -142,7 +142,7 @@ end @test probs(d2) == probs(d) ss = suffstats(Categorical, (3, x), w) - h = Float64[sum(w[x .== i]) for i = 1:3] + h = Float64[sum(w[x .== i]) for i in 1:3] @test isa(ss, Distributions.CategoricalStats) @test ss.h ≈ h diff --git a/test/functionals.jl b/test/functionals.jl index bbc04aeeaa..0ef6e62a2a 100644 --- a/test/functionals.jl +++ b/test/functionals.jl @@ -1,5 +1,5 @@ # Struct to test AbstractMvNormal methods -struct CholeskyMvNormal{M,T} <: Distributions.AbstractMvNormal +struct CholeskyMvNormal{M, T} <: Distributions.AbstractMvNormal m::M L::T end @@ -7,7 +7,7 @@ end # Constructor for diagonal covariance matrices used in the tests below function CholeskyMvNormal(m::Vector, Σ::Diagonal) L = Diagonal(map(sqrt, Σ.diag)) - return CholeskyMvNormal{typeof(m),typeof(L)}(m, L) + return CholeskyMvNormal{typeof(m), typeof(L)}(m, L) end Distributions.length(p::CholeskyMvNormal) = length(p.m) @@ -25,40 +25,40 @@ end # univariate distributions for d in (Normal(), Poisson(2.0), Binomial(10, 0.4)) m = Distributions.expectation(identity, d) - @test m ≈ mean(d) atol = 1e-3 - @test Distributions.expectation(x -> (x - mean(d))^2, d) ≈ var(d) atol = 1e-3 + @test m ≈ mean(d) atol = 1.0e-3 + @test Distributions.expectation(x -> (x - mean(d))^2, d) ≈ var(d) atol = 1.0e-3 - @test @test_deprecated(Distributions.expectation(d, identity, 1e-10)) == m + @test @test_deprecated(Distributions.expectation(d, identity, 1.0e-10)) == m @test @test_deprecated(Distributions.expectation(d, identity)) == m end # multivariate distribution d = MvNormal([1.5, -0.5], I) - @test Distributions.expectation(identity, d; nsamples = 10_000) ≈ mean(d) atol = 5e-2 + @test Distributions.expectation(identity, d; nsamples = 10_000) ≈ mean(d) atol = 5.0e-2 @test @test_deprecated(Distributions.expectation(d, identity; nsamples = 10_000)) ≈ - mean(d) atol = 5e-2 + mean(d) atol = 5.0e-2 end @testset "KL divergences" begin function test_kl(p, q) @test kldivergence(p, q) >= 0 - @test kldivergence(p, p) ≈ 0 atol = 1e-1 - @test kldivergence(q, q) ≈ 0 atol = 1e-1 + @test kldivergence(p, p) ≈ 0 atol = 1.0e-1 + @test kldivergence(q, q) ≈ 0 atol = 1.0e-1 if p isa UnivariateDistribution @test kldivergence(p, q) ≈ invoke( kldivergence, - Tuple{UnivariateDistribution,UnivariateDistribution}, + Tuple{UnivariateDistribution, UnivariateDistribution}, p, q, - ) atol = 1e-1 + ) atol = 1.0e-1 elseif p isa MultivariateDistribution @test kldivergence(p, q) ≈ invoke( kldivergence, - Tuple{MultivariateDistribution,MultivariateDistribution}, + Tuple{MultivariateDistribution, MultivariateDistribution}, p, q; nsamples = 10000, - ) atol = 1e-1 + ) atol = 1.0e-1 end end @@ -81,9 +81,9 @@ end end @testset "Categorical" begin @test kldivergence(Categorical([0.0, 0.1, 0.9]), Categorical([0.1, 0.1, 0.8])) ≥ - 0 + 0 @test kldivergence(Categorical([0.0, 0.1, 0.9]), Categorical([0.1, 0.1, 0.8])) ≈ - kldivergence([0.0, 0.1, 0.9], [0.1, 0.1, 0.8]) + kldivergence([0.0, 0.1, 0.9], [0.1, 0.1, 0.8]) end @testset "Chi" begin p = Chi(4.0) @@ -164,7 +164,7 @@ end q = NormalCanon(3, 4) test_kl(p, q) @test kldivergence(p, q) ≈ - kldivergence(Normal(1 / 2, 1 / sqrt(2)), Normal(3 / 4, 1 / 2)) + kldivergence(Normal(1 / 2, 1 / sqrt(2)), Normal(3 / 4, 1 / 2)) end @testset "Poisson" begin p = Poisson(4.0) diff --git 
a/test/matrixreshaped.jl b/test/matrixreshaped.jl index e058c76fbd..8ff4094782 100644 --- a/test/matrixreshaped.jl +++ b/test/matrixreshaped.jl @@ -4,7 +4,7 @@ using Distributions, Test, Random, LinearAlgebra rng = MersenneTwister(123456) function test_matrixreshaped(rng, d1, sizes) - @testset "MatrixReshaped $(nameof(typeof(d1))) tests" begin + return @testset "MatrixReshaped $(nameof(typeof(d1))) tests" begin x1 = rand(rng, d1) d1s = [@test_deprecated(MatrixReshaped(d1, s...)) for s in sizes] @@ -19,7 +19,7 @@ function test_matrixreshaped(rng, d1, sizes) @test_deprecated(@test_throws ArgumentError MatrixReshaped(d1, -length(d1), -1)) end @testset "MatrixReshaped size" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test size(d) == s end end @@ -34,35 +34,35 @@ function test_matrixreshaped(rng, d1, sizes) end end @testset "MatrixReshaped insupport" begin - for (i, d) in enumerate(d1s[1:(end-1)]) - for (j, s) in enumerate(sizes[1:(end-1)]) + for (i, d) in enumerate(d1s[1:(end - 1)]) + for (j, s) in enumerate(sizes[1:(end - 1)]) @test (i == j) ⊻ !insupport(d, reshape(x1, s)) end end end @testset "MatrixReshaped mean" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test mean(d) == reshape(mean(d1), s) end end @testset "MatrixReshaped mode" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test mode(d) == reshape(mode(d1), s) end end @testset "MatrixReshaped covariance" begin - for (d, (n, p)) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, (n, p)) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test cov(d) == cov(d1) @test cov(d, Val(false)) == reshape(cov(d1), n, p, n, p) end end @testset "MatrixReshaped variance" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test var(d) == reshape(var(d1), s) end end @testset "MatrixReshaped params" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) @test params(d) == (d1, s) end end @@ -77,7 +77,7 @@ function test_matrixreshaped(rng, d1, sizes) end end @testset "MatrixReshaped logpdf" begin - for (d, s) in zip(d1s[1:(end-1)], sizes[1:(end-1)]) + for (d, s) in zip(d1s[1:(end - 1)], sizes[1:(end - 1)]) x = reshape(x1, s) @test logpdf(d, x) == logpdf(d1, x1) end diff --git a/test/matrixvariates.jl b/test/matrixvariates.jl index 8d6bd34614..bb563f5daa 100644 --- a/test/matrixvariates.jl +++ b/test/matrixvariates.jl @@ -68,29 +68,29 @@ import Distributions: _univariate, _multivariate, _rand_params function test_draws(d::MatrixDistribution, M::Integer) rng = MersenneTwister(123) @testset "Testing matrix-variates with $key" for (key, func) in Dict( - "rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], - ) + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) test_draws(d, func[2](d, M)) end @testset "Testing matrix-variates with $key" for (key, func) in Dict( - "rand!(..., false)" => [(dist, X) -> rand!(dist, X, false), rand!], - "rand!(rng, ..., false)" => - [(dist, X) -> rand!(rng, dist, X, false), (dist, X) -> rand!(rng, dist, X)], - ) + "rand!(..., false)" => [(dist, X) -> rand!(dist, X, false), rand!], + "rand!(rng, ..., false)" => + [(dist, X) -> rand!(rng, dist, X, false), (dist, X) -> 
rand!(rng, dist, X)], + ) m = [Matrix{partype(d)}(undef, size(d)) for _ in Base.OneTo(M)] x = func[1](d, m) @test x ≡ m @test isapprox(mean(x), mean(d), atol = 0.1) - m3 = Array{partype(d),3}(undef, size(d)..., M) + m3 = Array{partype(d), 3}(undef, size(d)..., M) x = func[2](d, m3) @test x ≡ m3 @test isapprox(mean(x), mean(mean(d)), atol = 0.1) end @testset "Testing matrix-variates with $key" for (key, func) in Dict( - "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), - "rand!(rng, true)" => (dist, X) -> rand!(rng, dist, X, true), - ) + "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), + "rand!(rng, true)" => (dist, X) -> rand!(rng, dist, X, true), + ) m = Vector{Matrix{partype(d)}}(undef, M) x = func(d, m) @test x ≡ m @@ -157,7 +157,7 @@ import Distributions: _univariate, _multivariate, _rand_params @test n == size(mean(d), 2) end - test_dim(d::Union{MatrixNormal,MatrixTDist}) = nothing + test_dim(d::Union{MatrixNormal, MatrixTDist}) = nothing # -------------------------------------------------- # RUN EVERYTHING @@ -173,11 +173,11 @@ import Distributions: _univariate, _multivariate, _rand_params end function test_distr( - dist::Type{<:MatrixDistribution}, - n::Integer, - p::Integer, - M::Integer, - ) + dist::Type{<:MatrixDistribution}, + n::Integer, + p::Integer, + M::Integer, + ) d = dist(_rand_params(dist, Float64, n, p)...) test_distr(d, M) nothing @@ -198,12 +198,12 @@ import Distributions: _univariate, _multivariate, _rand_params end function test_draws_against_univariate_cdf( - D::MatrixDistribution, - d::UnivariateDistribution, - ) + D::MatrixDistribution, + d::UnivariateDistribution, + ) α = 0.025 M = 100000 - matvardraws = [rand(D)[1] for m = 1:M] + matvardraws = [rand(D)[1] for m in 1:M] @test pvalue_kolmogorovsmirnoff(matvardraws, d) >= α nothing end @@ -233,10 +233,10 @@ import Distributions: _univariate, _multivariate, _rand_params end function test_against_multivariate( - dist::Union{Type{MatrixNormal},Type{MatrixTDist}}, - n::Integer, - p::Integer, - ) + dist::Union{Type{MatrixNormal}, Type{MatrixTDist}}, + n::Integer, + p::Integer, + ) D = dist(_rand_params(dist, Float64, n, 1)...) 
d = _multivariate(D) test_against_multivariate(D, d) @@ -291,24 +291,24 @@ import Distributions: _univariate, _multivariate, _rand_params ) stan_output = JSON.parsefile(filename) K = length(stan_output) - for k = 1:K + for k in 1:K d, X, lpdf = unpack_matvar_json_dict(dist, stan_output[k]) - @test isapprox(logpdf(d, X), lpdf, atol = 1e-10) - @test isapprox(logpdf(d, [X, X]), [lpdf, lpdf], atol = 1e-8) - @test isapprox(pdf(d, X), exp(lpdf), atol = 1e-6) - @test isapprox(pdf(d, [X, X]), exp.([lpdf, lpdf]), atol = 1e-6) + @test isapprox(logpdf(d, X), lpdf, atol = 1.0e-10) + @test isapprox(logpdf(d, [X, X]), [lpdf, lpdf], atol = 1.0e-8) + @test isapprox(pdf(d, X), exp(lpdf), atol = 1.0e-6) + @test isapprox(pdf(d, [X, X]), exp.([lpdf, lpdf]), atol = 1.0e-6) end nothing end function test_against_stan( - dist::Union{ - Type{MatrixNormal}, - Type{MatrixTDist}, - Type{MatrixBeta}, - Type{MatrixFDist}, - }, - ) + dist::Union{ + Type{MatrixNormal}, + Type{MatrixTDist}, + Type{MatrixBeta}, + Type{MatrixFDist}, + }, + ) nothing end @@ -372,7 +372,7 @@ import Distributions: _univariate, _multivariate, _rand_params q = MvTDist(10, randn(n), rand(d)) ρ = Chisq(ν) A = rand(q, M) - z = [A[:, m]' * H[m] * A[:, m] / (A[:, m]' * Σ * A[:, m]) for m = 1:M] + z = [A[:, m]' * H[m] * A[:, m] / (A[:, m]' * Σ * A[:, m]) for m in 1:M] @test pvalue_kolmogorovsmirnoff(z, ρ) >= α end @testset "H ~ W(ν, I) ⟹ H[i, i] ~ χ²(ν)" begin @@ -380,10 +380,10 @@ import Distributions: _univariate, _multivariate, _rand_params ρ = Chisq(κ) g = Wishart(κ, ScalMat(n, 1)) mymats = zeros(n, n, M) - for m = 1:M + for m in 1:M mymats[:, :, m] = rand(g) end - for i = 1:n + for i in 1:n @test pvalue_kolmogorovsmirnoff(mymats[i, i, :], ρ) >= α / n end end @@ -400,12 +400,12 @@ import Distributions: _univariate, _multivariate, _rand_params X = H[1] @test Distributions.singular_wishart_logkernel(d, X) ≈ - Distributions.nonsingular_wishart_logkernel(d, X) + Distributions.nonsingular_wishart_logkernel(d, X) @test Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) ≈ - Distributions.nonsingular_wishart_logc0(n, ν, d.S) + Distributions.nonsingular_wishart_logc0(n, ν, d.S) @test logpdf(d, X) ≈ - Distributions.singular_wishart_logkernel(d, X) + - Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) + Distributions.singular_wishart_logkernel(d, X) + + Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) end nothing end @@ -414,9 +414,9 @@ import Distributions: _univariate, _multivariate, _rand_params @testset "InverseWishart constructor" begin # Tests https://github.com/JuliaStats/Distributions.jl/issues/1948 @test typeof(InverseWishart(5, ScalMat(5, 1))) == - InverseWishart{Float64,ScalMat{Float64}} + InverseWishart{Float64, ScalMat{Float64}} @test typeof(InverseWishart(5, PDiagMat(ones(Int, 5)))) == - InverseWishart{Float64,PDiagMat{Float64,Vector{Float64}}} + InverseWishart{Float64, PDiagMat{Float64, Vector{Float64}}} end end @@ -467,14 +467,14 @@ import Distributions: _univariate, _multivariate, _rand_params G = LKJ(d, η) M = 10000 α = 0.05 - L = sum(1:(d-1)) + L = sum(1:(d - 1)) ρ = Distributions._marginal(G) mymats = zeros(d, d, M) - for m = 1:M + for m in 1:M mymats[:, :, m] = rand(G) end - for i = 1:d - for j = 1:(i-1) + for i in 1:d + for j in 1:(i - 1) @test pvalue_kolmogorovsmirnoff(mymats[i, j, :], ρ) >= α / L end end @@ -487,9 +487,9 @@ import Distributions: _univariate, _multivariate, _rand_params η = 2.3 lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst(d, η) + 
Distributions.lkj_onion_loginvconst(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_loginvconst_alt(d, η) + Distributions.lkj_loginvconst_alt(d, η) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) # ============= # odd uniform @@ -498,15 +498,15 @@ import Distributions: _univariate, _multivariate, _rand_params η = 1.0 lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst(d, η) + Distributions.lkj_onion_loginvconst(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) + Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_vine_loginvconst_uniform(d) + Distributions.lkj_vine_loginvconst_uniform(d) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_loginvconst_alt(d, η) + Distributions.lkj_loginvconst_alt(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.corr_logvolume(d) + Distributions.corr_logvolume(d) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) # ============= # even non-uniform @@ -515,9 +515,9 @@ import Distributions: _univariate, _multivariate, _rand_params η = 2.3 lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst(d, η) + Distributions.lkj_onion_loginvconst(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_loginvconst_alt(d, η) + Distributions.lkj_loginvconst_alt(d, η) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) # ============= # even uniform @@ -526,15 +526,15 @@ import Distributions: _univariate, _multivariate, _rand_params η = 1.0 lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst(d, η) + Distributions.lkj_onion_loginvconst(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) + Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) @test Distributions.lkj_vine_loginvconst(d, η) ≈ - Distributions.lkj_vine_loginvconst_uniform(d) + Distributions.lkj_vine_loginvconst_uniform(d) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.lkj_loginvconst_alt(d, η) + Distributions.lkj_loginvconst_alt(d, η) @test Distributions.lkj_onion_loginvconst(d, η) ≈ - Distributions.corr_logvolume(d) + Distributions.corr_logvolume(d) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) end @testset "check integrating constant as a volume" begin @@ -561,11 +561,11 @@ import Distributions: _univariate, _multivariate, _rand_params # ============================================================================= function test_matrixvariate( - dist::Type{<:MatrixDistribution}, - n::Integer, - p::Integer, - M::Integer, - ) + dist::Type{<:MatrixDistribution}, + n::Integer, + p::Integer, + M::Integer, + ) test_distr(dist, n, p, M) test_against_univariate(dist) test_against_multivariate(dist, n, p) diff --git a/test/mixture.jl b/test/mixture.jl index 1a64cbfedd..d5aa40f5e0 100644 --- a/test/mixture.jl +++ b/test/mixture.jl @@ -7,18 +7,18 @@ using ForwardDiff: Dual # Core testing procedure function test_mixture( - g::UnivariateMixture, - n::Int, - ns::Int, - rng::Union{AbstractRNG,Missing} = missing, -) + g::UnivariateMixture, + n::Int, + ns::Int, + rng::Union{AbstractRNG, Missing} = missing, + ) if g isa UnivariateGMM T = eltype(g.means) else T = 
eltype(typeof(g)) end X = zeros(T, n) - for i = 1:n + for i in 1:n X[i] = rand(g) end @@ -28,21 +28,21 @@ function test_mixture( # mean mu = 0.0 - for k = 1:K + for k in 1:K mu += pr[k] * mean(component(g, k)) end @test @inferred(mean(g)) ≈ mu # evaluation of cdf cf = zeros(T, n) - for k = 1:K + for k in 1:K c_k = component(g, k) - for i = 1:n + for i in 1:n cf[i] += pr[k] * cdf(c_k, X[i]) end end - for i = 1:n + for i in 1:n @test @inferred(cdf(g, X[i])) ≈ cf[i] end @test Base.Fix1(cdf, g).(X) ≈ cf @@ -50,9 +50,9 @@ function test_mixture( # evaluation P0 = zeros(T, n, K) LP0 = zeros(T, n, K) - for k = 1:K + for k in 1:K c_k = component(g, k) - for i = 1:n + for i in 1:n x_i = X[i] P0[i, k] = pdf(c_k, x_i) LP0[i, k] = logpdf(c_k, x_i) @@ -62,7 +62,7 @@ function test_mixture( mix_p0 = P0 * pr mix_lp0 = log.(mix_p0) - for i = 1:n + for i in 1:n @test @inferred(pdf(g, X[i])) ≈ mix_p0[i] @test @inferred(logpdf(g, X[i])) ≈ mix_lp0[i] @test @inferred(componentwise_pdf(g, X[i])) ≈ vec(P0[i, :]) @@ -84,7 +84,7 @@ function test_mixture( # sampling # sampling does not work with `Float32` since `AliasTable` does not support `Float32` # Ref: https://github.com/JuliaStats/StatsBase.jl/issues/158 - if T <: AbstractFloat && eltype(probs(g)) === Float64 + return if T <: AbstractFloat && eltype(probs(g)) === Float64 if ismissing(rng) Xs = rand(g, ns) else @@ -97,13 +97,13 @@ function test_mixture( end function test_mixture( - g::MultivariateMixture, - n::Int, - ns::Int, - rng::Union{AbstractRNG,Missing} = missing, -) + g::MultivariateMixture, + n::Int, + ns::Int, + rng::Union{AbstractRNG, Missing} = missing, + ) X = zeros(length(g), n) - for i = 1:n + for i in 1:n if ismissing(rng) X[:, i] = rand(g) else @@ -117,7 +117,7 @@ function test_mixture( # mean mu = zeros(length(g)) - for k = 1:K + for k in 1:K mu .+= pr[k] .* mean(component(g, k)) end @test @inferred(mean(g)) ≈ mu @@ -125,9 +125,9 @@ function test_mixture( # evaluation P0 = zeros(n, K) LP0 = zeros(n, K) - for k = 1:K + for k in 1:K c_k = component(g, k) - for i = 1:n + for i in 1:n x_i = X[:, i] P0[i, k] = pdf(c_k, x_i) LP0[i, k] = logpdf(c_k, x_i) @@ -137,7 +137,7 @@ function test_mixture( mix_p0 = P0 * pr mix_lp0 = log.(mix_p0) - for i = 1:n + for i in 1:n x_i = X[:, i] @test @inferred(pdf(g, x_i)) ≈ mix_p0[i] @test @inferred(logpdf(g, x_i)) ≈ mix_lp0[i] @@ -164,7 +164,7 @@ function test_mixture( @test size(Xs) == (length(g), ns) @test isapprox(vec(mean(Xs, dims = 2)), mean(g), atol = 0.1) @test isapprox(cov(Xs, dims = 2), cov(g), atol = 0.1) - @test isapprox(var(Xs, dims = 2), var(g), atol = 0.1) + return @test isapprox(var(Xs, dims = 2), var(g), atol = 0.1) end function test_params(g::AbstractMixtureModel) @@ -173,26 +173,26 @@ function test_params(g::AbstractMixtureModel) mm = MixtureModel(C, pars...) @test g.prior == mm.prior @test g.components == mm.components - @test g == deepcopy(g) + return @test g == deepcopy(g) end function test_params(g::UnivariateGMM) pars = params(g) mm = UnivariateGMM(pars...) 
@test g == mm - @test g == deepcopy(g) + return @test g == deepcopy(g) end # Tests @testset "Testing Mixtures with $key" for (key, rng) in Dict( - "rand(...)" => missing, - "rand(rng, ...)" => MersenneTwister(123), -) + "rand(...)" => missing, + "rand(rng, ...)" => MersenneTwister(123), + ) @testset "Testing UnivariateMixture" begin g_u = MixtureModel([Normal(), Normal()]) - @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) + @test isa(g_u, MixtureModel{Univariate, Continuous, <:Normal}) @test ncomponents(g_u) == 2 test_mixture(g_u, 1000, 10^6, rng) test_params(g_u) @@ -207,7 +207,7 @@ end [(0.0, 1.0), (2.0, 1.0), (-4.0, 1.5)], [0.2, 0.5, 0.3], ) - @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) + @test isa(g_u, MixtureModel{Univariate, Continuous, <:Normal}) @test ncomponents(g_u) == 3 test_mixture(g_u, 1000, 10^6, rng) test_params(g_u) @@ -217,7 +217,7 @@ end g_u = MixtureModel(Normal{Float32}, [(0.0f0, 1.0f0), (0.0f0, 2.0f0)], [0.4f0, 0.6f0]) - @test isa(g_u, MixtureModel{Univariate,Continuous,<:Normal}) + @test isa(g_u, MixtureModel{Univariate, Continuous, <:Normal}) @test ncomponents(g_u) == 2 test_mixture(g_u, 1000, 10^6, rng) test_params(g_u) @@ -227,11 +227,13 @@ end @test @inferred(median(g_u)) === 0.0f0 @test @inferred(quantile(g_u, 0.5f0)) === 0.0f0 - g_u = MixtureModel([ - TriangularDist(-1, 2, 0), - TriangularDist(-0.5, 3, 1), - TriangularDist(-2, 0, -1), - ]) + g_u = MixtureModel( + [ + TriangularDist(-1, 2, 0), + TriangularDist(-0.5, 3, 1), + TriangularDist(-2, 0, -1), + ] + ) @test minimum(g_u) ≈ -2.0 @test maximum(g_u) ≈ 3.0 @test extrema(g_u) == (minimum(g_u), maximum(g_u)) @@ -281,7 +283,7 @@ end ], T[0.2, 0.5, 0.3], ) - @test isa(g_m, MixtureModel{Multivariate,Continuous,IsoNormal}) + @test isa(g_m, MixtureModel{Multivariate, Continuous, IsoNormal}) @test length(components(g_m)) == 3 @test length(g_m) == 2 @test insupport(g_m, [0.0, 0.0]) @@ -297,7 +299,7 @@ end unif_mixt = MixtureModel([u1, u2]) @test var(utot) ≈ var(unif_mixt) @test mean(utot) ≈ mean(unif_mixt) - for x = -1.0:0.5:2.5 + for x in -1.0:0.5:2.5 @test cdf(utot, x) ≈ cdf(utot, x) end end diff --git a/test/multivariate/dirichlet.jl b/test/multivariate/dirichlet.jl index 82ca5b22c8..568c779802 100644 --- a/test/multivariate/dirichlet.jl +++ b/test/multivariate/dirichlet.jl @@ -11,9 +11,9 @@ Random.seed!(34567) rng = MersenneTwister(123) @testset "Testing Dirichlet with $key" for (key, func) in Dict( - "rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], -) + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) for T in (Int, Float64) d = Dirichlet(3, T(2)) @@ -47,7 +47,7 @@ rng = MersenneTwister(123) x = func[2](d, 100) p = pdf(d, x) lp = logpdf(d, x) - for i = 1:size(x, 2) + for i in 1:size(x, 2) @test lp[i] ≈ logpdf(d, x[:, i]) @test p[i] ≈ pdf(d, x[:, i]) end @@ -77,7 +77,7 @@ rng = MersenneTwister(123) x = func[2](d, 100) p = pdf(d, x) lp = logpdf(d, x) - for i = 1:size(x, 2) + for i in 1:size(x, 2) @test p[i] ≈ pdf(d, x[:, i]) @test lp[i] ≈ logpdf(d, x[:, i]) end @@ -152,12 +152,12 @@ end # Use special finite differencing method that tries to avoid moving outside of the # support by limiting the range of the points around the input that are evaluated - fdm = central_fdm(5, 1; max_range = 1e-9) + fdm = central_fdm(5, 1; max_range = 1.0e-9) for x in (x1, x2) # We have to adjust the tolerance since the finite differencing method is rough - 
test_frule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1e-5, nans = true) - test_rrule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1e-5, nans = true) + test_frule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1.0e-5, nans = true) + test_rrule(Distributions._logpdf, d, x; fdm = fdm, rtol = 1.0e-5, nans = true) end end end diff --git a/test/multivariate/dirichletmultinomial.jl b/test/multivariate/dirichletmultinomial.jl index 04d0a656f2..1c7796c6db 100644 --- a/test/multivariate/dirichletmultinomial.jl +++ b/test/multivariate/dirichletmultinomial.jl @@ -9,9 +9,9 @@ Random.seed!(123) rng = MersenneTwister(123) @testset "Testing DirichletMultinomial with $key" for (key, func) in Dict( - "rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], -) + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) # Test Constructors d = DirichletMultinomial(10, ones(5)) @@ -54,11 +54,11 @@ rng = MersenneTwister(123) for x in (2 * ones(5), [1, 2, 3, 4, 0], [3.0, 0.0, 3.0, 0.0, 4.0], [0, 0, 0, 0, 10]) @test pdf(d, x) ≈ - factorial(d.n) * gamma(d.α0) / gamma(d.n + d.α0) * - prod(gamma.(d.α .+ x) ./ (gamma.(x .+ 1) .* gamma.(d.α))) + factorial(d.n) * gamma(d.α0) / gamma(d.n + d.α0) * + prod(gamma.(d.α .+ x) ./ (gamma.(x .+ 1) .* gamma.(d.α))) @test logpdf(d, x) ≈ - logfactorial(d.n) + loggamma(d.α0) - loggamma(d.n + d.α0) + - sum(loggamma, d.α + x) - sum(loggamma, d.α) - sum(loggamma.(x .+ 1)) + logfactorial(d.n) + loggamma(d.α0) - loggamma(d.n + d.α0) + + sum(loggamma, d.α + x) - sum(loggamma, d.α) - sum(loggamma.(x .+ 1)) end # test Sampling diff --git a/test/multivariate/jointorderstatistics.jl b/test/multivariate/jointorderstatistics.jl index 950bc4b878..caeecbd694 100644 --- a/test/multivariate/jointorderstatistics.jl +++ b/test/multivariate/jointorderstatistics.jl @@ -33,14 +33,14 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test end @testset for T in [Float32, Float64], - dist in [Uniform(T(2), T(10)), Exponential(T(10)), Normal(T(100), T(10))], - n in [16, 40], - r in [ - 1:n, - ([i, j] for j = 2:n for i = 1:min(10, j-1))..., - vcat(2:4, (n-10):(n-5)), - (2, n ÷ 2, n - 5), - ] + dist in [Uniform(T(2), T(10)), Exponential(T(10)), Normal(T(100), T(10))], + n in [16, 40], + r in [ + 1:n, + ([i, j] for j in 2:n for i in 1:min(10, j - 1))..., + vcat(2:4, (n - 10):(n - 5)), + (2, n ÷ 2, n - 5), + ] d = JointOrderStatistics(dist, n, r) @@ -65,7 +65,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test @test insupport(d, x) if length(x) > 1 @test !insupport(d, reverse(x)) - @test !insupport(d, x[1:(end-1)]) + @test !insupport(d, x[1:(end - 1)]) end @test !insupport(d, x2) @test !insupport(d, fill(NaN, length(x))) @@ -84,15 +84,15 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test xi, xj = x lc = T( logfactorial(n) - logfactorial(i - 1) - logfactorial(n - j) - - logfactorial(j - i - 1), + logfactorial(j - i - 1), ) lp = ( lc + - (i - 1) * logcdf(dist, xi) + - (n - j) * logccdf(dist, xj) + - (j - i - 1) * logdiffcdf(dist, xj, xi) + - logpdf(dist, xi) + - logpdf(dist, xj) + (i - 1) * logcdf(dist, xi) + + (n - j) * logccdf(dist, xj) + + (j - i - 1) * logdiffcdf(dist, xj, xi) + + logpdf(dist, xi) + + logpdf(dist, xj) ) @test logpdf(d, x) ≈ lp @test pdf(d, x) ≈ exp(lp) @@ -109,7 +109,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test @test pdf(d, x2) == zero(T) x3 = 
copy(x) - x3[end-1], x3[end] = x3[end], x3[end-1] + x3[end - 1], x3[end] = x3[end], x3[end - 1] @test logpdf(d, x3) == T(-Inf) @test pdf(d, x3) == zero(T) @@ -188,8 +188,8 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test n = 100 rs = [ # good mixture of r values with gaps and no gaps 1:n, - vcat(1:10, (div(n, 2)-5):(div(n, 2)+5), (n-9):n), - vcat(10:20, (n-19):(n-10)), + vcat(1:10, (div(n, 2) - 5):(div(n, 2) + 5), (n - 9):n), + vcat(10:20, (n - 19):(n - 10)), (1, n), ] @@ -217,7 +217,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test v = [sum(k -> inv((n - k + 1)^2), 1:i) for i in r] xcor_exact = Symmetric(sqrt.(v ./ v')) end - for ii = 1:m, ji = (ii+1):m + for ii in 1:m, ji in (ii + 1):m i = r[ii] j = r[ji] ρ = xcor[ii, ji] diff --git a/test/multivariate/multinomial.jl b/test/multivariate/multinomial.jl index 622c7e5619..e9530917f3 100644 --- a/test/multivariate/multinomial.jl +++ b/test/multivariate/multinomial.jl @@ -11,9 +11,9 @@ using Test d = Multinomial(nt, p) @testset "Testing Multinomial with $key" for (key, func) in Dict( - "rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], - ) + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) # Basics @@ -29,17 +29,17 @@ using Test @test partype(d) === T # Conversion - @test typeof(d) === Multinomial{T,Vector{T}} + @test typeof(d) === Multinomial{T, Vector{T}} for S in (Float16, Float32, Float64) S === T && continue - @test typeof(convert(Multinomial{S}, d)) === Multinomial{S,Vector{S}} - @test typeof(convert(Multinomial{S,Vector{S}}, d)) === Multinomial{S,Vector{S}} - @test typeof(convert(Multinomial{S}, params(d)...)) === Multinomial{S,Vector{S}} - @test typeof(convert(Multinomial{S,Vector{S}}, params(d)...)) == - Multinomial{S,Vector{S}} + @test typeof(convert(Multinomial{S}, d)) === Multinomial{S, Vector{S}} + @test typeof(convert(Multinomial{S, Vector{S}}, d)) === Multinomial{S, Vector{S}} + @test typeof(convert(Multinomial{S}, params(d)...)) === Multinomial{S, Vector{S}} + @test typeof(convert(Multinomial{S, Vector{S}}, params(d)...)) == + Multinomial{S, Vector{S}} end @test convert(Multinomial{T}, d) === d - @test convert(Multinomial{T,Vector{T}}, d) === d + @test convert(Multinomial{T, Vector{T}}, d) === d # random sampling @@ -72,7 +72,7 @@ using Test x = func[2](d, 100) pv = pdf(d, x) lp = logpdf(d, x) - for i = 1:size(x, 2) + for i in 1:size(x, 2) @test pv[i] ≈ pdf(d, x[:, i]) @test lp[i] ≈ logpdf(d, x[:, i]) end @@ -103,13 +103,13 @@ using Test @test isa(ss, Distributions.MultinomialStats) @test ss.n == nt @test ss.scnts == - vec(sum(T[x[i, j] for i = 1:size(x, 1), j = 1:size(x, 2)], dims = 2)) + vec(sum(T[x[i, j] for i in 1:size(x, 1), j in 1:size(x, 2)], dims = 2)) @test ss.tw == n0 ss = suffstats(Multinomial, x, w) @test isa(ss, Distributions.MultinomialStats) @test ss.n == nt - @test ss.scnts ≈ T[x[i, j] for i = 1:size(x, 1), j = 1:size(x, 2)] * w + @test ss.scnts ≈ T[x[i, j] for i in 1:size(x, 1), j in 1:size(x, 2)] * w @test ss.tw ≈ sum(w) # fit @@ -148,15 +148,15 @@ using Test # Abstract vector p - @test typeof(Multinomial(nt, SVector{length(p),T}(p))) == - Multinomial{T,SVector{3,T}} + @test typeof(Multinomial(nt, SVector{length(p), T}(p))) == + Multinomial{T, SVector{3, T}} end @testset "Testing Multinomial with $key" for (key, func) in Dict( - "rand!(...)" => (dist, X) -> rand!(dist, X), - "rand!(rng, ...)" => (dist, X) -> rand!(rng, 
dist, X), - ) + "rand!(...)" => (dist, X) -> rand!(dist, X), + "rand!(rng, ...)" => (dist, X) -> rand!(rng, dist, X), + ) # random sampling X = Matrix{Int}(undef, length(p), 100) x = @inferred(func(d, X)) @@ -166,16 +166,16 @@ using Test @test all(insupport(d, x)) pv = pdf(d, x) lp = logpdf(d, x) - for i = 1:size(x, 2) + for i in 1:size(x, 2) @test pv[i] ≈ pdf(d, x[:, i]) @test lp[i] ≈ logpdf(d, x[:, i]) end end @testset "Testing Multinomial with $key" for (key, func) in Dict( - "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), - "rand!(rng, ..., true)" => (dist, X) -> rand!(rng, dist, X, true), - ) + "rand!(..., true)" => (dist, X) -> rand!(dist, X, true), + "rand!(rng, ..., true)" => (dist, X) -> rand!(rng, dist, X, true), + ) # random sampling X = Vector{Vector{Int}}(undef, 100) x = @inferred(func(d, X)) @@ -185,9 +185,9 @@ using Test end @testset "Testing Multinomial with $key" for (key, func) in Dict( - "rand!(..., false)" => (dist, X) -> rand!(dist, X, false), - "rand!(rng, ..., false)" => (dist, X) -> rand!(rng, dist, X, false), - ) + "rand!(..., false)" => (dist, X) -> rand!(dist, X, false), + "rand!(rng, ..., false)" => (dist, X) -> rand!(rng, dist, X, false), + ) # random sampling X = [Vector{Int}(undef, length(p)) for _ in Base.OneTo(100)] x1 = X[1] diff --git a/test/multivariate/mvlogitnormal.jl b/test/multivariate/mvlogitnormal.jl index 22d11a4fc3..76d6a06b9a 100644 --- a/test/multivariate/mvlogitnormal.jl +++ b/test/multivariate/mvlogitnormal.jl @@ -33,13 +33,13 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int = 10^6) T = partype(d) <: Float64 ? Float32 : Float64 if dnorm isa MvNormal @test convert(MvLogitNormal{MvNormal{T}}, d).normal == - convert(MvNormal{T}, dnorm) + convert(MvNormal{T}, dnorm) @test partype(convert(MvLogitNormal{MvNormal{T}}, d)) <: T @test canonform(d) isa MvLogitNormal{<:MvNormalCanon} @test canonform(d).normal == canonform(dnorm) elseif dnorm isa MvNormalCanon @test convert(MvLogitNormal{MvNormalCanon{T}}, d).normal == - convert(MvNormalCanon{T}, dnorm) + convert(MvNormalCanon{T}, dnorm) @test partype(convert(MvLogitNormal{MvNormalCanon{T}}, d)) <: T @test meanform(d) isa MvLogitNormal{<:MvNormal} @test meanform(d).normal == meanform(dnorm) @@ -48,17 +48,17 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int = 10^6) @testset "sampling" begin X = rand(d, nsamples) - Y = @views log.(X[1:(end-1), :]) .- log.(X[end, :]') + Y = @views log.(X[1:(end - 1), :]) .- log.(X[end, :]') Ymean = vec(mean(Y; dims = 2)) Ycov = cov(Y; dims = 2) - for i = 1:(length(d)-1) + for i in 1:(length(d) - 1) @test isapprox( Ymean[i], mean(dnorm)[i], atol = sqrt(var(dnorm)[i] / nsamples) * 8, ) end - for i = 1:(length(d)-1), j = 1:(length(d)-1) + for i in 1:(length(d) - 1), j in 1:(length(d) - 1) @test isapprox( Ycov[i, j], cov(dnorm)[i, j], @@ -71,14 +71,14 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int = 10^6) X = rand(d, nsamples) dfit = fit_mle(MvLogitNormal, X) dfit_norm = dfit.normal - for i = 1:(length(d)-1) + for i in 1:(length(d) - 1) @test isapprox( mean(dfit_norm)[i], mean(dnorm)[i], atol = sqrt(var(dnorm)[i] / nsamples) * 8, ) end - for i = 1:(length(d)-1), j = 1:(length(d)-1) + for i in 1:(length(d) - 1), j in 1:(length(d) - 1) @test isapprox( cov(dfit_norm)[i, j], cov(dnorm)[i, j], @@ -88,13 +88,13 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int = 10^6) @test fit_mle(MvLogitNormal{IsoNormal}, X) isa MvLogitNormal{<:IsoNormal} end - @testset "evaluation" begin + return @testset "evaluation" begin X = 
rand(d, nsamples) - for i = 1:min(100, nsamples) + for i in 1:min(100, nsamples) @test @inferred(logpdf(d, X[:, i])) ≈ log(pdf(d, X[:, i])) if dnorm isa MvNormal @test @inferred(gradlogpdf(d, X[:, i])) ≈ - ForwardDiff.gradient(x -> logpdf(d, x), X[:, i]) + ForwardDiff.gradient(x -> logpdf(d, x), X[:, i]) end end @test logpdf(d, X) ≈ log.(pdf(d, X)) @@ -150,13 +150,13 @@ end @testset "show" begin d = MvLogitNormal([1.0, 2.0, 3.0], Diagonal([4.0, 5.0, 6.0])) @test sprint(show, d) === """ - MvLogitNormal{DiagNormal}( - DiagNormal( - dim: 3 - μ: [1.0, 2.0, 3.0] - Σ: [4.0 0.0 0.0; 0.0 5.0 0.0; 0.0 0.0 6.0] - ) - ) - """ + MvLogitNormal{DiagNormal}( + DiagNormal( + dim: 3 + μ: [1.0, 2.0, 3.0] + Σ: [4.0 0.0 0.0; 0.0 5.0 0.0; 0.0 0.0 6.0] + ) + ) + """ end end diff --git a/test/multivariate/mvlognormal.jl b/test/multivariate/mvlognormal.jl index d6d33e5556..0f0ef5a593 100644 --- a/test/multivariate/mvlognormal.jl +++ b/test/multivariate/mvlognormal.jl @@ -4,14 +4,13 @@ using Distributions, FillArrays, PDMats using LinearAlgebra, Random, Test - ####### Core testing procedure function test_mvlognormal( - g::MvLogNormal, - n_tsamples::Int = 10^6, - rng::Union{AbstractRNG,Missing} = missing, -) + g::MvLogNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG, Missing} = missing, + ) d = length(g) mn = mean(g) md = median(g) @@ -21,8 +20,8 @@ function test_mvlognormal( e = entropy(g) @test partype(g) == Float64 @test isa(mn, Vector{Float64}) - if g.normal.μ isa Zeros{Float64,1} - @test md isa FillArrays.AbstractFill{Float64,1} + if g.normal.μ isa Zeros{Float64, 1} + @test md isa FillArrays.AbstractFill{Float64, 1} else @test md isa Vector{Float64} end @@ -40,7 +39,7 @@ function test_mvlognormal( @test mn ≈ exp.(mean(g.normal) .+ var(g.normal) / 2) @test mo ≈ exp.(mean(g.normal) .- var(g.normal)) @test entropy(g) ≈ - d * (1 + Distributions.log2π) / 2 + logdetcov(g.normal) / 2 + sum(mean(g.normal)) + d * (1 + Distributions.log2π) / 2 + logdetcov(g.normal) / 2 + sum(mean(g.normal)) gg = typeof(g)(MvNormal(params(g)...)) @test Vector(g.normal.μ) == Vector(gg.normal.μ) @test Matrix(g.normal.Σ) == Matrix(gg.normal.Σ) @@ -58,13 +57,13 @@ function test_mvlognormal( emp_md = vec(median(X, dims = 2)) Z = X .- emp_mn emp_cov = (Z * Z') ./ n_tsamples - for i = 1:d + for i in 1:d @test isapprox(emp_mn[i], mn[i], atol = (sqrt(s[i] / n_tsamples) * 8.0)) end - for i = 1:d + for i in 1:d @test isapprox(emp_md[i], md[i], atol = (sqrt(s[i] / n_tsamples) * 8.0)) end - for i = 1:d, j = 1:d + for i in 1:d, j in 1:d @test isapprox( emp_cov[i, j], S[i, j], @@ -73,7 +72,7 @@ function test_mvlognormal( end # evaluation of logpdf and pdf - for i = 1:min(100, n_tsamples) + for i in 1:min(100, n_tsamples) @test logpdf(g, X[:, i]) ≈ log(pdf(g, X[:, i])) end @test logpdf(g, X) ≈ log.(pdf(g, X)) @@ -86,57 +85,57 @@ function test_mvlognormal( @test isapprox( location(g), location(MvLogNormal, :meancov, mean(g), cov(g)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location(MvLogNormal, :mean, mean(g), scale(g)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location(MvLogNormal, :median, median(g), scale(g)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location(MvLogNormal, :mode, mode(g), scale(g)), - atol = 1e-8, + atol = 1.0e-8, ) - @test isapprox(scale(g), scale(MvLogNormal, :meancov, mean(g), cov(g)), atol = 1e-8) + @test isapprox(scale(g), scale(MvLogNormal, :meancov, mean(g), cov(g)), atol = 1.0e-8) @test isapprox( location(g), location!(MvLogNormal, :meancov, mean(g), 
cov(g), zero(mn)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location!(MvLogNormal, :mean, mean(g), scale(g), zero(mn)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location!(MvLogNormal, :median, median(g), scale(g), zero(mn)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( location(g), location!(MvLogNormal, :mode, mode(g), scale(g), zero(mn)), - atol = 1e-8, + atol = 1.0e-8, ) @test isapprox( scale(g), Distributions.scale!(MvLogNormal, :meancov, mean(g), cov(g), zero(S)), - atol = 1e-8, + atol = 1.0e-8, ) lc1, sc1 = params(MvLogNormal, mean(g), cov(g)) lc2, sc2 = params!(MvLogNormal, mean(g), cov(g), similar(mn), similar(S)) - @test isapprox(location(g), lc1, atol = 1e-8) - @test isapprox(location(g), lc2, atol = 1e-8) - @test isapprox(scale(g), sc1, atol = 1e-8) - @test isapprox(scale(g), sc2, atol = 1e-8) + @test isapprox(location(g), lc1, atol = 1.0e-8) + @test isapprox(location(g), lc2, atol = 1.0e-8) + @test isapprox(scale(g), sc1, atol = 1.0e-8) + return @test isapprox(scale(g), sc2, atol = 1.0e-8) end ####### Validate results for a single-dimension MvLogNormal by comparing with univariate LogNormal @@ -162,26 +161,26 @@ end va = [0.16, 0.25, 0.36] C = [0.4 -0.2 -0.1; -0.2 0.5 -0.1; -0.1 -0.1 0.6] for (g, μ, Σ) in [ - (MvLogNormal(mu, PDMats.PDMat(C)), mu, C), - (MvLogNormal(mu_r, PDMats.PDMat(C)), mu_r, C), - (MvLogNormal(PDMats.PDiagMat(sqrt.(va))), zeros(3), Matrix(Diagonal(va))), - (@test_deprecated(MvLogNormal(mu, sqrt(0.2))), mu, Matrix(0.2I, 3, 3)), - (@test_deprecated(MvLogNormal(3, sqrt(0.2))), zeros(3), Matrix(0.2I, 3, 3)), - ( - @test_deprecated(MvLogNormal(mu, Vector{Float64}(sqrt.(va)))), - mu, - Matrix(Diagonal(va)), - ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 - ( - @test_deprecated(MvLogNormal(Vector{Float64}(sqrt.(va)))), - zeros(3), - Matrix(Diagonal(va)), - ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 - (MvLogNormal(mu, C), mu, C), - (MvLogNormal(mu, Diagonal(C)), mu, Diagonal(C)), - (MvLogNormal(mu, Symmetric(Diagonal(C))), mu, Diagonal(C)), - (MvLogNormal(C), zeros(3), C), - ] + (MvLogNormal(mu, PDMats.PDMat(C)), mu, C), + (MvLogNormal(mu_r, PDMats.PDMat(C)), mu_r, C), + (MvLogNormal(PDMats.PDiagMat(sqrt.(va))), zeros(3), Matrix(Diagonal(va))), + (@test_deprecated(MvLogNormal(mu, sqrt(0.2))), mu, Matrix(0.2I, 3, 3)), + (@test_deprecated(MvLogNormal(3, sqrt(0.2))), zeros(3), Matrix(0.2I, 3, 3)), + ( + @test_deprecated(MvLogNormal(mu, Vector{Float64}(sqrt.(va)))), + mu, + Matrix(Diagonal(va)), + ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 + ( + @test_deprecated(MvLogNormal(Vector{Float64}(sqrt.(va)))), + zeros(3), + Matrix(Diagonal(va)), + ), # Julia 0.4 loses type information so Vector{Float64} can be dropped when we don't support 0.4 + (MvLogNormal(mu, C), mu, C), + (MvLogNormal(mu, Diagonal(C)), mu, Diagonal(C)), + (MvLogNormal(mu, Symmetric(Diagonal(C))), mu, Diagonal(C)), + (MvLogNormal(C), zeros(3), C), + ] m, s = params(g) @test Vector(m) ≈ μ test_mvlognormal(g, 10^4) @@ -189,8 +188,8 @@ end d = MvLogNormal(Array{Float32}(mu), PDMats.PDMat(Array{Float32}(C))) @test convert(MvLogNormal{Float32}, d) === d @test typeof(convert(MvLogNormal{Float64}, d)) == - typeof(MvLogNormal(mu, PDMats.PDMat(C))) + typeof(MvLogNormal(mu, PDMats.PDMat(C))) @test typeof(convert(MvLogNormal{Float64}, d.normal.μ, d.normal.Σ)) == - typeof(MvLogNormal(mu, PDMats.PDMat(C))) + 
typeof(MvLogNormal(mu, PDMats.PDMat(C))) @test d == deepcopy(d) end diff --git a/test/multivariate/mvnormal.jl b/test/multivariate/mvnormal.jl index 841b9fb876..e42bcf605c 100644 --- a/test/multivariate/mvnormal.jl +++ b/test/multivariate/mvnormal.jl @@ -23,36 +23,36 @@ using FillArrays J = [4.0 -2.0 -1.0; -2.0 5.0 -1.0; -1.0 -1.0 6.0] D = Diagonal(J) for (g, μ, Σ) in [ - (@test_deprecated(MvNormal(mu, sqrt(2.0))), mu, Matrix(2.0I, 3, 3)), - (@test_deprecated(MvNormal(mu_r, sqrt(2.0))), mu_r, Matrix(2.0I, 3, 3)), - (@test_deprecated(MvNormal(3, sqrt(2.0))), zeros(3), Matrix(2.0I, 3, 3)), - (@test_deprecated(MvNormal(mu, sqrt.(va))), mu, Matrix(Diagonal(va))), - (@test_deprecated(MvNormal(mu_r, sqrt.(va))), mu_r, Matrix(Diagonal(va))), - (@test_deprecated(MvNormal(sqrt.(va))), zeros(3), Matrix(Diagonal(va))), - (MvNormal(mu, C), mu, C), - (MvNormal(mu_r, C), mu_r, C), - (MvNormal(C), zeros(3), C), - (MvNormal(Symmetric(C)), zeros(3), Matrix(Symmetric(C))), - (MvNormal(Diagonal(dv)), zeros(3), Matrix(Diagonal(dv))), - (@test_deprecated(MvNormalCanon(h, 2.0)), h ./ 2.0, Matrix(0.5I, 3, 3)), - (@test_deprecated(MvNormalCanon(mu_r, 2.0)), mu_r ./ 2.0, Matrix(0.5I, 3, 3)), - (@test_deprecated(MvNormalCanon(3, 2.0)), zeros(3), Matrix(0.5I, 3, 3)), - (@test_deprecated(MvNormalCanon(h, dv)), h ./ dv, Matrix(Diagonal(inv.(dv)))), - (@test_deprecated(MvNormalCanon(mu_r, dv)), mu_r ./ dv, Matrix(Diagonal(inv.(dv)))), - (@test_deprecated(MvNormalCanon(dv)), zeros(3), Matrix(Diagonal(inv.(dv)))), - (MvNormalCanon(h, J), J \ h, inv(J)), - (MvNormalCanon(J), zeros(3), inv(J)), - (MvNormalCanon(h, D), Diagonal(D) \ h, inv(D)), - (MvNormalCanon(D), zeros(3), inv(D)), - (MvNormalCanon(h, Symmetric(D)), D \ h, inv(D)), - (MvNormalCanon(Hermitian(D)), zeros(3), inv(D)), - (MvNormal(mu, Symmetric(C)), mu, Matrix(Symmetric(C))), - (MvNormal(mu_r, Symmetric(C)), mu_r, Matrix(Symmetric(C))), - (MvNormal(mu, Diagonal(dv)), mu, Matrix(Diagonal(dv))), - (MvNormal(mu, Symmetric(Diagonal(dv))), mu, Matrix(Diagonal(dv))), - (MvNormal(mu, Hermitian(Diagonal(dv))), mu, Matrix(Diagonal(dv))), - (MvNormal(mu_r, Diagonal(dv)), mu_r, Matrix(Diagonal(dv))), - ] + (@test_deprecated(MvNormal(mu, sqrt(2.0))), mu, Matrix(2.0I, 3, 3)), + (@test_deprecated(MvNormal(mu_r, sqrt(2.0))), mu_r, Matrix(2.0I, 3, 3)), + (@test_deprecated(MvNormal(3, sqrt(2.0))), zeros(3), Matrix(2.0I, 3, 3)), + (@test_deprecated(MvNormal(mu, sqrt.(va))), mu, Matrix(Diagonal(va))), + (@test_deprecated(MvNormal(mu_r, sqrt.(va))), mu_r, Matrix(Diagonal(va))), + (@test_deprecated(MvNormal(sqrt.(va))), zeros(3), Matrix(Diagonal(va))), + (MvNormal(mu, C), mu, C), + (MvNormal(mu_r, C), mu_r, C), + (MvNormal(C), zeros(3), C), + (MvNormal(Symmetric(C)), zeros(3), Matrix(Symmetric(C))), + (MvNormal(Diagonal(dv)), zeros(3), Matrix(Diagonal(dv))), + (@test_deprecated(MvNormalCanon(h, 2.0)), h ./ 2.0, Matrix(0.5I, 3, 3)), + (@test_deprecated(MvNormalCanon(mu_r, 2.0)), mu_r ./ 2.0, Matrix(0.5I, 3, 3)), + (@test_deprecated(MvNormalCanon(3, 2.0)), zeros(3), Matrix(0.5I, 3, 3)), + (@test_deprecated(MvNormalCanon(h, dv)), h ./ dv, Matrix(Diagonal(inv.(dv)))), + (@test_deprecated(MvNormalCanon(mu_r, dv)), mu_r ./ dv, Matrix(Diagonal(inv.(dv)))), + (@test_deprecated(MvNormalCanon(dv)), zeros(3), Matrix(Diagonal(inv.(dv)))), + (MvNormalCanon(h, J), J \ h, inv(J)), + (MvNormalCanon(J), zeros(3), inv(J)), + (MvNormalCanon(h, D), Diagonal(D) \ h, inv(D)), + (MvNormalCanon(D), zeros(3), inv(D)), + (MvNormalCanon(h, Symmetric(D)), D \ h, inv(D)), + (MvNormalCanon(Hermitian(D)), 
zeros(3), inv(D)), + (MvNormal(mu, Symmetric(C)), mu, Matrix(Symmetric(C))), + (MvNormal(mu_r, Symmetric(C)), mu_r, Matrix(Symmetric(C))), + (MvNormal(mu, Diagonal(dv)), mu, Matrix(Diagonal(dv))), + (MvNormal(mu, Symmetric(Diagonal(dv))), mu, Matrix(Diagonal(dv))), + (MvNormal(mu, Hermitian(Diagonal(dv))), mu, Matrix(Diagonal(dv))), + (MvNormal(mu_r, Diagonal(dv)), mu_r, Matrix(Diagonal(dv))), + ] @test mean(g) ≈ μ @test cov(g) ≈ Σ @@ -86,16 +86,16 @@ end @test typeof(MvNormal(mu, PDMat(Array{Float32}(C)))) == typeof(MvNormal(mu, PDMat(C))) @test typeof(MvNormal(mu, Array{Float32}(C))) == typeof(MvNormal(mu, PDMat(C))) @test typeof(@test_deprecated(MvNormal(mu, 2.0f0))) == - typeof(@test_deprecated(MvNormal(mu, 2.0))) + typeof(@test_deprecated(MvNormal(mu, 2.0))) @test typeof(MvNormalCanon(h, PDMat(Array{Float32}(J)))) == - typeof(MvNormalCanon(h, PDMat(J))) + typeof(MvNormalCanon(h, PDMat(J))) @test typeof(MvNormalCanon(h, Array{Float32}(J))) == typeof(MvNormalCanon(h, PDMat(J))) @test typeof(@test_deprecated(MvNormalCanon(h, 2.0f0))) == - typeof(@test_deprecated(MvNormalCanon(h, 2.0))) + typeof(@test_deprecated(MvNormalCanon(h, 2.0))) @test typeof(MvNormalCanon(mu, Array{Float16}(h), PDMat(Array{Float32}(J)))) == - typeof(MvNormalCanon(mu, h, PDMat(J))) + typeof(MvNormalCanon(mu, h, PDMat(J))) d = MvNormal(Array{Float32}(mu), PDMat(Array{Float32}(C))) @test convert(MvNormal{Float32}, d) === d @@ -105,9 +105,9 @@ end d = MvNormalCanon(Array{Float32}(mu), Array{Float32}(h), PDMat(Array{Float32}(J))) @test convert(MvNormalCanon{Float32}, d) === d @test typeof(convert(MvNormalCanon{Float64}, d)) == - typeof(MvNormalCanon(mu, h, PDMat(J))) + typeof(MvNormalCanon(mu, h, PDMat(J))) @test typeof(convert(MvNormalCanon{Float64}, d.μ, d.h, d.J)) == - typeof(MvNormalCanon(mu, h, PDMat(J))) + typeof(MvNormalCanon(mu, h, PDMat(J))) @test MvNormal(mu, I) === @test_deprecated(MvNormal(mu, 1)) @test MvNormal(mu, 9 * I) === @test_deprecated(MvNormal(mu, 3)) @@ -120,14 +120,14 @@ end @test MvNormalCanon(h, I) == MvNormalCanon(h, Diagonal(Ones(length(h)))) @test MvNormalCanon(h, 9 * I) == MvNormalCanon(h, Diagonal(Fill(9, length(h)))) @test MvNormalCanon(h, 0.25f0 * I) == - MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h)))) + MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h)))) @test typeof(MvNormalCanon(h, I)) === - typeof(MvNormalCanon(h, Diagonal(Ones(length(h))))) + typeof(MvNormalCanon(h, Diagonal(Ones(length(h))))) @test typeof(MvNormalCanon(h, 9 * I)) === - typeof(MvNormalCanon(h, Diagonal(Fill(9, length(h))))) + typeof(MvNormalCanon(h, Diagonal(Fill(9, length(h))))) @test typeof(MvNormalCanon(h, 0.25f0 * I)) === - typeof(MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h))))) + typeof(MvNormalCanon(h, Diagonal(Fill(0.25f0, length(h))))) end @testset "MvNormal 32-bit logpdf" begin @@ -167,7 +167,7 @@ if isdefined(PDMats, :PDSparseMat) dists = [d_prec_sparse, d_prec_dense, d_cov_dense] samples = [x_prec_sparse, x_prec_dense, x_cov_dense] - tol = 1e-16 + tol = 1.0e-16 se = sqrt.(diag(Σ) ./ nsamp) #= The cholesky decomposition of sparse matrices is performed by `SuiteSparse.CHOLMOD`, @@ -179,10 +179,10 @@ if isdefined(PDMats, :PDSparseMat) identical. As a result, these tests only check for approximate statistical equality, rather than strict numerical equality of the samples. 
=# - for i = 1:3, j = 1:3 + for i in 1:3, j in 1:3 @test all(abs.(mean(samples[i]) .- μ) .< 2se) - loglik_ii = [logpdf(dists[i], samples[i][:, k]) for k = 1:100_000] - loglik_ji = [logpdf(dists[j], samples[i][:, k]) for k = 1:100_000] + loglik_ii = [logpdf(dists[i], samples[i][:, k]) for k in 1:100_000] + loglik_ji = [logpdf(dists[j], samples[i][:, k]) for k in 1:100_000] # test average likelihood ratio between distribution i and sample j are small @test mean((loglik_ii .- loglik_ji) .^ 2) < tol end @@ -254,14 +254,14 @@ end @testset "MvNormal affine transformations" begin @testset "moment identities" begin - for n = 1:5 # dimension + for n in 1:5 # dimension # distribution μ = randn(n) for Σ in ( - randn(n, n) |> A -> A * A', # dense - Diagonal(abs2.(randn(n))), # diagonal - abs2(randn()) * I, - ) # scaled unit + randn(n, n) |> A -> A * A', # dense + Diagonal(abs2.(randn(n))), # diagonal + abs2(randn()) * I, + ) # scaled unit d = MvNormal(μ, Σ) # random arrays for transformations diff --git a/test/multivariate/mvtdist.jl b/test/multivariate/mvtdist.jl index 540b62e838..a5638bb4c2 100644 --- a/test/multivariate/mvtdist.jl +++ b/test/multivariate/mvtdist.jl @@ -26,7 +26,7 @@ import PDMats: PDMat -5.4585441614404803801, ] df = [1.0, 2, 3, 5, 10] - for i = 1:length(df) + for i in 1:length(df) d = MvTDist(df[i], mu, Sigma) @test isapprox(logpdf(d, [-2.0, 3]), rvalues[i], atol = 1.0e-8) dd = typeof(d)(params(d)...) @@ -37,17 +37,17 @@ import PDMats: PDMat # test constructors for mixed inputs: @test typeof(GenericMvTDist(1, Vector{Float32}(mu), PDMat(Sigma))) == - typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) + typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) @test typeof(GenericMvTDist(1, mu, PDMat(Array{Float32}(Sigma)))) == - typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) + typeof(GenericMvTDist(1.0, mu, PDMat(Sigma))) d = GenericMvTDist(1, Array{Float32}(mu), PDMat(Array{Float32}(Sigma))) @test convert(GenericMvTDist{Float32}, d) === d @test typeof(convert(GenericMvTDist{Float64}, d)) == - typeof(GenericMvTDist(1, mu, PDMat(Sigma))) + typeof(GenericMvTDist(1, mu, PDMat(Sigma))) @test typeof(convert(GenericMvTDist{Float64}, d.df, d.dim, d.μ, d.Σ)) == - typeof(GenericMvTDist(1, mu, PDMat(Sigma))) + typeof(GenericMvTDist(1, mu, PDMat(Sigma))) @test partype(d) == Float32 @test d == deepcopy(d) @@ -61,7 +61,7 @@ import PDMats: PDMat # depends on PDMats#101 (merged but not released) # Sigma_static = @SMatrix [4. 2; 2 3] - for i = 1:length(df) + for i in 1:length(df) d = GenericMvTDist(df[i], mu_static, PDMat(Sigma)) d32 = convert(GenericMvTDist{Float32}, d) @test d.μ isa SVector diff --git a/test/multivariate/product.jl b/test/multivariate/product.jl index ba5a4ccc28..a1ea9e584a 100644 --- a/test/multivariate/product.jl +++ b/test/multivariate/product.jl @@ -69,7 +69,7 @@ using Distributions: Product for a in ([0, 1], [-0.5, 0.5]) # Construct independent distributions and `Product` distribution from these. 
- ds = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ = 1:N] + ds = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N] x = rand.(ds) d_product = product_distribution(ds) @test d_product isa Product diff --git a/test/multivariate/vonmisesfisher.jl b/test/multivariate/vonmisesfisher.jl index 6e000be393..f43ab03d93 100644 --- a/test/multivariate/vonmisesfisher.jl +++ b/test/multivariate/vonmisesfisher.jl @@ -10,19 +10,19 @@ logvmfCp(p::Int, κ::Real) = safe_logvmfpdf(μ::Vector, κ::Real, x::Vector) = logvmfCp(length(μ), κ) + κ * dot(μ, x) -function gen_vmf_tdata(n::Int, p::Int, rng::Union{AbstractRNG,Missing} = missing) +function gen_vmf_tdata(n::Int, p::Int, rng::Union{AbstractRNG, Missing} = missing) if ismissing(rng) X = randn(p, n) else X = randn(rng, p, n) end - for i = 1:n + for i in 1:n X[:, i] = X[:, i] ./ norm(X[:, i]) end return X end -function test_vmf_rot(p::Int, rng::Union{AbstractRNG,Missing} = missing) +function test_vmf_rot(p::Int, rng::Union{AbstractRNG, Missing} = missing) if ismissing(rng) μ = randn(p) x = randn(p) @@ -37,13 +37,12 @@ function test_vmf_rot(p::Int, rng::Union{AbstractRNG,Missing} = missing) v = μ - vcat(1, zeros(p - 1)) H = I - 2 * v * v' / (v' * v) - @test Distributions._vmf_rot!(s.v, copy(x)) ≈ (H * x) + return @test Distributions._vmf_rot!(s.v, copy(x)) ≈ (H * x) end - -function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG,Missing} = missing) +function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG, Missing} = missing) p = 3 if ismissing(rng) @@ -55,8 +54,8 @@ function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG,Missing} = missing s = Distributions.VonMisesFisherSampler(μ, float(κ)) - genw3_res = [Distributions._vmf_genw3(rng, s.p, s.b, s.x0, s.c, s.κ) for _ = 1:ns] - genwp_res = [Distributions._vmf_genwp(rng, s.p, s.b, s.x0, s.c, s.κ) for _ = 1:ns] + genw3_res = [Distributions._vmf_genw3(rng, s.p, s.b, s.x0, s.c, s.κ) for _ in 1:ns] + genwp_res = [Distributions._vmf_genwp(rng, s.p, s.b, s.x0, s.c, s.κ) for _ in 1:ns] @test isapprox(mean(genw3_res), mean(genwp_res), atol = 0.01) @test isapprox(std(genw3_res), std(genwp_res), atol = 0.01 / κ) @@ -67,17 +66,17 @@ function test_genw3(κ::Real, ns::Int, rng::Union{AbstractRNG,Missing} = missing var_w = 1 - coth_κ^2 + 1 / κ^2 @test isapprox(mean(genw3_res), mean_w, atol = 0.01) - @test isapprox(std(genw3_res), sqrt(var_w), atol = 0.01 / κ) + return @test isapprox(std(genw3_res), sqrt(var_w), atol = 0.01 / κ) end function test_vonmisesfisher( - p::Int, - κ::Real, - n::Int, - ns::Int, - rng::Union{AbstractRNG,Missing} = missing, -) + p::Int, + κ::Real, + n::Int, + ns::Int, + rng::Union{AbstractRNG, Missing} = missing, + ) if ismissing(rng) μ = randn(p) else @@ -94,9 +93,9 @@ function test_vonmisesfisher( # conversions @test convert(VonMisesFisher{partype(d)}, d) === d for d32 in ( - convert(VonMisesFisher{Float32}, d), - convert(VonMisesFisher{Float32}, d.μ, d.κ, d.logCκ), - ) + convert(VonMisesFisher{Float32}, d), + convert(VonMisesFisher{Float32}, d.μ, d.κ, d.logCκ), + ) @test d32 isa VonMisesFisher{Float32} @test params(d32) == (map(Float32, μ), Float32(κ)) end @@ -111,7 +110,7 @@ function test_vonmisesfisher( X = gen_vmf_tdata(n, p, rng) lp0 = zeros(n) - for i = 1:n + for i in 1:n xi = X[:, i] lp0[i] = safe_logvmfpdf(μ, κ, xi) @test logpdf(d, xi) ≈ lp0[i] @@ -132,7 +131,7 @@ function test_vonmisesfisher( else X = rand(rng, d, n) end - for i = 1:n + for i in 1:n @test norm(X[:, i]) ≈ 1.0 @test insupport(d, X[:, i]) end @@ -150,7 +149,7 @@ function test_vonmisesfisher( d_est = 
fit(VonMisesFisher{Float64}, X) @test isa(d_est, VonMisesFisher) @test isapprox(d_est.μ, μ, atol = 0.01) - @test isapprox(d_est.κ, κ, atol = κ * 0.01) + return @test isapprox(d_est.κ, κ, atol = κ * 0.01) end @@ -170,19 +169,19 @@ end n = 1000 ns = 10^6 @testset "Testing VonMisesFisher with $key" for (key, rng) in Dict( - "rand(...)" => missing, - "rand(rng, ...)" => MersenneTwister(123), -) + "rand(...)" => missing, + "rand(rng, ...)" => MersenneTwister(123), + ) @testset "Testing VonMisesFisher with $key at ($p, $κ)" for (p, κ) in [ - (2, 1.0), - (2, 5.0), - (3, 1.0), - (3, 5.0), - (5, 2.0), - (2, 2), - (2, 1000), - ] # test with large κ + (2, 1.0), + (2, 5.0), + (3, 1.0), + (3, 5.0), + (5, 2.0), + (2, 2), + (2, 1000), + ] # test with large κ test_vonmisesfisher(p, κ, n, ns, rng) test_vmf_rot(p, rng) end diff --git a/test/multivariate_stats.jl b/test/multivariate_stats.jl index 92b264dd67..aab1279bd0 100644 --- a/test/multivariate_stats.jl +++ b/test/multivariate_stats.jl @@ -11,15 +11,15 @@ J = C for d in [ - Dirichlet(3, 2.0), - Dirichlet([2.0, 1.0, 3.0]), - IsoNormal(mu, 2.0), - DiagNormal(mu, [1.5, 2.0, 2.5]), - MvNormal(mu, C), - IsoNormalCanon(h, 2.0), - DiagNormalCanon(h, [1.5, 2.0, 1.2]), - MvNormalCanon(h, J), -] + Dirichlet(3, 2.0), + Dirichlet([2.0, 1.0, 3.0]), + IsoNormal(mu, 2.0), + DiagNormal(mu, [1.5, 2.0, 2.5]), + MvNormal(mu, C), + IsoNormalCanon(h, 2.0), + DiagNormalCanon(h, [1.5, 2.0, 1.2]), + MvNormalCanon(h, J), + ] println(d) dmean = mean(d) diff --git a/test/namedtuple/productnamedtuple.jl b/test/namedtuple/productnamedtuple.jl index 3835c1bf27..87aaf0b74c 100644 --- a/test/namedtuple/productnamedtuple.jl +++ b/test/namedtuple/productnamedtuple.jl @@ -44,29 +44,29 @@ using Test @testset "show" begin d = ProductNamedTupleDistribution((x = Gamma(1.0, 2.0), y = Normal())) @test repr(d) == """ - ProductNamedTupleDistribution{(:x, :y)}( - x: Gamma{Float64}(α=1.0, θ=2.0) - y: Normal{Float64}(μ=0.0, σ=1.0) - ) - """ + ProductNamedTupleDistribution{(:x, :y)}( + x: Gamma{Float64}(α=1.0, θ=2.0) + y: Normal{Float64}(μ=0.0, σ=1.0) + ) + """ end @testset "Properties" begin @testset "eltype" begin @testset for nt in [ - (x = Normal(1.0, 2.0), y = Normal(3.0, 4.0)), - (x = Normal(), y = Gamma()), - (x = Bernoulli(),), - (x = Normal(), y = Bernoulli()), - (w = LKJCholesky(3, 2.0),), - ( - x = Normal(), - y = Dirichlet(10, 1.0), - z = DiscreteUniform(1, 10), - w = LKJCholesky(3, 2.0), - ), - (x = product_distribution((x = Normal(), y = Gamma())),), - ] + (x = Normal(1.0, 2.0), y = Normal(3.0, 4.0)), + (x = Normal(), y = Gamma()), + (x = Bernoulli(),), + (x = Normal(), y = Bernoulli()), + (w = LKJCholesky(3, 2.0),), + ( + x = Normal(), + y = Dirichlet(10, 1.0), + z = DiscreteUniform(1, 10), + w = LKJCholesky(3, 2.0), + ), + (x = product_distribution((x = Normal(), y = Gamma())),), + ] d = ProductNamedTupleDistribution(nt) @test eltype(d) === eltype(rand(d)) end @@ -76,14 +76,14 @@ using Test nt = (x = Normal(1.0, 2.0), y = Gamma(), z = MvNormal(Diagonal(ones(5)))) d = ProductNamedTupleDistribution(nt) @test @inferred(minimum(d)) == - (x = minimum(nt.x), y = minimum(nt.y), z = minimum(nt.z)) + (x = minimum(nt.x), y = minimum(nt.y), z = minimum(nt.z)) end @testset "maximum" begin nt = (x = Normal(1.0, 2.0), y = Gamma(), z = MvNormal(Diagonal(ones(5)))) d = ProductNamedTupleDistribution(nt) @test @inferred(maximum(d)) == - (x = maximum(nt.x), y = maximum(nt.y), z = maximum(nt.z)) + (x = maximum(nt.x), y = maximum(nt.y), z = maximum(nt.z)) end @testset "insupport" begin @@ -105,13 
+105,13 @@ using Test x = (x = rand(nt.x), y = rand(nt.y), z = rand(nt.z), w = rand(nt.w)) xrev = NamedTuple{(:z, :y, :x, :w)}(x) @test @inferred(logpdf(d, x)) == - logpdf(nt.x, x.x) + logpdf(nt.y, x.y) + logpdf(nt.z, x.z) + logpdf(nt.w, x.w) + logpdf(nt.x, x.x) + logpdf(nt.y, x.y) + logpdf(nt.z, x.z) + logpdf(nt.w, x.w) @test @inferred(logpdf(d, xrev)) == logpdf(d, x) @test @inferred(pdf(d, x)) == exp(logpdf(d, x)) @test @inferred(pdf(d, xrev)) == pdf(d, x) @test @inferred(loglikelihood(d, x)) == logpdf(d, x) @test @inferred(loglikelihood(d, xrev)) == loglikelihood(d, x) - xs = [(x = rand(nt.x), y = rand(nt.y), z = rand(nt.z), w = rand(nt.w)) for _ = 1:10] + xs = [(x = rand(nt.x), y = rand(nt.y), z = rand(nt.z), w = rand(nt.w)) for _ in 1:10] xs_perm = [NamedTuple{(:z, :y, :x, :w)}(x) for x in xs] @test @inferred(loglikelihood(d, xs)) == sum(logpdf.(Ref(d), xs)) @test @inferred(loglikelihood(d, xs_perm)) == loglikelihood(d, xs) @@ -126,20 +126,20 @@ using Test ) d = ProductNamedTupleDistribution(nt) @test @inferred(mode(d)) == - (x = mode(nt.x), y = mode(nt.y), z = mode(nt.z), w = mode(nt.w)) + (x = mode(nt.x), y = mode(nt.y), z = mode(nt.z), w = mode(nt.w)) @test @inferred(mean(d)) == - (x = mean(nt.x), y = mean(nt.y), z = mean(nt.z), w = mean(nt.w)) + (x = mean(nt.x), y = mean(nt.y), z = mean(nt.z), w = mean(nt.w)) @test @inferred(var(d)) == - (x = var(nt.x), y = var(nt.y), z = var(nt.z), w = var(nt.w)) + (x = var(nt.x), y = var(nt.y), z = var(nt.z), w = var(nt.w)) @test @inferred(entropy(d)) == - entropy(nt.x) + entropy(nt.y) + entropy(nt.z) + entropy(nt.w) + entropy(nt.x) + entropy(nt.y) + entropy(nt.z) + entropy(nt.w) d1 = ProductNamedTupleDistribution((x = Normal(1.0, 2.0), y = Gamma())) d2 = ProductNamedTupleDistribution((x = Normal(), y = Gamma(2.0, 3.0))) d2_perm = ProductNamedTupleDistribution((y = Gamma(2.0, 3.0), x = Normal())) d2_sub = ProductNamedTupleDistribution((x = Normal(1.0, 2.0),)) @test kldivergence(d1, d2) == - kldivergence(d1.dists.x, d2.dists.x) + kldivergence(d1.dists.y, d2.dists.y) + kldivergence(d1.dists.x, d2.dists.x) + kldivergence(d1.dists.y, d2.dists.y) @test kldivergence(d1, d2_perm) == kldivergence(d1, d2) @test_throws ArgumentError kldivergence(d1, d2_sub) @test_throws ArgumentError kldivergence(d2_sub, d1) @@ -196,7 +196,7 @@ using Test @test length(xs1) == 10 @test all(insupport.(Ref(d), xs1)) - xs2 = @inferred Array{<:NamedTuple{(:x, :y, :z, :w)},3} rand(rng, d, (2, 3, 4)) + xs2 = @inferred Array{<:NamedTuple{(:x, :y, :z, :w)}, 3} rand(rng, d, (2, 3, 4)) @test size(xs2) == (2, 3, 4) @test all(insupport.(Ref(d), xs2)) @@ -208,12 +208,14 @@ using Test end @testset "rand!" begin - d = ProductNamedTupleDistribution(( - x = Normal(1.0, 2.0), - y = Gamma(), - z = Dirichlet(5, 1.0), - w = Bernoulli(), - )) + d = ProductNamedTupleDistribution( + ( + x = Normal(1.0, 2.0), + y = Gamma(), + z = Dirichlet(5, 1.0), + w = Bernoulli(), + ) + ) x = rand(d) xs = Array{typeof(x)}(undef, (2, 3, 4)) rand!(d, xs) diff --git a/test/pdfnorm.jl b/test/pdfnorm.jl index 24db7449dc..f0d93d33d0 100644 --- a/test/pdfnorm.jl +++ b/test/pdfnorm.jl @@ -11,7 +11,7 @@ function numeric_norm(d::DiscreteUnivariateDistribution) # When the distribution has infinite support, sum up to an arbitrary large # value. upper = isfinite(maximum(d)) ? 
round(Int, maximum(d)) : 100 - return sum(pdf(d, k)^2 for k = round(Int, minimum(d)):upper) + return sum(pdf(d, k)^2 for k in round(Int, minimum(d)):upper) end @testset "pdf L2 norm" begin @@ -37,7 +37,7 @@ end @testset "Categorical" begin for n in (1, 2, 5, 10) - d = Categorical(collect(1 / n for _ = 1:n)) + d = Categorical(collect(1 / n for _ in 1:n)) @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end for d in (Categorical([0.25, 0.75]), Categorical([1 / 6, 1 / 3, 1 / 2])) @@ -51,8 +51,8 @@ end end # The norm doesn't depend on the mean @test pdfsquaredL2norm(Cauchy(100, 1)) == - pdfsquaredL2norm(Cauchy(-100, 1)) == - pdfsquaredL2norm(Cauchy(0, 1)) + pdfsquaredL2norm(Cauchy(-100, 1)) == + pdfsquaredL2norm(Cauchy(0, 1)) end @testset "Chi" begin @@ -86,12 +86,12 @@ end @testset "Geometric" begin for d in ( - Geometric(0.20), - Geometric(0.25), - Geometric(0.50), - Geometric(0.75), - Geometric(0.80), - ) + Geometric(0.2), + Geometric(0.25), + Geometric(0.5), + Geometric(0.75), + Geometric(0.8), + ) @test pdfsquaredL2norm(d) ≈ numeric_norm(d) end end @@ -102,8 +102,8 @@ end end # The norm doesn't depend on the mean @test pdfsquaredL2norm(Logistic(100, 1)) == - pdfsquaredL2norm(Logistic(-100, 1)) == - pdfsquaredL2norm(Logistic(0, 1)) + pdfsquaredL2norm(Logistic(-100, 1)) == + pdfsquaredL2norm(Logistic(0, 1)) end @testset "Normal" begin @@ -113,8 +113,8 @@ end @test pdfsquaredL2norm(Normal(1, 0)) ≈ Inf # The norm doesn't depend on the mean @test pdfsquaredL2norm(Normal(100, 1)) == - pdfsquaredL2norm(Normal(-100, 1)) == - pdfsquaredL2norm(Normal(0, 1)) + pdfsquaredL2norm(Normal(-100, 1)) == + pdfsquaredL2norm(Normal(0, 1)) end @testset "Poisson" begin diff --git a/test/product.jl b/test/product.jl index fb41510041..2558bdae61 100644 --- a/test/product.jl +++ b/test/product.jl @@ -18,7 +18,7 @@ using LinearAlgebra ds2 = Fill(Normal(first(μ), 1.0), N) d_product2 = @inferred(product_distribution(ds2)) - @test d_product2 isa MvNormal{Float64,Distributions.ScalMat{Float64},<:Fill{Float64,1}} + @test d_product2 isa MvNormal{Float64, Distributions.ScalMat{Float64}, <:Fill{Float64, 1}} # Check that methods for `ProductDistribution` are consistent. for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2)) @@ -50,12 +50,12 @@ end # when `Product` is removed d_product1 = @inferred(Distributions.ProductDistribution(ds1)) @test d_product1 isa - Distributions.VectorOfUnivariateDistribution{<:Vector,Continuous,Float64} + Distributions.VectorOfUnivariateDistribution{<:Vector, Continuous, Float64} d_product2 = @inferred(product_distribution(ntuple(i -> Uniform(0.0, ubound[i]), 11)...)) @test d_product2 isa - Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} + Distributions.VectorOfUnivariateDistribution{<:Tuple, Continuous, Float64} ds3 = Fill(Uniform(0.0, first(ubound)), N) # Replace with @@ -63,7 +63,7 @@ end # when `Product` is removed d_product3 = @inferred(Distributions.ProductDistribution(ds3)) @test d_product3 isa - Distributions.VectorOfUnivariateDistribution{<:Fill,Continuous,Float64} + Distributions.VectorOfUnivariateDistribution{<:Fill, Continuous, Float64} # Check that methods for `VectorOfUnivariateDistribution` are consistent. for (ds, d_product) in ((ds1, d_product1), (ds1, d_product2), (ds3, d_product3)) @@ -99,7 +99,7 @@ end for a in ([0, 1], [-0.5, 0.5]) # Construct independent distributions and `ProductDistribution` from these. 
- ds1 = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ = 1:N] + ds1 = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N] # Replace with # d_product1 = @inferred(product_distribution(ds1)) # when `Product` is removed @@ -114,7 +114,7 @@ end product_distribution(ntuple(_ -> DiscreteNonParametric(a, [0.5, 0.5]), 11)...) ) @test d_product2 isa Distributions.VectorOfUnivariateDistribution{ - <:NTuple{N,<:DiscreteNonParametric}, + <:NTuple{N, <:DiscreteNonParametric}, Discrete, eltype(a), } @@ -125,7 +125,7 @@ end # when `Product` is removed d_product3 = @inferred(Distributions.ProductDistribution(ds3)) @test d_product3 isa Distributions.VectorOfUnivariateDistribution{ - <:Fill{<:DiscreteNonParametric,1}, + <:Fill{<:DiscreteNonParametric, 1}, Discrete, eltype(a), } @@ -165,7 +165,7 @@ end ds = (Bernoulli(0.3), Uniform(0.0, 0.7), Categorical([0.4, 0.2, 0.4])) d_product = @inferred(product_distribution(ds...)) @test d_product isa - Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64} + Distributions.VectorOfUnivariateDistribution{<:Tuple, Continuous, Float64} ds_vec = vcat(ds...) @@ -210,7 +210,7 @@ end ds2 = Fill(Uniform(0.0, first(ubound)), M, N) d_product2 = @inferred(product_distribution(ds2)) @test d_product2 isa Distributions.MatrixOfUnivariateDistribution{ - <:Fill{<:Uniform,2}, + <:Fill{<:Uniform, 2}, Continuous, Float64, } @@ -223,7 +223,7 @@ end @test @inferred(var(d_product)) == var.(ds) @test @inferred(cov(d_product)) == Diagonal(vec(var.(ds))) @test @inferred(cov(d_product, Val(false))) == - reshape(Diagonal(vec(var.(ds))), M, N, M, N) + reshape(Diagonal(vec(var.(ds))), M, N, M, N) @test minimum(d_product) == map(minimum, ds) @test maximum(d_product) == map(maximum, ds) @@ -251,7 +251,7 @@ end @test d_product1 isa Distributions.ProductDistribution{ length(N) + 1, 1, - <:Array{<:Dirichlet{Float64},length(N)}, + <:Array{<:Dirichlet{Float64}, length(N)}, Continuous, Float64, } @@ -261,7 +261,7 @@ end @test d_product2 isa Distributions.ProductDistribution{ length(N) + 1, 1, - <:Fill{<:Dirichlet{Float64},length(N)}, + <:Fill{<:Dirichlet{Float64}, length(N)}, Continuous, Float64, } @@ -293,16 +293,16 @@ end x = @inferred(rand(d_product)) @test size(x) == size(d_product) @test x isa - typeof(mapreduce(rand, (x, y) -> cat(x, y; dims = ndims(ds) + 1), ds)) + typeof(mapreduce(rand, (x, y) -> cat(x, y; dims = ndims(ds) + 1), ds)) # inference broken for non-Fill arrays y = reshape(x, Val(2)) if ds isa Fill @test @inferred(logpdf(d_product, x)) ≈ - sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) + sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) else @test logpdf(d_product, x) ≈ - sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) + sum(logpdf(d, y[:, i]) for (i, d) in enumerate(ds)) end # ensure that samples are different, in particular if `Fill` is used @test length(unique(x)) == length(d_product) diff --git a/test/qq.jl b/test/qq.jl index 22cffc6a8b..d676af415d 100644 --- a/test/qq.jl +++ b/test/qq.jl @@ -25,7 +25,7 @@ using Test @test length(a.qy) == length(a.qx) == 10 @test a.qx ≈ b.qx ≈ c.qx ≈ a.qy ≈ b.qy ≈ c.qy - for n = 0:3 + for n in 0:3 a = qqbuild(rand(n), Uniform(0, 1)) @test length(a.qy) == length(a.qx) == n end diff --git a/test/reshaped.jl b/test/reshaped.jl index a75560118c..9238c205d7 100644 --- a/test/reshaped.jl +++ b/test/reshaped.jl @@ -41,7 +41,7 @@ # support for d in d1s, s in sizes @test (size(d) == s) ⊻ - (length(s) != length(size(d)) || !insupport(d, reshape(x1, s))) + (length(s) != length(size(d)) || !insupport(d, reshape(x1, s))) 
end # mean diff --git a/test/samplers.jl b/test/samplers.jl index bd2ff9928a..bffa48d814 100644 --- a/test/samplers.jl +++ b/test/samplers.jl @@ -54,23 +54,23 @@ import Distributions: ] @testset "Binomial: $S" for (S, paramlst) in [ - ( - BinomialGeomSampler, - [ - (0, 0.4), - (0, 0.6), - (5, 0.0), - (5, 1.0), - (1, 0.2), - (1, 0.8), - (3, 0.4), - (4, 0.6), - ], - ), - (BinomialTPESampler, [(40, 0.5), (100, 0.4), (300, 0.6)]), - (BinomialPolySampler, binomparams), - (BinomialAliasSampler, binomparams), - ] + ( + BinomialGeomSampler, + [ + (0, 0.4), + (0, 0.6), + (5, 0.0), + (5, 1.0), + (1, 0.2), + (1, 0.8), + (3, 0.4), + (4, 0.6), + ], + ), + (BinomialTPESampler, [(40, 0.5), (100, 0.4), (300, 0.6)]), + (BinomialPolySampler, binomparams), + (BinomialAliasSampler, binomparams), + ] @testset "pa=$pa" for pa in paramlst n, p = pa test_samples(S(n, p), Binomial(n, p), n_tsamples) @@ -81,9 +81,9 @@ import Distributions: ## Poisson samplers @testset "Poisson: $S" for (S, paramlst) in [ - (PoissonCountSampler, [0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 20.0, 30.0]), - (PoissonADSampler, [10.0, 15.0, 20.0, 30.0]), - ] + (PoissonCountSampler, [0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 20.0, 30.0]), + (PoissonADSampler, [10.0, 15.0, 20.0, 30.0]), + ] @testset "μ=$μ" for μ in paramlst test_samples(S(μ), Poisson(μ), n_tsamples) test_samples(S(μ), Poisson(μ), n_tsamples, rng = rng) @@ -93,10 +93,10 @@ import Distributions: ## Poisson Binomial sampler @testset "Poisson-Binomial: $S" for S in (PoissBinAliasSampler,) @testset "p=$p" for p in ( - fill(0.2, 30), - range(0.1, stop = 0.99, length = 30), - [fill(0.1, 10); fill(0.9, 10)], - ) + fill(0.2, 30), + range(0.1, stop = 0.99, length = 30), + [fill(0.1, 10); fill(0.9, 10)], + ) d = PoissonBinomial(p) test_samples(S(d), d, n_tsamples) test_samples(S(d), d, n_tsamples, rng = rng) @@ -117,13 +117,13 @@ import Distributions: ## Gamma samplers @testset "Gamma (shape >= 1): $S" for S in [GammaGDSampler, GammaMTSampler] @testset "d=$d" for d in [ - Gamma(1.0, 1.0), - Gamma(2.0, 1.0), - Gamma(3.0, 1.0), - Gamma(1.0, 2.0), - Gamma(3.0, 2.0), - Gamma(100.0, 2.0), - ] + Gamma(1.0, 1.0), + Gamma(2.0, 1.0), + Gamma(3.0, 1.0), + Gamma(1.0, 2.0), + Gamma(3.0, 2.0), + Gamma(100.0, 2.0), + ] test_samples(S(d), d, n_tsamples) test_samples(S(d), d, n_tsamples, rng = rng) end @@ -160,15 +160,15 @@ import Distributions: @testset "Random.Sampler" begin for dist in ( - Binomial(5, 0.3), - Exponential(2.0), - Gamma(0.1, 1.0), - Gamma(2.0, 1.0), - MatrixNormal(3, 4), - MvNormal(zeros(3), I), - Normal(1.5, 2.0), - Poisson(0.5), - ) + Binomial(5, 0.3), + Exponential(2.0), + Gamma(0.1, 1.0), + Gamma(2.0, 1.0), + MatrixNormal(3, 4), + MvNormal(zeros(3), I), + Normal(1.5, 2.0), + Poisson(0.5), + ) @test Random.Sampler(rng, dist, Val(1)) == dist @test Random.Sampler(rng, dist) == sampler(dist) end diff --git a/test/statsapi.jl b/test/statsapi.jl index 4bc54204e1..ec9785914d 100644 --- a/test/statsapi.jl +++ b/test/statsapi.jl @@ -13,12 +13,12 @@ using Test ys = rand(dist, 1_000_000) # Check that empirical frequencies match pvalues of left/right tail approximately - @test pvalue(dist, x; tail = :left) ≈ mean(≤(x), ys) rtol = 5e-3 - @test pvalue(dist, x; tail = :right) ≈ mean(≥(x), ys) rtol = 5e-3 + @test pvalue(dist, x; tail = :left) ≈ mean(≤(x), ys) rtol = 5.0e-3 + @test pvalue(dist, x; tail = :right) ≈ mean(≥(x), ys) rtol = 5.0e-3 # Check consistency of pvalues of both tails @test pvalue(dist, x; tail = :both) == - min(1, 2 * min(pvalue(dist, x; tail = :left), pvalue(dist, x; tail = :right))) 
+ min(1, 2 * min(pvalue(dist, x; tail = :left), pvalue(dist, x; tail = :right))) # Incorrect value for keyword argument @test_throws ArgumentError("`tail=:l` is invalid") pvalue(dist, x; tail = :l) diff --git a/test/testutils.jl b/test/testutils.jl index e79df17430..8edca3ab19 100644 --- a/test/testutils.jl +++ b/test/testutils.jl @@ -11,7 +11,7 @@ import ForwardDiff function _linspace(a::Float64, b::Float64, n::Int) intv = (b - a) / (n - 1) r = Vector{Float64}(undef, n) - @inbounds for i = 1:n + @inbounds for i in 1:n r[i] = a + (i - 1) * intv end r[n] = b @@ -28,11 +28,11 @@ end # testing the implementation of a discrete univariate distribution # function test_distr( - distr::DiscreteUnivariateDistribution, - n::Int; - testquan::Bool = true, - rng::AbstractRNG = Random.default_rng(), -) + distr::DiscreteUnivariateDistribution, + n::Int; + testquan::Bool = true, + rng::AbstractRNG = Random.default_rng(), + ) test_range(distr) vs = get_evalsamples(distr, 0.00001) @@ -45,7 +45,7 @@ function test_distr( test_stats(distr, vs) test_samples(distr, n) test_samples(distr, n; rng = rng) - test_params(distr) + return test_params(distr) end function test_cgf(dist, ts) @@ -64,16 +64,17 @@ function test_cgf(dist, ts) @test (exp ∘ cgf)(dist, t) ≈ mgf(dist, t) rtol = rtol end end + return end # testing the implementation of a continuous univariate distribution # function test_distr( - distr::ContinuousUnivariateDistribution, - n::Int; - testquan::Bool = true, - rng::AbstractRNG = MersenneTwister(123), -) + distr::ContinuousUnivariateDistribution, + n::Int; + testquan::Bool = true, + rng::AbstractRNG = MersenneTwister(123), + ) test_range(distr) vs = get_evalsamples(distr, 0.01, 2000) @@ -88,7 +89,7 @@ function test_distr( allow_test_stats(distr) && test_stats(distr, xs) xs = test_samples(distr, n, rng = rng) allow_test_stats(distr) && test_stats(distr, xs) - test_params(distr) + return test_params(distr) end @@ -103,13 +104,13 @@ end # for discrete samplers # function test_samples( - s::Sampleable{Univariate,Discrete}, # the sampleable instance - distr::DiscreteUnivariateDistribution, # corresponding distribution - n::Int; # number of samples to generate - q::Float64 = 1.0e-7, # confidence interval, 1 - q as confidence - verbose::Bool = false, # show intermediate info (for debugging) - rng::Union{AbstractRNG,Missing} = missing, -) # add an rng? + s::Sampleable{Univariate, Discrete}, # the sampleable instance + distr::DiscreteUnivariateDistribution, # corresponding distribution + n::Int; # number of samples to generate + q::Float64 = 1.0e-7, # confidence interval, 1 - q as confidence + verbose::Bool = false, # show intermediate info (for debugging) + rng::Union{AbstractRNG, Missing} = missing, + ) # add an rng? 
# The basic idea # ------------------ @@ -144,7 +145,7 @@ function test_samples( # clb = Vector{Int}(undef, m) cub = Vector{Int}(undef, m) - for i = 1:m + for i in 1:m bp = Binomial(n, p0[i]) clb[i] = floor(Int, quantile(bp, q / 2)) cub[i] = ceil(Int, cquantile(bp, q / 2)) @@ -159,9 +160,9 @@ function test_samples( Random.seed!(1234) samples2 = rand(s, n) Random.seed!(1234) - samples3 = [rand(s) for _ = 1:n] + samples3 = [rand(s) for _ in 1:n] Random.seed!(1234) - samples4 = [rand(s) for _ = 1:n] + samples4 = [rand(s) for _ in 1:n] else # RNGs have to be copied with `copy`, not `deepcopy` # Ref https://github.com/JuliaLang/julia/issues/42899 @@ -170,8 +171,8 @@ function test_samples( rng4 = copy(rng) samples = rand(rng, s, n) samples2 = rand(rng2, s, n) - samples3 = [rand(rng3, s) for _ = 1:n] - samples4 = [rand(rng4, s) for _ = 1:n] + samples3 = [rand(rng3, s) for _ in 1:n] + samples4 = [rand(rng4, s) for _ in 1:n] end @test length(samples) == n @test samples2 == samples @@ -180,10 +181,10 @@ function test_samples( # scan samples and get counts cnts = zeros(Int, m) cnts_sc = zeros(Int, m) - for i = 1:n + for i in 1:n @inbounds si = samples[i] if rmin <= si <= rmax - cnts[si-rmin+1] += 1 + cnts[si - rmin + 1] += 1 else vmin <= si <= vmax || throw( DomainError( @@ -195,7 +196,7 @@ function test_samples( @inbounds si_sc = samples3[i] if rmin <= si_sc <= rmax - cnts_sc[si_sc-rmin+1] += 1 + cnts_sc[si_sc - rmin + 1] += 1 else vmin <= si_sc <= vmax || throw( DomainError( @@ -207,13 +208,13 @@ function test_samples( end # check the counts - for i = 1:m - verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts[i])") + for i in 1:m + verbose && println("v = $(rmin + i - 1) ==> ($(clb[i]), $(cub[i])): $(cnts[i])") clb[i] <= cnts[i] <= cub[i] || error( "The counts of samples generated by `rand(s, n)` are out of the confidence interval.", ) - verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts_sc[i])") + verbose && println("v = $(rmin + i - 1) ==> ($(clb[i]), $(cub[i])): $(cnts_sc[i])") clb[i] <= cnts_sc[i] <= cub[i] || error( "The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.", ) @@ -232,14 +233,14 @@ test_samples( # for continuous samplers # function test_samples( - s::Sampleable{Univariate,Continuous}, # the sampleable instance - distr::ContinuousUnivariateDistribution, # corresponding distribution - n::Int; # number of samples to generate - nbins::Int = 50, # divide the main interval into nbins - q::Float64 = 1.0e-6, # confidence interval, 1 - q as confidence - verbose::Bool = false, # show intermediate info (for debugging) - rng::Union{AbstractRNG,Missing} = missing, -) # add an rng? + s::Sampleable{Univariate, Continuous}, # the sampleable instance + distr::ContinuousUnivariateDistribution, # corresponding distribution + n::Int; # number of samples to generate + nbins::Int = 50, # divide the main interval into nbins + q::Float64 = 1.0e-6, # confidence interval, 1 - q as confidence + verbose::Bool = false, # show intermediate info (for debugging) + rng::Union{AbstractRNG, Missing} = missing, + ) # add an rng? 
# The basic idea # ------------------ @@ -283,8 +284,8 @@ function test_samples( cub = Vector{Int}(undef, nbins) cdfs = map(Base.Fix1(cdf, distr), edges) - for i = 1:nbins - pi = cdfs[i+1] - cdfs[i] + for i in 1:nbins + pi = cdfs[i + 1] - cdfs[i] bp = Binomial(n, pi) clb[i] = floor(Int, quantile(bp, q / 2)) cub[i] = ceil(Int, cquantile(bp, q / 2)) @@ -299,9 +300,9 @@ function test_samples( Random.seed!(1234) samples2 = rand(s, n) Random.seed!(1234) - samples3 = [rand(s) for _ = 1:n] + samples3 = [rand(s) for _ in 1:n] Random.seed!(1234) - samples4 = [rand(s) for _ = 1:n] + samples4 = [rand(s) for _ in 1:n] else # RNGs have to be copied with `copy`, not `deepcopy` # Ref https://github.com/JuliaLang/julia/issues/42899 @@ -310,8 +311,8 @@ function test_samples( rng4 = copy(rng) samples = rand(rng, s, n) samples2 = rand(rng2, s, n) - samples3 = [rand(rng3, s) for _ = 1:n] - samples4 = [rand(rng4, s) for _ = 1:n] + samples3 = [rand(rng3, s) for _ in 1:n] + samples4 = [rand(rng4, s) for _ in 1:n] end @test length(samples) == n @test samples2 == samples @@ -322,7 +323,7 @@ function test_samples( end # check whether all samples are in the valid range - for i = 1:n + for i in 1:n @inbounds si = samples[i] vmin <= si <= vmax || throw( DomainError( @@ -347,12 +348,12 @@ function test_samples( @assert length(cnts_sc) == nbins # check the counts - for i = 1:nbins + for i in 1:nbins if verbose @printf( "[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], - edges[i+1], + edges[i + 1], clb[i], cub[i], cnts[i] @@ -360,7 +361,7 @@ function test_samples( @printf( "[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], - edges[i+1], + edges[i + 1], clb[i], cub[i], cnts_sc[i] @@ -398,7 +399,7 @@ function test_range(d::UnivariateDistribution) @test isfinite(vmin) == is_lb @test isfinite(vmax) == is_ub - @test isbounded(d) == (is_lb && is_ub) + return @test isbounded(d) == (is_lb && is_ub) end function get_evalsamples(d::DiscreteUnivariateDistribution, q::Float64) @@ -457,7 +458,7 @@ function test_support(d::UnivariateDistribution, vs::AbstractVector) end end - if isbounded(d) && isa(d, DiscreteUnivariateDistribution) + return if isbounded(d) && isa(d, DiscreteUnivariateDistribution) s = support(d) @test isa(s, AbstractUnitRange) @test first(s) == minimum(d) @@ -486,25 +487,25 @@ function test_range_evaluation(d::DiscreteUnivariateDistribution) p0 = map(Base.Fix1(pdf, d), collect(rmin:rmax)) @test map(Base.Fix1(pdf, d), rmin:rmax) ≈ p0 if rmin + 2 <= rmax - @test map(Base.Fix1(pdf, d), (rmin+1):(rmax-1)) ≈ p0[2:(end-1)] + @test map(Base.Fix1(pdf, d), (rmin + 1):(rmax - 1)) ≈ p0[2:(end - 1)] end - if isbounded(d) + return if isbounded(d) @test map(Base.Fix1(pdf, d), support(d)) ≈ p0 - @test map(Base.Fix1(pdf, d), (rmin-2):rmax) ≈ vcat(0.0, 0.0, p0) - @test map(Base.Fix1(pdf, d), rmin:(rmax+3)) ≈ vcat(p0, 0.0, 0.0, 0.0) - @test map(Base.Fix1(pdf, d), (rmin-2):(rmax+3)) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0) + @test map(Base.Fix1(pdf, d), (rmin - 2):rmax) ≈ vcat(0.0, 0.0, p0) + @test map(Base.Fix1(pdf, d), rmin:(rmax + 3)) ≈ vcat(p0, 0.0, 0.0, 0.0) + @test map(Base.Fix1(pdf, d), (rmin - 2):(rmax + 3)) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0) elseif islowerbounded(d) - @test map(Base.Fix1(pdf, d), (rmin-2):rmax) ≈ vcat(0.0, 0.0, p0) + @test map(Base.Fix1(pdf, d), (rmin - 2):rmax) ≈ vcat(0.0, 0.0, p0) end end function test_evaluation( - d::DiscreteUnivariateDistribution, - vs::AbstractVector, - testquan::Bool = true, -) + d::DiscreteUnivariateDistribution, + vs::AbstractVector, + testquan::Bool = true, + ) nv = length(vs) p = 
Vector{Float64}(undef, nv) c = Vector{Float64}(undef, nv) @@ -523,7 +524,7 @@ function test_evaluation( lcc[i] = logccdf(d, v) @assert p[i] >= 0.0 - @assert (i == 1 || c[i] >= c[i-1]) + @assert (i == 1 || c[i] >= c[i - 1]) ci += p[i] @test ci ≈ c[i] @@ -552,15 +553,15 @@ function test_evaluation( @test Base.Fix1(logpdf, d).(vs) ≈ lp @test Base.Fix1(logcdf, d).(vs) ≈ lc - @test Base.Fix1(logccdf, d).(vs) ≈ lcc + return @test Base.Fix1(logccdf, d).(vs) ≈ lcc end function test_evaluation( - d::ContinuousUnivariateDistribution, - vs::AbstractVector, - testquan::Bool = true, -) + d::ContinuousUnivariateDistribution, + vs::AbstractVector, + testquan::Bool = true, + ) nv = length(vs) p = Vector{Float64}(undef, nv) c = Vector{Float64}(undef, nv) @@ -581,7 +582,7 @@ function test_evaluation( lc[i] = logcdf(d, v) lcc[i] = logccdf(d, v) - @assert (i == 1 || c[i] >= c[i-1]) + @assert (i == 1 || c[i] >= c[i - 1]) @test isapprox(c[i] + cc[i], 1.0, atol = 1.0e-12) if !isa(d, StudentizedRange) @@ -606,7 +607,7 @@ function test_evaluation( if !isa(d, StudentizedRange) # check: pdf should be the derivative of cdf - for i = 2:(nv-1) + for i in 2:(nv - 1) if p[i] > 1.0e-6 v = vs[i] ap = (cdf(d, v + 1.0e-6) - cdf(d, v - 1.0e-6)) / (2.0e-6) @@ -625,7 +626,7 @@ function test_evaluation( @test Base.Fix1(ccdf, d).(vs) ≈ cc @test Base.Fix1(logcdf, d).(vs) ≈ lc - @test Base.Fix1(logccdf, d).(vs) ≈ lcc + return @test Base.Fix1(logccdf, d).(vs) ≈ lcc end function test_nonfinite(distr::UnivariateDistribution) @@ -648,6 +649,7 @@ function test_nonfinite(distr::UnivariateDistribution) @test isnan(f(distr, NaN)) end end + return end #### Testing statistics methods @@ -664,7 +666,7 @@ function test_stats(d::DiscreteUnivariateDistribution, vs::AbstractVector) xskew = dot(p, (vf .- xmean) .^ 3) / (xstd .^ 3) xkurt = dot(p, (vf .- xmean) .^ 4) / (xvar .^ 2) - 3.0 - if isbounded(d) + return if isbounded(d) @test isapprox(mean(d), xmean, atol = 1.0e-8) @test isapprox(var(d), xvar, atol = 1.0e-8) @test isapprox(std(d), xstd, atol = 1.0e-8) @@ -713,7 +715,7 @@ function test_stats(d::ContinuousUnivariateDistribution, xs::AbstractVector{Floa @test isapprox(mean(d), xmean, atol = mean_tol) # test variance - if applicable(kurtosis, d) + return if applicable(kurtosis, d) kd = kurtosis(d) # when the excessive kurtosis is sufficiently large (i.e. > 2) # the sample variance has a finite variance, given by @@ -735,7 +737,7 @@ function test_params(d::Distribution) pars = params(d) d_new = D(pars...) 
@test d_new == d - @test d_new == deepcopy(d) + return @test d_new == deepcopy(d) end function test_params(d::Truncated) @@ -746,13 +748,13 @@ function test_params(d::Truncated) pars = params(d_unt) d_new = truncated(D(pars...), d.lower, d.upper) @test d_new == d - @test d == deepcopy(d) + return @test d == deepcopy(d) end # Finite difference differentiation function fdm(f, at) - map(1:length(at)) do i - FiniteDifferences.central_fdm(5, 1)(x -> f([at[1:(i-1)]; x; at[(i+1):end]]), at[i]) + return map(1:length(at)) do i + FiniteDifferences.central_fdm(5, 1)(x -> f([at[1:(i - 1)]; x; at[(i + 1):end]]), at[i]) end end @@ -763,7 +765,7 @@ function pvalue_kolmogorovsmirnoff(x::AbstractVector, d::UnivariateDistribution) # compute maximum absolute deviation from the empirical cdf n = length(x) cdfs = sort!(map(Base.Fix1(cdf, d), x)) - dmax = maximum(zip(cdfs, (0:(n-1)) / n, (1:n) / n)) do (cdf, lower, upper) + dmax = maximum(zip(cdfs, (0:(n - 1)) / n, (1:n) / n)) do (cdf, lower, upper) return max(cdf - lower, upper - cdf) end @@ -771,8 +773,8 @@ function pvalue_kolmogorovsmirnoff(x::AbstractVector, d::UnivariateDistribution) return ccdf(KSDist(n), dmax) end -function test_affine_transformations(::Type{T}, params...) where {T<:UnivariateDistribution} - @testset "affine transformations ($T)" begin +function test_affine_transformations(::Type{T}, params...) where {T <: UnivariateDistribution} + return @testset "affine transformations ($T)" begin # distribution d = T(params...) diff --git a/test/truncate.jl b/test/truncate.jl index 1303870357..484df3237c 100644 --- a/test/truncate.jl +++ b/test/truncate.jl @@ -2,225 +2,226 @@ module TestTruncate -using Distributions -using ForwardDiff: Dual, ForwardDiff -using StatsFuns -import JSON -using Test -using ..Main: fdm - -function verify_and_test_drive(jsonfile, selected, n_tsamples::Int, lower::Int, upper::Int) - R = JSON.parsefile(jsonfile) - for dct in R - ex = dct["expr"] - dsym = Symbol(dct["dtype"]) - if dsym in [:Skellam, :NormalInverseGaussian] - continue + using Distributions + using ForwardDiff: Dual, ForwardDiff + using StatsFuns + import JSON + using Test + using ..Main: fdm + + function verify_and_test_drive(jsonfile, selected, n_tsamples::Int, lower::Int, upper::Int) + R = JSON.parsefile(jsonfile) + for dct in R + ex = dct["expr"] + dsym = Symbol(dct["dtype"]) + if dsym in [:Skellam, :NormalInverseGaussian] + continue + end + + dname = string(dsym) + + dsymt = Symbol("truncated($(dct["dtype"]),$lower,$upper") + dnamet = string(dsym) + + # test whether it is included in the selected list + # or all are selected (when selected is empty) + if !isempty(selected) && !(dname in selected) + continue + end + + # perform testing + dtype = eval(dsym) + dtypet = Truncated + d0 = eval(Meta.parse(ex)) + if minimum(d0) > lower || maximum(d0) < upper + continue + end + + println(" testing truncated($(ex),$lower,$upper)") + d = truncated(eval(Meta.parse(ex)), lower, upper) + if dtype != Uniform && dtype != DiscreteUniform # Uniform is truncated to Uniform + @assert isa(dtype, Type) && dtype <: UnivariateDistribution + @test isa(d, dtypet) + # verification and testing + verify_and_test(d, dct, n_tsamples) + end end + return + end - dname = string(dsym) - - dsymt = Symbol("truncated($(dct["dtype"]),$lower,$upper") - dnamet = string(dsym) - # test whether it is included in the selected list - # or all are selected (when selected is empty) - if !isempty(selected) && !(dname in selected) - continue + _parse_x(d::DiscreteUnivariateDistribution, x) = round(Int, x) + 
_parse_x(d::ContinuousUnivariateDistribution, x) = Float64(x) + + _json_value(x::Number) = x + + _json_value(x::AbstractString) = + x == "inf" ? Inf : + x == "-inf" ? -Inf : x == "nan" ? NaN : error("Invalid numerical value: $x") + + function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int) + # verify stats + @test minimum(d) ≈ max(_json_value(dct["minimum"]), d.lower) + @test maximum(d) ≈ min(_json_value(dct["maximum"]), d.upper) + @test extrema(d) == (minimum(d), maximum(d)) + @test d == deepcopy(d) + + # verify logpdf and cdf at certain points + pts = dct["points"] + for pt in pts + x = _parse_x(d, pt["x"]) + lp = d.lower <= x <= d.upper ? Float64(pt["logpdf"]) - d.logtp : -Inf + cf = x < d.lower ? 0.0 : x >= d.upper ? 1.0 : (Float64(pt["cdf"]) - d.lcdf) / d.tp + if !isa( + d, + Distributions.Truncated{ + Distributions.StudentizedRange{Float64}, + Distributions.Continuous, + }, + ) + @test logpdf(d, x) ≈ lp atol = sqrt(eps()) + end + @test cdf(d, x) ≈ cf atol = sqrt(eps()) + # NOTE: some distributions use pdf() in StatsFuns.jl which have no generic support yet + if !any( + T -> d isa T, + [ + Distributions.Truncated{ + Distributions.NoncentralChisq{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.NoncentralF{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.NoncentralT{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.StudentizedRange{Float64}, + Distributions.Continuous, + Float64, + }, + Distributions.Truncated{ + Distributions.Rician{Float64}, + Distributions.Continuous, + Float64, + }, + ], + ) + @test isapprox(logpdf(d, Dual(float(x))), lp, atol = sqrt(eps())) + end + # NOTE: this test is disabled as StatsFuns.jl doesn't have generic support for cdf() + # @test isapprox(cdf(d, Dual(x)) , cf, atol=sqrt(eps())) end - # perform testing - dtype = eval(dsym) - dtypet = Truncated - d0 = eval(Meta.parse(ex)) - if minimum(d0) > lower || maximum(d0) < upper - continue + # test if truncated quantile function can be evaluated consistently for different types at certain points + @test isapprox(quantile(d, 0), quantile(d, Float32(0))) + @test isapprox(quantile(d, 1), quantile(d, Float32(1.0))) + @test isapprox(quantile(d, Float64(0.3)), quantile(d, Float32(0.3))) + @test isapprox(quantile(d, Float64(0.7)), quantile(d, Float32(0.7))) + + try + m = mgf(d, 0.0) + @test m == 1.0 + m = mgf(d, Dual(0.0)) + @test m == 1.0 + catch e + isa(e, MethodError) || throw(e) end - - println(" testing truncated($(ex),$lower,$upper)") - d = truncated(eval(Meta.parse(ex)), lower, upper) - if dtype != Uniform && dtype != DiscreteUniform # Uniform is truncated to Uniform - @assert isa(dtype, Type) && dtype <: UnivariateDistribution - @test isa(d, dtypet) - # verification and testing - verify_and_test(d, dct, n_tsamples) + try + c = cf(d, 0.0) + @test c == 1.0 + c = cf(d, Dual(0.0)) + @test c == 1.0 + # test some extra values: should all be well-defined + for t in (0.1, -0.1, 1.0, -1.0) + @test !isnan(cf(d, t)) + @test !isnan(cf(d, Dual(t))) + end + catch e + isa(e, MethodError) || throw(e) end - end -end - -_parse_x(d::DiscreteUnivariateDistribution, x) = round(Int, x) -_parse_x(d::ContinuousUnivariateDistribution, x) = Float64(x) - -_json_value(x::Number) = x - -_json_value(x::AbstractString) = - x == "inf" ? Inf : - x == "-inf" ? -Inf : x == "nan" ? 
NaN : error("Invalid numerical value: $x") - -function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int) - # verify stats - @test minimum(d) ≈ max(_json_value(dct["minimum"]), d.lower) - @test maximum(d) ≈ min(_json_value(dct["maximum"]), d.upper) - @test extrema(d) == (minimum(d), maximum(d)) - @test d == deepcopy(d) - - # verify logpdf and cdf at certain points - pts = dct["points"] - for pt in pts - x = _parse_x(d, pt["x"]) - lp = d.lower <= x <= d.upper ? Float64(pt["logpdf"]) - d.logtp : -Inf - cf = x < d.lower ? 0.0 : x >= d.upper ? 1.0 : (Float64(pt["cdf"]) - d.lcdf) / d.tp - if !isa( - d, - Distributions.Truncated{ - Distributions.StudentizedRange{Float64}, - Distributions.Continuous, - }, - ) - @test logpdf(d, x) ≈ lp atol = sqrt(eps()) - end - @test cdf(d, x) ≈ cf atol = sqrt(eps()) - # NOTE: some distributions use pdf() in StatsFuns.jl which have no generic support yet - if !any( - T -> d isa T, - [ - Distributions.Truncated{ - Distributions.NoncentralChisq{Float64}, - Distributions.Continuous, - Float64, - }, - Distributions.Truncated{ - Distributions.NoncentralF{Float64}, - Distributions.Continuous, - Float64, - }, - Distributions.Truncated{ - Distributions.NoncentralT{Float64}, - Distributions.Continuous, - Float64, - }, - Distributions.Truncated{ - Distributions.StudentizedRange{Float64}, - Distributions.Continuous, - Float64, - }, - Distributions.Truncated{ - Distributions.Rician{Float64}, - Distributions.Continuous, - Float64, - }, - ], - ) - @test isapprox(logpdf(d, Dual(float(x))), lp, atol = sqrt(eps())) - end - # NOTE: this test is disabled as StatsFuns.jl doesn't have generic support for cdf() - # @test isapprox(cdf(d, Dual(x)) , cf, atol=sqrt(eps())) - end - - # test if truncated quantile function can be evaluated consistently for different types at certain points - @test isapprox(quantile(d, 0), quantile(d, Float32(0))) - @test isapprox(quantile(d, 1), quantile(d, Float32(1.0))) - @test isapprox(quantile(d, Float64(0.3)), quantile(d, Float32(0.3))) - @test isapprox(quantile(d, Float64(0.7)), quantile(d, Float32(0.7))) - - try - m = mgf(d, 0.0) - @test m == 1.0 - m = mgf(d, Dual(0.0)) - @test m == 1.0 - catch e - isa(e, MethodError) || throw(e) - end - try - c = cf(d, 0.0) - @test c == 1.0 - c = cf(d, Dual(0.0)) - @test c == 1.0 - # test some extra values: should all be well-defined - for t in (0.1, -0.1, 1.0, -1.0) - @test !isnan(cf(d, t)) - @test !isnan(cf(d, Dual(t))) + # generic testing + return if isa(d, Cosine) + n_tsamples = floor(Int, n_tsamples / 10) end - catch e - isa(e, MethodError) || throw(e) end - # generic testing - if isa(d, Cosine) - n_tsamples = floor(Int, n_tsamples / 10) + # default methods + for (μ, lower, upper) in [(0, -1, 1), (1, 2, 4)] + d = truncated(Normal(μ, 1), lower, upper) + @test d.untruncated === Normal(μ, 1) + @test d.lower == lower + @test d.upper == upper + @test truncated(Normal(μ, 1); lower = lower, upper = upper) === d end -end - -# default methods -for (μ, lower, upper) in [(0, -1, 1), (1, 2, 4)] - d = truncated(Normal(μ, 1), lower, upper) - @test d.untruncated === Normal(μ, 1) - @test d.lower == lower - @test d.upper == upper - @test truncated(Normal(μ, 1); lower = lower, upper = upper) === d -end -for bound in (-2, 1) - d = @test_deprecated Distributions.Truncated(Normal(), Float64(bound), Inf) - @test truncated(Normal(); lower = bound, upper = Inf) == d + for bound in (-2, 1) + d = @test_deprecated Distributions.Truncated(Normal(), Float64(bound), Inf) + @test truncated(Normal(); lower = bound, upper = 
Inf) == d - d_nothing = truncated(Normal(); lower = bound) - @test truncated(Normal(); lower = bound, upper = nothing) == d_nothing - @test extrema(d_nothing) == promote(bound, Inf) + d_nothing = truncated(Normal(); lower = bound) + @test truncated(Normal(); lower = bound, upper = nothing) == d_nothing + @test extrema(d_nothing) == promote(bound, Inf) - d = @test_deprecated Distributions.Truncated(Normal(), -Inf, Float64(bound)) - @test truncated(Normal(); lower = -Inf, upper = bound) == d + d = @test_deprecated Distributions.Truncated(Normal(), -Inf, Float64(bound)) + @test truncated(Normal(); lower = -Inf, upper = bound) == d - d_nothing = truncated(Normal(); upper = bound) - @test truncated(Normal(); lower = nothing, upper = bound) == d_nothing - @test extrema(d_nothing) == promote(-Inf, bound) -end -@test truncated(Normal()) === Normal() + d_nothing = truncated(Normal(); upper = bound) + @test truncated(Normal(); lower = nothing, upper = bound) == d_nothing + @test extrema(d_nothing) == promote(-Inf, bound) + end + @test truncated(Normal()) === Normal() -## main + ## main -for c in ["discrete", "continuous"] + for c in ["discrete", "continuous"] - title = string(uppercase(c[1]), c[2:end]) - println(" [$title]") - println(" ------------") - jsonfile = joinpath(@__DIR__, "ref", "$(c)_test.ref.json") - verify_and_test_drive(jsonfile, ARGS, 10^6, 3, 5) - println() -end + title = string(uppercase(c[1]), c[2:end]) + println(" [$title]") + println(" ------------") + jsonfile = joinpath(@__DIR__, "ref", "$(c)_test.ref.json") + verify_and_test_drive(jsonfile, ARGS, 10^6, 3, 5) + println() + end -## automatic differentiation + ## automatic differentiation -f = x -> logpdf(truncated(Normal(x[1], x[2]), x[3], x[4]), mean(x)) -at = [0.0, 1.0, 0.0, 1.0] -@test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1e-6) + f = x -> logpdf(truncated(Normal(x[1], x[2]), x[3], x[4]), mean(x)) + at = [0.0, 1.0, 0.0, 1.0] + @test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1.0e-6) -@testset "errors" begin - @test_throws ErrorException truncated(Normal(), 1, 0) - @test_throws ArgumentError truncated(Uniform(), 1, 2) - @test_throws ErrorException truncated(Exponential(), 3, 1) -end + @testset "errors" begin + @test_throws ErrorException truncated(Normal(), 1, 0) + @test_throws ArgumentError truncated(Uniform(), 1, 2) + @test_throws ErrorException truncated(Exponential(), 3, 1) + end -@testset "#1328" begin - dist = Poisson(2.0) - dist_zeroinflated = MixtureModel([Dirac(0.0), dist], [0.4, 0.6]) - dist_zerotruncated = truncated(dist; lower = 1) - dist_zeromodified = MixtureModel([Dirac(0.0), dist_zerotruncated], [0.4, 0.6]) + @testset "#1328" begin + dist = Poisson(2.0) + dist_zeroinflated = MixtureModel([Dirac(0.0), dist], [0.4, 0.6]) + dist_zerotruncated = truncated(dist; lower = 1) + dist_zeromodified = MixtureModel([Dirac(0.0), dist_zerotruncated], [0.4, 0.6]) - @test logsumexp(logpdf(dist, x) for x = 0:1000) ≈ 0 atol = 1e-15 - @test logsumexp(logpdf(dist_zeroinflated, x) for x = 0:1000) ≈ 0 atol = 1e-15 - @test logsumexp(logpdf(dist_zerotruncated, x) for x = 0:1000) ≈ 0 atol = 1e-15 - @test logsumexp(logpdf(dist_zeromodified, x) for x = 0:1000) ≈ 0 atol = 1e-15 -end + @test logsumexp(logpdf(dist, x) for x in 0:1000) ≈ 0 atol = 1.0e-15 + @test logsumexp(logpdf(dist_zeroinflated, x) for x in 0:1000) ≈ 0 atol = 1.0e-15 + @test logsumexp(logpdf(dist_zerotruncated, x) for x in 0:1000) ≈ 0 atol = 1.0e-15 + @test logsumexp(logpdf(dist_zeromodified, x) for x in 0:1000) ≈ 0 atol = 1.0e-15 + end end 
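The `#1328` hunks above exercise one invariant: zero-truncation and zero-modification both preserve total probability mass, which is why each `logsumexp(logpdf(...))` check comes out at 0. A minimal standalone sketch of the same identity, using only the exported API shown in this file (the explicit division by `1 - pdf(dist, 0)` illustrates the renormalization; it is not how `truncated` computes it internally):

using Distributions, Test

dist = Poisson(2.0)
d_trunc = truncated(dist; lower = 1)   # drop the zero class and renormalize

# Truncating at `lower = 1` rescales the pmf by the retained mass P(X >= 1),
# which for a Poisson equals 1 - pdf(dist, 0).
@test pdf(d_trunc, 1) ≈ pdf(dist, 1) / (1 - pdf(dist, 0))

# A zero-modified mixture re-attaches an atom at zero; the total mass is
# still 1, matching the logsumexp(...) ≈ 0 checks in the testset above.
d_mod = MixtureModel([Dirac(0.0), d_trunc], [0.4, 0.6])
@test sum(pdf(d_mod, x) for x in 0:1000) ≈ 1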
@testset "show" begin @test sprint(show, "text/plain", truncated(Normal(); lower = 2.0)) == - "Truncated($(Normal()); lower=2.0)" + "Truncated($(Normal()); lower=2.0)" @test sprint(show, "text/plain", truncated(Normal(); upper = 3.0)) == - "Truncated($(Normal()); upper=3.0)" + "Truncated($(Normal()); upper=3.0)" @test sprint(show, "text/plain", truncated(Normal(), 2.0, 3.0)) == - "Truncated($(Normal()); lower=2.0, upper=3.0)" + "Truncated($(Normal()); lower=2.0, upper=3.0)" end @testset "sampling with small mass (#1548)" begin diff --git a/test/truncated/exponential.jl b/test/truncated/exponential.jl index f74573e3dc..b25b62f9d2 100644 --- a/test/truncated/exponential.jl +++ b/test/truncated/exponential.jl @@ -19,5 +19,5 @@ using Distributions, Random, Test @test mean(truncated(Exponential(1.0); lower = -Inf, upper = 0 + eps())) ≈ 0 atol = eps() @test mean(truncated(Exponential(1.0), 1.0, 1.0 + eps())) ≈ 1.0 # near-degenerate - @test mean(truncated(Exponential(1e308), 1.0, 1.0 + eps())) ≈ 1.0 # near-degenerate + @test mean(truncated(Exponential(1.0e308), 1.0, 1.0 + eps())) ≈ 1.0 # near-degenerate end diff --git a/test/truncated/normal.jl b/test/truncated/normal.jl index c8e249c594..63354ef68a 100644 --- a/test/truncated/normal.jl +++ b/test/truncated/normal.jl @@ -5,11 +5,11 @@ rng = MersenneTwister(123) @testset "Truncated normal, mean and variance" begin @test mean(truncated(Normal(0, 1), 100, 115)) ≈ - 100.00999800099926070518490239457545847490332879043 + 100.00999800099926070518490239457545847490332879043 @test mean(truncated(Normal(-2, 3), 50, 70)) ≈ - 50.171943499898757645751683644632860837133138152489 + 50.171943499898757645751683644632860837133138152489 @test mean(truncated(Normal(0, 2), -100, 0)) ≈ - -1.59576912160573071175978423973752747390343452465973 + -1.59576912160573071175978423973752747390343452465973 @test mean(truncated(Normal(0, 1))) == 0 @test mean(truncated(Normal(0, 1); lower = -Inf, upper = Inf)) == 0 @test mean(truncated(Normal(0, 1); lower = 0)) ≈ +√(2 / π) @@ -17,9 +17,9 @@ rng = MersenneTwister(123) @test mean(truncated(Normal(0, 1); upper = 0)) ≈ -√(2 / π) @test mean(truncated(Normal(0, 1); lower = -Inf, upper = 0)) ≈ -√(2 / π) @test var(truncated(Normal(0, 1), 50, 70)) ≈ - 0.00039904318680389954790992722653605933053648912703600 + 0.000399043186803899547909927226536059330536489127036 @test var(truncated(Normal(-2, 3); lower = 50, upper = 70)) ≈ - 0.029373438107168350377591231295634273607812172191712 + 0.029373438107168350377591231295634273607812172191712 @test var(truncated(Normal(0, 1))) == 1 @test var(truncated(Normal(0, 1); lower = -Inf, upper = Inf)) == 1 @test var(truncated(Normal(0, 1); lower = 0)) ≈ 1 - 2 / π @@ -28,8 +28,8 @@ rng = MersenneTwister(123) @test var(truncated(Normal(0, 1); lower = -Inf, upper = 0)) ≈ 1 - 2 / π # https://github.com/JuliaStats/Distributions.jl/issues/827 @test mean(truncated(Normal(1000000, 1), 0, 1000)) ≈ - 999.999998998998999001005011019018990904720462367106 - @test var(truncated(Normal(), 999000, 1e6)) ≥ 0 + 999.999998998998999001005011019018990904720462367106 + @test var(truncated(Normal(), 999000, 1.0e6)) ≥ 0 @test var(truncated(Normal(1000000, 1), 0, 1000)) ≥ 0 # https://github.com/JuliaStats/Distributions.jl/issues/624 @test rand(truncated(Normal(+Inf, 1), 0, 1)) ≈ 1 @@ -50,9 +50,9 @@ end @testset "Truncated normal $trunc" begin trunc = truncated(Normal(0, 1), -2, 2) @testset "Truncated normal $trunc with $func" for func in [ - (r = rand, r! = rand!), - (r = ((d, n) -> rand(rng, d, n)), r! 
= ((d, X) -> rand!(rng, d, X))), - ] + (r = rand, r! = rand!), + (r = ((d, n) -> rand(rng, d, n)), r! = ((d, X) -> rand!(rng, d, X))), + ] repeats = 1000000 @test abs(mean(func.r(trunc, repeats))) < 0.01 diff --git a/test/types.jl b/test/types.jl index c8f4d7a18d..e1c28bd7cc 100644 --- a/test/types.jl +++ b/test/types.jl @@ -24,7 +24,7 @@ using ForwardDiff: Dual @assert ContinuousMatrixDistribution <: MatrixDistribution @testset "Test Sample Type" begin - for T in (Float64, Float32, Dual{Nothing,Float64,0}) + for T in (Float64, Float32, Dual{Nothing, Float64, 0}) @testset "Type $T" begin dists = ( MvNormal(Diagonal(ones(T, 2))), diff --git a/test/univariate/continuous.jl b/test/univariate/continuous.jl index 73c2fc1809..0c486dd1be 100644 --- a/test/univariate/continuous.jl +++ b/test/univariate/continuous.jl @@ -9,15 +9,15 @@ n_tsamples = 100 # additional distributions that have no direct counterparts in R references @testset "Testing $(distr)" for distr in [ - Biweight(), - Biweight(1, 3), - Epanechnikov(), - Epanechnikov(1, 3), - Triweight(), - Triweight(2), - Triweight(1, 3), - Triweight(1), -] + Biweight(), + Biweight(1, 3), + Epanechnikov(), + Epanechnikov(1, 3), + Triweight(), + Triweight(2), + Triweight(1, 3), + Triweight(1), + ] test_distr(distr, n_tsamples; testquan = false) end @@ -25,7 +25,7 @@ end # Test for non-Float64 input using ForwardDiff @test string(logpdf(Normal(0, 1), big(1))) == - "-1.418938533204672741780329736405617639861397473637783412817151540482765695927251" + "-1.418938533204672741780329736405617639861397473637783412817151540482765695927251" @test derivative(t -> logpdf(Normal(1.0, 0.15), t), 2.5) ≈ -66.66666666666667 @test derivative(t -> pdf(Normal(t, 1.0), 0.0), 0.0) == 0.0 @@ -75,11 +75,11 @@ end @testset "VonMises with large kappa" begin d = VonMises(1.1, 1000) @test var(d) ≈ 0.0005001251251957198 - @test entropy(d) ≈ -2.034688918525470 - @test cf(d, 2.5) ≈ -0.921417 + 0.38047im atol = 1e-6 + @test entropy(d) ≈ -2.03468891852547 + @test cf(d, 2.5) ≈ -0.921417 + 0.38047im atol = 1.0e-6 @test pdf(d, 0.5) ≈ 1.758235814051e-75 @test logpdf(d, 0.5) ≈ -172.1295710466005 - @test cdf(d, 1.0) ≈ 0.000787319 atol = 1e-9 + @test cdf(d, 1.0) ≈ 0.000787319 atol = 1.0e-9 end @testset "NormalInverseGaussian random repeatable and basic metrics" begin diff --git a/test/univariate/continuous/arcsine.jl b/test/univariate/continuous/arcsine.jl index 829ea7f6a6..f974bd8e0b 100644 --- a/test/univariate/continuous/arcsine.jl +++ b/test/univariate/continuous/arcsine.jl @@ -19,7 +19,7 @@ using ForwardDiff @test isinf(logpdf(d, 5.0)) # derivative - for v = 3.01:0.1:4.99 + for v in 3.01:0.1:4.99 fgrad = ForwardDiff.derivative(x -> logpdf(d, x), v) glog = gradlogpdf(d, v) @test fgrad ≈ glog diff --git a/test/univariate/continuous/chernoff.jl b/test/univariate/continuous/chernoff.jl index 32af585a1f..48161bdd64 100644 --- a/test/univariate/continuous/chernoff.jl +++ b/test/univariate/continuous/chernoff.jl @@ -105,54 +105,54 @@ ] pdftest = [ - 0.000 0.758345 - 0.050 0.755123 - 0.100 0.745532 - 0.150 0.729792 - 0.200 0.708260 - 0.250 0.681422 - 0.300 0.649874 - 0.350 0.614303 - 0.400 0.575464 - 0.450 0.534159 - 0.500 0.491208 - 0.550 0.447424 - 0.600 0.403594 - 0.650 0.360447 - 0.700 0.318646 - 0.750 0.278760 - 0.800 0.241264 - 0.850 0.206523 - 0.900 0.174795 - 0.950 0.146231 - 1.000 0.120880 - 1.050 0.0987031 - 1.100 0.0795821 - 1.150 0.0633362 - 1.200 0.0497372 - 1.250 0.0385246 - 1.300 0.0294208 - 1.350 0.0221441 - 1.400 0.0164201 - 1.450 0.0119902 - 1.500 0.0086186 - 1.550 0.0060955 
- 1.600 0.0042401 - 1.650 0.0028995 - 1.700 0.0019485 - 1.750 0.0012861 - 1.800 0.0008334 - 1.850 0.0005300 - 1.900 0.0003306 - 1.950 0.0002022 - 2.000 0.0001212 + 0.0 0.758345 + 0.05 0.755123 + 0.1 0.745532 + 0.15 0.729792 + 0.2 0.70826 + 0.25 0.681422 + 0.3 0.649874 + 0.35 0.614303 + 0.4 0.575464 + 0.45 0.534159 + 0.5 0.491208 + 0.55 0.447424 + 0.6 0.403594 + 0.65 0.360447 + 0.7 0.318646 + 0.75 0.27876 + 0.8 0.241264 + 0.85 0.206523 + 0.9 0.174795 + 0.95 0.146231 + 1.0 0.12088 + 1.05 0.0987031 + 1.1 0.0795821 + 1.15 0.0633362 + 1.2 0.0497372 + 1.25 0.0385246 + 1.3 0.0294208 + 1.35 0.0221441 + 1.4 0.0164201 + 1.45 0.0119902 + 1.5 0.0086186 + 1.55 0.0060955 + 1.6 0.0042401 + 1.65 0.0028995 + 1.7 0.0019485 + 1.75 0.0012861 + 1.8 0.0008334 + 1.85 0.00053 + 1.9 0.0003306 + 1.95 0.0002022 + 2.0 0.0001212 ] - for i = 1:size(cdftest, 1) + for i in 1:size(cdftest, 1) @test isapprox(cdf(d, cdftest[i, 1]), cdftest[i, 2]) end - for i = 1:size(pdftest, 1) - @test isapprox(pdf(d, pdftest[i, 1]), pdftest[i, 2]; atol = 1e-6) + for i in 1:size(pdftest, 1) + @test isapprox(pdf(d, pdftest[i, 1]), pdftest[i, 2]; atol = 1.0e-6) end end diff --git a/test/univariate/continuous/chisq.jl b/test/univariate/continuous/chisq.jl index 534b7aaf09..51a59d5598 100644 --- a/test/univariate/continuous/chisq.jl +++ b/test/univariate/continuous/chisq.jl @@ -1,4 +1,3 @@ - @testset "Chisq" begin test_cgf(Chisq(1), (0.49, -1, -100, -1.0f6)) test_cgf(Chisq(3), (0.49, -1, -100, -1.0f6)) diff --git a/test/univariate/continuous/erlang.jl b/test/univariate/continuous/erlang.jl index 3936b51bd1..eef0c9e0c2 100644 --- a/test/univariate/continuous/erlang.jl +++ b/test/univariate/continuous/erlang.jl @@ -1,2 +1,2 @@ -test_cgf(Erlang(1, 0.4), (1, 1 / 0.400001, -1, -100.0f0, -1e6)) -test_cgf(Erlang(10, 0.01), (1, 1 / 0.010001f0, -1, -100.0f0, -1e6)) +test_cgf(Erlang(1, 0.4), (1, 1 / 0.400001, -1, -100.0f0, -1.0e6)) +test_cgf(Erlang(10, 0.01), (1, 1 / 0.010001f0, -1, -100.0f0, -1.0e6)) diff --git a/test/univariate/continuous/exponential.jl b/test/univariate/continuous/exponential.jl index 0460b8bc68..86589f3262 100644 --- a/test/univariate/continuous/exponential.jl +++ b/test/univariate/continuous/exponential.jl @@ -1,16 +1,16 @@ @testset "Exponential" begin - test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1e6)) - test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1e6)) - test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1e6)) + test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1.0e6)) + test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1.0e6)) + test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1.0e6)) for T in (Float32, Float64) @test @inferred(rand(Exponential(T(1)))) isa T end end -test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1e6)) -test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1e6)) -test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1e6)) +test_cgf(Exponential(1), (0.9, -1, -100.0f0, -1.0e6)) +test_cgf(Exponential(0.91), (0.9, -1, -100.0f0, -1.0e6)) +test_cgf(Exponential(10), (0.08, -1, -100.0f0, -1.0e6)) # Sampling Tests @testset "Exponential sampling tests" begin diff --git a/test/univariate/continuous/gamma.jl b/test/univariate/continuous/gamma.jl index 0f881dacd0..329e22deae 100644 --- a/test/univariate/continuous/gamma.jl +++ b/test/univariate/continuous/gamma.jl @@ -1,9 +1,9 @@ using Test, Distributions, OffsetArrays @testset "Gamma" begin - test_cgf(Gamma(1, 1), (0.9, -1, -100.0f0, -1e6)) - test_cgf(Gamma(10, 1), (0.9, -1, -100.0f0, -1e6)) - test_cgf(Gamma(0.2, 10), (0.08, -1, -100.0f0, -1e6)) + test_cgf(Gamma(1, 1), (0.9, -1, 
-100.0f0, -1.0e6)) + test_cgf(Gamma(10, 1), (0.9, -1, -100.0f0, -1.0e6)) + test_cgf(Gamma(0.2, 10), (0.08, -1, -100.0f0, -1.0e6)) @testset "Gamma suffstats and OffsetArrays" begin a = rand(Gamma(), 11) diff --git a/test/univariate/continuous/gumbel.jl b/test/univariate/continuous/gumbel.jl index 6aae521062..ffc4c436cc 100644 --- a/test/univariate/continuous/gumbel.jl +++ b/test/univariate/continuous/gumbel.jl @@ -8,13 +8,13 @@ @testset "rand" begin d = Gumbel(rand(), rand()) - samples = [rand(d) for _ = 1:10_000] - @test mean(samples) ≈ mean(d) rtol = 5e-2 - @test std(samples) ≈ std(d) rtol = 5e-2 + samples = [rand(d) for _ in 1:10_000] + @test mean(samples) ≈ mean(d) rtol = 5.0e-2 + @test std(samples) ≈ std(d) rtol = 5.0e-2 samples = rand(d, 10_000) - @test mean(samples) ≈ mean(d) rtol = 5e-2 - @test std(samples) ≈ std(d) rtol = 5e-2 + @test mean(samples) ≈ mean(d) rtol = 5.0e-2 + @test std(samples) ≈ std(d) rtol = 5.0e-2 d = Gumbel{Int}(0, 1) @test rand(d) isa Float64 diff --git a/test/univariate/continuous/johnsonsu.jl b/test/univariate/continuous/johnsonsu.jl index 4ab0ab8220..9dbab1de5a 100644 --- a/test/univariate/continuous/johnsonsu.jl +++ b/test/univariate/continuous/johnsonsu.jl @@ -10,10 +10,10 @@ @test rand(d1) isa Float64 @test median(d1) == quantile(d1, 0.5) - x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) - @test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90]) - y = Base.Fix1(cquantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) - @test all(Base.Fix1(ccdf, d1).(y) .≈ [0.25, 0.45, 0.60, 0.80, 0.90]) + x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.6, 0.8, 0.9]) + @test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.6, 0.8, 0.9]) + y = Base.Fix1(cquantile, d1).([0.25, 0.45, 0.6, 0.8, 0.9]) + @test all(Base.Fix1(ccdf, d1).(y) .≈ [0.25, 0.45, 0.6, 0.8, 0.9]) @test mean(d1) ≈ 7.581281 @test var(d1) ≈ 19.1969485 diff --git a/test/univariate/continuous/kolmogorov.jl b/test/univariate/continuous/kolmogorov.jl index a3697c7d18..85199c10e0 100644 --- a/test/univariate/continuous/kolmogorov.jl +++ b/test/univariate/continuous/kolmogorov.jl @@ -5,12 +5,12 @@ using Test d = Kolmogorov() # compare to Smirnov's tabulated values @test round(cdf(d, 0.28), digits = 6) == 0.000_001 -@test round(cdf(d, 0.50), digits = 6) == 0.036_055 +@test round(cdf(d, 0.5), digits = 6) == 0.036_055 @test round(cdf(d, 0.99), digits = 6) == 0.719_126 -@test round(cdf(d, 1.00), digits = 6) == 0.730_000 +@test round(cdf(d, 1.0), digits = 6) == 0.730_000 @test round(cdf(d, 1.01), digits = 6) == 0.740_566 # @test round(cdf(d,1.04),digits=6) == .770_434 # table value appears to be wrong -@test round(cdf(d, 1.50), digits = 6) == 0.977_782 -@test round(cdf(d, 2.00), digits = 6) == 0.999_329 -@test round(cdf(d, 2.50), digits = 7) == 0.999_992_5 -@test round(cdf(d, 3.00), digits = 8) == 0.999_999_97 +@test round(cdf(d, 1.5), digits = 6) == 0.977_782 +@test round(cdf(d, 2.0), digits = 6) == 0.999_329 +@test round(cdf(d, 2.5), digits = 7) == 0.999_992_5 +@test round(cdf(d, 3.0), digits = 8) == 0.999_999_97 diff --git a/test/univariate/continuous/kumaraswamy.jl b/test/univariate/continuous/kumaraswamy.jl index 3fc730c5eb..b695849f1c 100644 --- a/test/univariate/continuous/kumaraswamy.jl +++ b/test/univariate/continuous/kumaraswamy.jl @@ -17,7 +17,7 @@ using Distributions: expectation @test typeof(@inferred rand(D)) === typeof(rand()) tol = sqrt(eps(float(T))) @testset "gradlogpdf" begin - for x = T(0):(T<:Integer ? one(T) : T(0.5)):T(20) + for x in T(0):(T <: Integer ? 
one(T) : T(0.5)):T(20) fd = ForwardDiff.derivative(Base.Fix1(logpdf, D), x) gl = @inferred gradlogpdf(D, x) @test fd ≈ gl atol = tol diff --git a/test/univariate/continuous/lindley.jl b/test/univariate/continuous/lindley.jl index 21948fb559..532c6b35c7 100644 --- a/test/univariate/continuous/lindley.jl +++ b/test/univariate/continuous/lindley.jl @@ -28,7 +28,7 @@ using Distributions: expectation @test @inferred(mean(D)) == T(3 / 2) tol = sqrt(eps(float(T))) @testset "Gradient of log PDF" begin - for x = T(0):T(0.5):T(20) + for x in T(0):T(0.5):T(20) fd = ForwardDiff.derivative(Fix1(logpdf, D), x) gl = @inferred gradlogpdf(D, x) @test gl isa T @@ -47,7 +47,7 @@ using Distributions: expectation S = supertype(typeof(D)) D₂ = Lindley(T(2)) d₁ = kldivergence(D, D₂) - d₂ = invoke(kldivergence, Tuple{S,S}, D, D₂) + d₂ = invoke(kldivergence, Tuple{S, S}, D, D₂) if T <: AbstractFloat @test d₁ isa T end @@ -75,7 +75,7 @@ using Distributions: expectation @test iszero(mgf(D, shape(D) + 1)) @test ForwardDiff.derivative(Fix1(mgf, D), 0) ≈ mean(D) @test central_fdm(5, 1)(Fix1(cf, D), 0) ≈ mean(D) * im - test_cgf(D, (-1e6, -100.0f0, Float16(-1), 1 // 10, 0.9)) + test_cgf(D, (-1.0e6, -100.0f0, Float16(-1), 1 // 10, 0.9)) end end end diff --git a/test/univariate/continuous/logitnormal.jl b/test/univariate/continuous/logitnormal.jl index f9de13c8f5..6badf737a9 100644 --- a/test/univariate/continuous/logitnormal.jl +++ b/test/univariate/continuous/logitnormal.jl @@ -6,10 +6,10 @@ using StatsFuns ####### Core testing procedure function test_logitnormal( - g::LogitNormal, - n_tsamples::Int = 10^6, - rng::Union{AbstractRNG,Missing} = missing, -) + g::LogitNormal, + n_tsamples::Int = 10^6, + rng::Union{AbstractRNG, Missing} = missing, + ) d = length(g) #mn = mean(g) md = median(g) @@ -23,16 +23,16 @@ function test_logitnormal( #@test isa(s, Float64) @test md ≈ logistic(g.μ) #@test entropy(g) ≈ d*(1 + Distributions.log2π)/2 + logdetcov(g.normal)/2 + sum(mean(g.normal)) - @test insupport(g, 1e-8) + @test insupport(g, 1.0e-8) # corner cases of 0 and 1 handled as in support @test insupport(g, 1.0) @test pdf(g, 0.0) == 0.0 @test insupport(g, 0.0) @test pdf(g, 1.0) == 0.0 - @test !insupport(g, -1e-8) - @test pdf(g, -1e-8) == 0.0 - @test !insupport(g, 1 + 1e-8) - @test pdf(g, 1 + 1e-8) == 0.0 + @test !insupport(g, -1.0e-8) + @test pdf(g, -1.0e-8) == 0.0 + @test !insupport(g, 1 + 1.0e-8) + @test pdf(g, 1 + 1.0e-8) == 0.0 # sampling if ismissing(rng) @@ -40,10 +40,10 @@ function test_logitnormal( else X = rand(rng, g, n_tsamples) end - @test isa(X, Array{Float64,1}) + @test isa(X, Array{Float64, 1}) # evaluation of logpdf and pdf - for i = 1:min(100, n_tsamples) + for i in 1:min(100, n_tsamples) @test logpdf(g, X[i]) ≈ log(pdf(g, X[i])) end @test Base.Fix1(logpdf, g).(X) ≈ log.(Base.Fix1(pdf, g).(X)) @@ -55,7 +55,7 @@ function test_logitnormal( @test location(g) == g.μ @test scale(g) == g.σ @test params(g) == (g.μ, g.σ) - @test g == deepcopy(g) + return @test g == deepcopy(g) end ###### General Testing diff --git a/test/univariate/continuous/lognormal.jl b/test/univariate/continuous/lognormal.jl index 05e23eddb1..e16d4e9c80 100644 --- a/test/univariate/continuous/lognormal.jl +++ b/test/univariate/continuous/lognormal.jl @@ -16,14 +16,14 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T @test iszero(logcdf(LogNormal(), Inf)) @test logdiffcdf(LogNormal(), Float32(exp(3)), Float32(exp(3))) === -Inf @test logdiffcdf(LogNormal(), Float32(exp(5)), Float32(exp(3))) ≈ -6.607938594596893 rtol = - 1e-12 + 1.0e-12 @test 
logdiffcdf(LogNormal(), Float32(exp(5)), Float64(exp(3))) ≈ -6.60793859457367 rtol = - 1e-12 + 1.0e-12 @test logdiffcdf(LogNormal(), Float64(exp(5)), Float64(exp(3))) ≈ -6.607938594596893 rtol = - 1e-12 + 1.0e-12 let d = LogNormal(Float64(0), Float64(1)), - x = Float64(exp(-60)), - y = Float64(exp(-60.001)) + x = Float64(exp(-60)), + y = Float64(exp(-60.001)) float_res = logdiffcdf(d, x, y) big_x = BigFloat(x; precision = 100) @@ -68,7 +68,7 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T # test for #996 being fixed let d = LogNormal(0, 1), x = exp(1), ∂x = exp(2) @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈ - ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) + ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) end end @@ -323,14 +323,14 @@ end @testset "LogNormal Sampling Tests" begin for d in [ - LogNormal() - LogNormal(1.0) - LogNormal(0.0, 2.0) - LogNormal(1.0, 2.0) - LogNormal(3.0, 0.5) - LogNormal(3.0, 1.0) - LogNormal(3.0, 2.0) - ] + LogNormal() + LogNormal(1.0) + LogNormal(0.0, 2.0) + LogNormal(1.0, 2.0) + LogNormal(3.0, 0.5) + LogNormal(3.0, 1.0) + LogNormal(3.0, 2.0) + ] test_distr(d, 10^6) end end diff --git a/test/univariate/continuous/loguniform.jl b/test/univariate/continuous/loguniform.jl index 09c58255a5..85f0ede433 100644 --- a/test/univariate/continuous/loguniform.jl +++ b/test/univariate/continuous/loguniform.jl @@ -43,13 +43,13 @@ import Random @test @inferred(pdf(d, 1.0001)) ≈ 0.43425105679757203 @test @inferred(pdf(d, 5)) ≈ 0.08685889638065035 @test @inferred(pdf(d, 9.9999)) ≈ 0.04342988248915007 - @test @inferred(cdf(d, 1.0001)) ≈ 4.342727686266485e-05 + @test @inferred(cdf(d, 1.0001)) ≈ 4.342727686266485e-5 @test @inferred(cdf(d, 5)) ≈ 0.6989700043360187 @test @inferred(cdf(d, 9.9999)) ≈ 0.999995657033466 @test @inferred(median(d)) ≈ 3.1622776601683795 @test @inferred(logpdf(d, 5)) ≈ -2.443470357682056 - for _ = 1:10 + for _ in 1:10 lo = rand(rng) hi = lo + 10 * rand(rng) dist = LogUniform(lo, hi) @@ -63,7 +63,7 @@ import Random @test cdf(u, log(x)) ≈ cdf(dist, x) @test @inferred(entropy(dist)) ≈ - Distributions.expectation(x -> -logpdf(dist, x), dist) + Distributions.expectation(x -> -logpdf(dist, x), dist) end @test kldivergence(LogUniform(1, 2), LogUniform(1, 2)) ≈ 0 atol = 100eps(Float64) @@ -73,7 +73,7 @@ import Random @test kldivergence(LogUniform(0.1, 1), LogUniform(1, 2)) === Inf @test @inferred(kldivergence(LogUniform(0.1f0, 1), LogUniform(1, 2))) === Inf32 - for _ = 1:10 + for _ in 1:10 aq = 10 * rand(rng) ap = aq + 10 * rand(rng) bp = ap + 10 * rand(rng) @@ -81,9 +81,9 @@ import Random p = LogUniform(ap, bp) q = LogUniform(aq, bq) @test @inferred(kldivergence(p, q)) ≈ - kldivergence(Uniform(log(ap), log(bp)), Uniform(log(aq), log(bq))) + kldivergence(Uniform(log(ap), log(bp)), Uniform(log(aq), log(bq))) end end -end#module +end #module diff --git a/test/univariate/continuous/noncentralt.jl b/test/univariate/continuous/noncentralt.jl index 3954f686fc..f40090cc95 100644 --- a/test/univariate/continuous/noncentralt.jl +++ b/test/univariate/continuous/noncentralt.jl @@ -10,11 +10,11 @@ using Test @test_throws MethodError NoncentralT(ν, complex(λ)) @test d == typeof(d)(params(d)...) 
@test d == deepcopy(d) - @test mean(d) ≈ -5.0766 atol = 1e-5 + @test mean(d) ≈ -5.0766 atol = 1.0e-5 λ = 5 d = NoncentralT(ν, λ) - @test mean(d) ≈ 5.0766 atol = 1e-5 + @test mean(d) ≈ 5.0766 atol = 1.0e-5 λ = 0 d = NoncentralT(ν, λ) diff --git a/test/univariate/continuous/normal.jl b/test/univariate/continuous/normal.jl index 1d83264b24..0f68977a02 100644 --- a/test/univariate/continuous/normal.jl +++ b/test/univariate/continuous/normal.jl @@ -3,8 +3,8 @@ using Test, Distributions, StatsFuns, ForwardDiff, OffsetArrays isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T @testset "Normal" begin - test_cgf(Normal(0, 1), (1, -1, 100.0f0, 1e6, -1e6)) - test_cgf(Normal(1, 0.4), (1, -1, 100.0f0, 1e6, -1e6)) + test_cgf(Normal(0, 1), (1, -1, 100.0f0, 1.0e6, -1.0e6)) + test_cgf(Normal(1, 0.4), (1, -1, 100.0f0, 1.0e6, -1.0e6)) @test isa(convert(Normal{Float64}, Float16(0), Float16(1)), Normal{Float64}) d = Normal(1.1, 2.3) @test convert(Normal{Float64}, d) === d @@ -16,9 +16,9 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T @test -Inf === logpdf(Normal(), Inf) @test iszero(logcdf(Normal(0, 0), 0)) @test iszero(logcdf(Normal(), Inf)) - @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0f0)) ≈ -6.607938594596893 rtol = 1e-12 - @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0)) ≈ -6.607938594596893 rtol = 1e-12 - @test @inferred(logdiffcdf(Normal(), 5.0, 3.0)) ≈ -6.607938594596893 rtol = 1e-12 + @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0f0)) ≈ -6.607938594596893 rtol = 1.0e-12 + @test @inferred(logdiffcdf(Normal(), 5.0f0, 3.0)) ≈ -6.607938594596893 rtol = 1.0e-12 + @test @inferred(logdiffcdf(Normal(), 5.0, 3.0)) ≈ -6.607938594596893 rtol = 1.0e-12 @test_throws ArgumentError logdiffcdf(Normal(), 3, 5) # Arguments in the tails @@ -65,7 +65,7 @@ isnan_type(::Type{T}, v) where {T} = isnan(v) && v isa T # test for #996 being fixed let d = Normal(0, 1), x = 1.0, ∂x = 2.0 @inferred cdf(d, ForwardDiff.Dual(x, ∂x)) ≈ - ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) + ForwardDiff.Dual(cdf(d, x), ∂x * pdf(d, x)) end end @@ -197,7 +197,7 @@ end @test meanform(NormalCanon()) == Normal() @test meanform(canonform(Normal(0.25, 0.7))) ≈ Normal(0.25, 0.7) @test convert(NormalCanon, convert(Normal, NormalCanon(0.3, 0.8))) ≈ - NormalCanon(0.3, 0.8) + NormalCanon(0.3, 0.8) @test mean(canonform(Normal(0.25, 0.7))) ≈ 0.25 @test std(canonform(Normal(0.25, 0.7))) ≈ 0.7 end diff --git a/test/univariate/continuous/normalcanon.jl b/test/univariate/continuous/normalcanon.jl index 9ae52e15f0..4f27dc124b 100644 --- a/test/univariate/continuous/normalcanon.jl +++ b/test/univariate/continuous/normalcanon.jl @@ -1,10 +1,10 @@ # Sampling Tests @testset "NormalCanon sampling tests" begin for d in [ - NormalCanon() - NormalCanon(-1.0, 2.5) - NormalCanon(2.0, 0.8) - ] + NormalCanon() + NormalCanon(-1.0, 2.5) + NormalCanon(2.0, 0.8) + ] test_distr(d, 10^6) end end diff --git a/test/univariate/continuous/pareto.jl b/test/univariate/continuous/pareto.jl index c86292c550..195e34a96e 100644 --- a/test/univariate/continuous/pareto.jl +++ b/test/univariate/continuous/pareto.jl @@ -1,10 +1,10 @@ @testset "Pareto Sampling Tests" begin for d in [ - Pareto() - Pareto(2.0) - Pareto(2.0, 1.5) - Pareto(3.0, 2.0) - ] + Pareto() + Pareto(2.0) + Pareto(2.0, 1.5) + Pareto(3.0, 2.0) + ] test_distr(d, 10^6) end end diff --git a/test/univariate/continuous/pgeneralizedgaussian.jl b/test/univariate/continuous/pgeneralizedgaussian.jl index 33a89ac339..f90377b153 100644 --- a/test/univariate/continuous/pgeneralizedgaussian.jl +++ 
b/test/univariate/continuous/pgeneralizedgaussian.jl @@ -40,10 +40,10 @@ using Test μ = randn() α = Random.randexp() for (d, dref) in ( - (PGeneralizedGaussian(μ, α, 1), Laplace(μ, α)), # p = 1 (Laplace) - (PGeneralizedGaussian(), Normal()), # p = 2 (standard normal) - (PGeneralizedGaussian(μ, α, 2), Normal(μ, α / sqrt(2))), # p = 2 (normal) - ) + (PGeneralizedGaussian(μ, α, 1), Laplace(μ, α)), # p = 1 (Laplace) + (PGeneralizedGaussian(), Normal()), # p = 2 (standard normal) + (PGeneralizedGaussian(μ, α, 2), Normal(μ, α / sqrt(2))), # p = 2 (normal) + ) @test minimum(d) == -Inf @test maximum(d) == Inf @@ -62,15 +62,15 @@ using Test @test std(d) ≈ std(dref) @test skewness(d) == 0 - @test kurtosis(d) ≈ kurtosis(dref) atol = 1e-12 + @test kurtosis(d) ≈ kurtosis(dref) atol = 1.0e-12 @test entropy(d) ≈ entropy(dref) # PDF + CDF tests. for x in (-Inf, d.μ - 4.2, d.μ - 1.2, d.μ, Float32(d.μ) + 0.3f0, d.μ + 4, Inf32) @test @inferred(pdf(d, x)) ≈ pdf(dref, x) @test @inferred(logpdf(d, x)) ≈ logpdf(dref, x) - @test @inferred(cdf(d, x)) ≈ cdf(dref, x) atol = 1e-12 - @test @inferred(logcdf(d, x)) ≈ logcdf(dref, x) atol = 1e-12 + @test @inferred(cdf(d, x)) ≈ cdf(dref, x) atol = 1.0e-12 + @test @inferred(logcdf(d, x)) ≈ logcdf(dref, x) atol = 1.0e-12 end # Additional tests, including sampling diff --git a/test/univariate/continuous/rician.jl b/test/univariate/continuous/rician.jl index b46d53cbd9..1bd2cb8741 100644 --- a/test/univariate/continuous/rician.jl +++ b/test/univariate/continuous/rician.jl @@ -14,15 +14,15 @@ @test var(d1) ≈ var(d2) @test mode(d1) ≈ mode(d2) @test median(d1) ≈ median(d2) - @test Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) ≈ - Base.Fix1(quantile, d2).([0.25, 0.45, 0.60, 0.80, 0.90]) + @test Base.Fix1(quantile, d1).([0.25, 0.45, 0.6, 0.8, 0.9]) ≈ + Base.Fix1(quantile, d2).([0.25, 0.45, 0.6, 0.8, 0.9]) @test Base.Fix1(pdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(pdf, d2).(0.0:0.1:1.0) @test Base.Fix1(cdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(cdf, d2).(0.0:0.1:1.0) d1 = Rician(10.0, 10.0) @test median(d1) == quantile(d1, 0.5) - x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) - @test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90]) + x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.6, 0.8, 0.9]) + @test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.6, 0.8, 0.9]) x = rand(Rician(5.0, 5.0), 100000) d1 = fit(Rician, x) diff --git a/test/univariate/continuous/semicircle.jl b/test/univariate/continuous/semicircle.jl index aadfb051ba..8ef848de72 100644 --- a/test/univariate/continuous/semicircle.jl +++ b/test/univariate/continuous/semicircle.jl @@ -64,5 +64,5 @@ rng = StableRNG(123) @test quantile(dmean, 0.01) < mean(sample) < quantile(dmean, 0.99) pvalue = pvalue_kolmogorovsmirnoff(sample, semi) - @test pvalue > 1e-2 + @test pvalue > 1.0e-2 end diff --git a/test/univariate/continuous/skewedexponentialpower.jl b/test/univariate/continuous/skewedexponentialpower.jl index 7c3b5475b6..75826157f3 100644 --- a/test/univariate/continuous/skewedexponentialpower.jl +++ b/test/univariate/continuous/skewedexponentialpower.jl @@ -63,13 +63,13 @@ using Test # exponential power function in R from package # VaRES. 
Values are set to μ = 0, σ = 1, p = 0.5, α = 0.7 test = [ - -10.0000000 0.004770878 0.02119061 - -9.2631579 0.005831228 0.02508110 + -10.0 0.004770878 0.02119061 + -9.2631579 0.005831228 0.0250811 -8.5263158 0.007185598 0.02985598 -7.7894737 0.008936705 0.03576749 -7.0526316 0.011232802 0.04315901 -6.3157895 0.014293449 0.05250781 - -5.5789474 0.018454000 0.06449163 + -5.5789474 0.018454 0.06449163 -4.8421053 0.024246394 0.08010122 -4.1052632 0.032555467 0.10083623 -3.3684211 0.044947194 0.12906978 @@ -82,15 +82,15 @@ using Test 1.7894737 0.031620241 0.95773428 2.5263158 0.016507946 0.97472202 3.2631579 0.009427166 0.98399211 - 4.0000000 0.005718906 0.98942757 + 4.0 0.005718906 0.98942757 ] d = SkewedExponentialPower(0, 1, 0.5, 0.7) for t in eachrow(test) - @test @inferred(pdf(d, t[1])) ≈ t[2] rtol = 1e-5 - @test @inferred(logpdf(d, t[1])) ≈ log(t[2]) rtol = 1e-5 - @test @inferred(cdf(d, t[1])) ≈ t[3] rtol = 1e-3 - @test @inferred(logcdf(d, t[1])) ≈ log(t[3]) rtol = 1e-3 + @test @inferred(pdf(d, t[1])) ≈ t[2] rtol = 1.0e-5 + @test @inferred(logpdf(d, t[1])) ≈ log(t[2]) rtol = 1.0e-5 + @test @inferred(cdf(d, t[1])) ≈ t[3] rtol = 1.0e-3 + @test @inferred(logcdf(d, t[1])) ≈ log(t[3]) rtol = 1.0e-3 end test_distr(d, 10^6) @@ -108,8 +108,8 @@ using Test # moments of the standard SEPD, Equation 18 in Zhy, D. and V. Zinde-Walsh (2009) moments = [ (2 * p^(1 / p))^k * - ((-1)^k * α^(1 + k) + (1 - α)^(1 + k)) * - gamma((1 + k) / p) / gamma(1 / p) for k ∈ 1:4 + ((-1)^k * α^(1 + k) + (1 - α)^(1 + k)) * + gamma((1 + k) / p) / gamma(1 / p) for k in 1:4 ] @inferred var(d) ≈ moments[2] - moments[1]^2 diff --git a/test/univariate/continuous/triangular.jl b/test/univariate/continuous/triangular.jl index 0009f2a07f..8db7b6fad6 100644 --- a/test/univariate/continuous/triangular.jl +++ b/test/univariate/continuous/triangular.jl @@ -58,7 +58,7 @@ using Test @test mean(d) == (a + b + c) / 3 @test median(d) == ( c >= middle(a, b) ? 
a + sqrt((b - a) * (c - a) / 2) : - b - sqrt((b - a) * (b - c) / 2) + b - sqrt((b - a) * (b - c) / 2) ) @test var(d) == (a^2 + b^2 + c^2 - a * b - a * c - b * c) / 18 @@ -110,10 +110,10 @@ using Test @test mgf(d, 0) == 1 @test fdm(Base.Fix1(mgf, d), 0.0) ≈ mean(d) - @test fdm2(Base.Fix1(mgf, d), 0.0) ≈ mean(d)^2 + var(d) rtol = 1e-6 + @test fdm2(Base.Fix1(mgf, d), 0.0) ≈ mean(d)^2 + var(d) rtol = 1.0e-6 @test cf(d, 0) == 1 @test fdm(Base.Fix1(cf, d), 0.0) ≈ mean(d) * im - @test fdm2(Base.Fix1(cf, d), 0.0) ≈ -(mean(d)^2 + var(d)) rtol = 1e-6 + @test fdm2(Base.Fix1(cf, d), 0.0) ≈ -(mean(d)^2 + var(d)) rtol = 1.0e-6 end end end diff --git a/test/univariate/continuous/uniform.jl b/test/univariate/continuous/uniform.jl index 9f61393193..3c81566eab 100644 --- a/test/univariate/continuous/uniform.jl +++ b/test/univariate/continuous/uniform.jl @@ -9,8 +9,8 @@ using Test @testset "uniform.jl" begin # affine transformations test_affine_transformations(Uniform, rand(), 4 + rand()) - test_cgf(Uniform(0, 1), (1, -1, 100.0f0, 1e6, -1e6)) - test_cgf(Uniform(100.0f0, 101.0f0), (1, -1, 100.0f0, 1e6, -1e6)) + test_cgf(Uniform(0, 1), (1, -1, 100.0f0, 1.0e6, -1.0e6)) + test_cgf(Uniform(100.0f0, 101.0f0), (1, -1, 100.0f0, 1.0e6, -1.0e6)) @testset "ChainRules" begin # run test suite for values in the support @@ -52,19 +52,19 @@ using Test end @testset "cgf uniform around 0" begin for (lo, hi, t) in [ - ((Float16(0), Float16(1), sqrt(eps(Float16)))), - ((Float16(0), Float16(1), Float16(0))), - ((Float16(0), Float16(1), -sqrt(eps(Float16)))), - (0.0f0, 1.0f0, sqrt(eps(Float32))), - (0.0f0, 1.0f0, 0.0f0), - (0.0f0, 1.0f0, -sqrt(eps(Float32))), - (-2.0f0, 1.0f0, 1.0f-30), - (-2.0f-4, -1.0f-4, -2.0f-40), - (0.0, 1.0, sqrt(eps(Float64))), - (0.0, 1.0, 0.0), - (0.0, 1.0, -sqrt(eps(Float64))), - (-2.0, 5.0, -1e-35), - ] + ((Float16(0), Float16(1), sqrt(eps(Float16)))), + ((Float16(0), Float16(1), Float16(0))), + ((Float16(0), Float16(1), -sqrt(eps(Float16)))), + (0.0f0, 1.0f0, sqrt(eps(Float32))), + (0.0f0, 1.0f0, 0.0f0), + (0.0f0, 1.0f0, -sqrt(eps(Float32))), + (-2.0f0, 1.0f0, 1.0f-30), + (-2.0f-4, -1.0f-4, -2.0f-40), + (0.0, 1.0, sqrt(eps(Float64))), + (0.0, 1.0, 0.0), + (0.0, 1.0, -sqrt(eps(Float64))), + (-2.0, 5.0, -1.0e-35), + ] T = typeof(lo) @assert T == typeof(lo) == typeof(hi) == typeof(t) @assert t <= sqrt(eps(T)) @@ -80,7 +80,7 @@ using Test @test cgf(d, t) === zero(t) else @test Distributions.cgf_around_zero(d, t) ≈ - Distributions.cgf_away_from_zero(d_big, t_big) atol = eps(t) rtol = 0 + Distributions.cgf_away_from_zero(d_big, t_big) atol = eps(t) rtol = 0 @test Distributions.cgf_around_zero(d, t) === cgf(d, t) end end diff --git a/test/univariate/continuous/weibull.jl b/test/univariate/continuous/weibull.jl index 8647dd8e4a..92836cfde3 100644 --- a/test/univariate/continuous/weibull.jl +++ b/test/univariate/continuous/weibull.jl @@ -14,7 +14,7 @@ using Test @test @inferred(logpdf(d, T(-3))) === ST(-Inf) @test @inferred(pdf(d, T(0))) === (α == 1 ? ST(S(α) / S(θ)) : ST(0)) @test @inferred(logpdf(d, T(0))) === - (α == 1 ? ST(log(S(α) / S(θ))) : ST(-Inf)) + (α == 1 ? 
ST(log(S(α) / S(θ))) : ST(-Inf)) if T <: AbstractFloat @test @inferred(pdf(d, T(-Inf))) === ST(0) diff --git a/test/univariate/discrete/bernoulli.jl b/test/univariate/discrete/bernoulli.jl index b8f622d43c..f05d37800b 100644 --- a/test/univariate/discrete/bernoulli.jl +++ b/test/univariate/discrete/bernoulli.jl @@ -4,5 +4,5 @@ using Test, Random @test rand(Bernoulli()) isa Bool @test rand(Bernoulli(), 10) isa Vector{Bool} -test_cgf(Bernoulli(0.5), (1.0f0, -1.0f0, 1e6, -1e6)) -test_cgf(Bernoulli(0.1), (1.0f0, -1.0f0, 1e6, -1e6)) +test_cgf(Bernoulli(0.5), (1.0f0, -1.0f0, 1.0e6, -1.0e6)) +test_cgf(Bernoulli(0.1), (1.0f0, -1.0f0, 1.0e6, -1.0e6)) diff --git a/test/univariate/discrete/bernoullilogit.jl b/test/univariate/discrete/bernoullilogit.jl index e60039ea79..8a5007e7c6 100644 --- a/test/univariate/discrete/bernoullilogit.jl +++ b/test/univariate/discrete/bernoullilogit.jl @@ -40,8 +40,8 @@ end end @testset "cgf" begin - test_cgf(BernoulliLogit(), (1.0f0, -1.0f0, 1e6, -1e6)) - test_cgf(BernoulliLogit(0.1), (1.0f0, -1.0f0, 1e6, -1e6)) + test_cgf(BernoulliLogit(), (1.0f0, -1.0f0, 1.0e6, -1.0e6)) + test_cgf(BernoulliLogit(0.1), (1.0f0, -1.0f0, 1.0e6, -1.0e6)) end @testset "comparison with `Bernoulli`" begin @@ -73,9 +73,9 @@ end end for t in (-5.2, 1.2f0) - @test @inferred(mgf(d, t)) ≈ mgf(d0, t) rtol = 1e-6 - @test @inferred(cgf(d, t)) ≈ cgf(d0, t) rtol = 1e-6 - @test @inferred(cf(d, t)) ≈ cf(d0, t) rtol = 1e-6 + @test @inferred(mgf(d, t)) ≈ mgf(d0, t) rtol = 1.0e-6 + @test @inferred(cgf(d, t)) ≈ cgf(d0, t) rtol = 1.0e-6 + @test @inferred(cf(d, t)) ≈ cf(d0, t) rtol = 1.0e-6 end end end diff --git a/test/univariate/discrete/betabinomial.jl b/test/univariate/discrete/betabinomial.jl index 64f5b07a45..877cf7168e 100644 --- a/test/univariate/discrete/betabinomial.jl +++ b/test/univariate/discrete/betabinomial.jl @@ -5,7 +5,7 @@ using Test @testset "logpdf" begin d = BetaBinomial(50, 0.2, 0.6) - for k = 1:50 + for k in 1:50 p = @inferred(pdf(d, k)) lp = @inferred(logpdf(d, k)) @test lp ≈ log(p) @@ -15,7 +15,7 @@ using Test @testset "support" begin d = BetaBinomial(50, 0.2, 0.6) - for k = 1:50 + for k in 1:50 @test insupport(d, k) end @test !insupport(d, 51) @@ -28,14 +28,14 @@ using Test if n >= 0 && α > 0 && β > 0 @test @inferred(BetaBinomial(n, α, β)) isa BetaBinomial{ST} @test @inferred(BetaBinomial(n, α, β; check_args = true)) isa - BetaBinomial{ST} + BetaBinomial{ST} else @test_throws DomainError BetaBinomial(n, α, β) @test_throws DomainError BetaBinomial(n, α, β; check_args = true) end @test @inferred(BetaBinomial(n, α, β; check_args = false)) isa - BetaBinomial{ST} + BetaBinomial{ST} end end end diff --git a/test/univariate/discrete/binomial.jl b/test/univariate/discrete/binomial.jl index 2f5c8b37df..e2c93d29a5 100644 --- a/test/univariate/discrete/binomial.jl +++ b/test/univariate/discrete/binomial.jl @@ -7,27 +7,27 @@ Random.seed!(1234) # Test the consistency between the recursive and nonrecursive computation of the pdf # of the Binomial distribution for (p, n) in [ - (0.6, 10), - (0.8, 6), - (0.5, 40), - (0.04, 20), - (1.0, 100), - (0.0, 10), - (0.999999, 1000), - (1e-7, 1000), - ] + (0.6, 10), + (0.8, 6), + (0.5, 40), + (0.04, 20), + (1.0, 100), + (0.0, 10), + (0.999999, 1000), + (1.0e-7, 1000), + ] d = Binomial(n, p) a = Base.Fix1(pdf, d).(0:n) - for t = 0:n - @test pdf(d, t) ≈ a[1+t] + for t in 0:n + @test pdf(d, t) ≈ a[1 + t] end li = rand(0:n, 2) rng = minimum(li):maximum(li) b = Base.Fix1(pdf, d).(rng) for t in rng - @test pdf(d, t) ≈ b[t-first(rng)+1] + @test pdf(d, t) ≈ b[t - 
first(rng) + 1] end end @@ -41,7 +41,7 @@ Random.seed!(1234) @test median(Binomial(65, 3 // 10)) == 19 @test median(Binomial(85, 3 // 10)) == 25 - @test all(median(Binomial(7, p)) == quantile(Binomial(7, p), 1 // 2) for p = 0:0.1:1) + @test all(median(Binomial(7, p)) == quantile(Binomial(7, p), 1 // 2) for p in 0:0.1:1) # Test mode @test Distributions.mode(Binomial(100, 0.4)) == 40 diff --git a/test/univariate/discrete/categorical.jl b/test/univariate/discrete/categorical.jl index fc3be9bbfd..57318c808c 100644 --- a/test/univariate/discrete/categorical.jl +++ b/test/univariate/discrete/categorical.jl @@ -5,12 +5,12 @@ using StableRNGs @testset "Categorical" begin for p in Any[ - [0.5, 0.5], - [0.5f0, 0.5f0], - [1 // 2, 1 // 2], - [0.1, 0.3, 0.2, 0.4], - [0.15, 0.25, 0.6], - ] + [0.5, 0.5], + [0.5f0, 0.5f0], + [1 // 2, 1 // 2], + [0.1, 0.3, 0.2, 0.4], + [0.15, 0.25, 0.6], + ] d = Categorical(p) k = length(p) @@ -26,7 +26,7 @@ using StableRNGs @test d ≈ d c = 0.0 - for i = 1:k + for i in 1:k c += p[i] @test @inferred(pdf(d, i)) == p[i] @test @inferred(pdf(d, float(i))) == p[i] @@ -82,10 +82,10 @@ using StableRNGs p = ones(10^6) * 1.0e-6 @test Distributions.isprobvec(p) - @test convert(Categorical{Float64,Vector{Float64}}, d) === d + @test convert(Categorical{Float64, Vector{Float64}}, d) === d for x in (d, probs(d)) - d32 = convert(Categorical{Float32,Vector{Float32}}, d) - @test d32 isa Categorical{Float32,Vector{Float32}} + d32 = convert(Categorical{Float32, Vector{Float32}}, d) + @test d32 isa Categorical{Float32, Vector{Float32}} @test probs(d32) == map(Float32, probs(d)) end @@ -127,7 +127,7 @@ using StableRNGs @testset "issue #832" begin priorities = collect(Float64, 1:1000) - priorities[1:50] .= 1e8 + priorities[1:50] .= 1.0e8 at = Distributions.AliasTable(priorities) iat = rand(at, 16) @@ -135,7 +135,7 @@ using StableRNGs # failure rate of a single sample is sum(51:1000)/50e8 = 9.9845e-5 # failure rate of 4 out of 16 samples is 1-cdf(Binomial(16, 9.9845e-5), 3) = 1.8074430840897548e-13 # this test should randomly fail with a probability of 1.8074430840897548e-13 - @test count(==(1e8), priorities[iat]) >= 13 + @test count(==(1.0e8), priorities[iat]) >= 13 end end diff --git a/test/univariate/discrete/dirac.jl b/test/univariate/discrete/dirac.jl index c3a561d7a1..c42711782d 100644 --- a/test/univariate/discrete/dirac.jl +++ b/test/univariate/discrete/dirac.jl @@ -2,8 +2,8 @@ using Distributions using Test @testset "Dirac tests" begin - test_cgf(Dirac(13), (1, 1.0f-4, 1e10, 10, -4)) - test_cgf(Dirac(-1.0f2), (1, 1.0f-4, 1e10, 10, -4)) + test_cgf(Dirac(13), (1, 1.0f-4, 1.0e10, 10, -4)) + test_cgf(Dirac(-1.0f2), (1, 1.0f-4, 1.0e10, 10, -4)) for val in (3, 3.0, -3.5) d = Dirac(val) diff --git a/test/univariate/discrete/discretenonparametric.jl b/test/univariate/discrete/discretenonparametric.jl index 1a010201fe..5c0a7252a3 100644 --- a/test/univariate/discrete/discretenonparametric.jl +++ b/test/univariate/discrete/discretenonparametric.jl @@ -4,14 +4,14 @@ using Test # A dummy RNG that always outputs 1 struct AllOneRNG <: AbstractRNG end -Base.rand(::AllOneRNG, ::Type{T}) where {T<:Number} = one(T) +Base.rand(::AllOneRNG, ::Type{T}) where {T <: Number} = one(T) rng = MersenneTwister(123) @testset "Testing matrix-variates with $key" for (key, func) in Dict( - "rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], -) + "rand(...)" => [rand, rand], + "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)], + ) d = 
DiscreteNonParametric([40.0, 80.0, 120.0, -60.0], [0.4, 0.3, 0.1, 0.2]) @@ -127,17 +127,17 @@ rng = MersenneTwister(123) ws = [1.0, 2, 1, 0, 1, 1, 2, 3, 2, 6, 1] ss = suffstats(DiscreteNonParametric, xs) - @test ss isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}} + @test ss isa Distributions.DiscreteNonParametricStats{Int, Float64, Vector{Int}} @test ss.support == [1, 2, 3, 4, 5] @test ss.freq == [2.0, 3.0, 3.0, 2.0, 1.0] ss2 = suffstats(DiscreteNonParametric, xs, ws) - @test ss2 isa Distributions.DiscreteNonParametricStats{Int,Float64,Vector{Int}} + @test ss2 isa Distributions.DiscreteNonParametricStats{Int, Float64, Vector{Int}} @test ss2.support == [1, 2, 3, 4, 5] @test ss2.freq == [2.0, 11.0, 5.0, 1.0, 1.0] d1 = fit_mle(DiscreteNonParametric, ss) - @test d1 isa DiscreteNonParametric{Int,Float64,Vector{Int}} + @test d1 isa DiscreteNonParametric{Int, Float64, Vector{Int}} @test support(d1) == ss.support @test probs(d1) ≈ ss.freq ./ 11 @@ -154,29 +154,29 @@ rng = MersenneTwister(123) # Numerical stability; see issue #872 and PR #926 p = [1 - eps(Float32), eps(Float32)] d = Categorical(p) - @test ([rand(d) for _ = 1:100_000]; true) + @test ([rand(d) for _ in 1:100_000]; true) # Numerical stability w/ large prob vectors; # see issue #1017 n = 20000 # large vector length - p = Float32[0.5; fill(0.5 / (n ÷ 2) - 3e-8, n ÷ 2); fill(eps(Float64), n ÷ 2)] + p = Float32[0.5; fill(0.5 / (n ÷ 2) - 3.0e-8, n ÷ 2); fill(eps(Float64), n ÷ 2)] d = Categorical(p) rng = AllOneRNG() @test (rand(rng, d); true) # Sampling with values of zero probability; see issue #1105 d = DiscreteNonParametric([0, 1, 2, 3, 4], [0.0f0, 0.1f0, 0.2f0, 0.3f0, 0.4f0]) - @test iszero(count(iszero(rand(d)) for _ = 1:100_000_000)) + @test iszero(count(iszero(rand(d)) for _ in 1:100_000_000)) d = DiscreteNonParametric([4, 3, 2, 1, 0], [0.4f0, 0.3f0, 0.2f0, 0.1f0, 0.0f0]) - @test iszero(count(iszero(rand(d)) for _ = 1:100_000_000)) + @test iszero(count(iszero(rand(d)) for _ in 1:100_000_000)) # Sampling with integer-valued probabilities; see issue #1111 d = DiscreteNonParametric([1, 2], [0, 1]) - @test iszero(count(isone(rand(d)) for _ = 1:100)) + @test iszero(count(isone(rand(d)) for _ in 1:100)) d = DiscreteNonParametric([2, 1], [1, 0]) - @test iszero(count(isone(rand(d)) for _ = 1:100)) + @test iszero(count(isone(rand(d)) for _ in 1:100)) @testset "type stability" begin d = DiscreteNonParametric([0, 1], [0.5, 0.5]) @@ -189,8 +189,8 @@ end @testset "comparisons" begin d1 = DiscreteNonParametric([1, 2], [0.4, 0.6]) d2 = DiscreteNonParametric([1, 2], [0.6, 0.4]) - d3 = DiscreteNonParametric([1 + 1e-9, 2], [0.4, 0.6]) - d4 = DiscreteNonParametric([1 + 1e-9, 2], [0.6, 0.4]) + d3 = DiscreteNonParametric([1 + 1.0e-9, 2], [0.4, 0.6]) + d4 = DiscreteNonParametric([1 + 1.0e-9, 2], [0.6, 0.4]) d5 = DiscreteNonParametric([9, 2, 4], [0.2, 0.7, 0.1]) # Same distribution @@ -223,11 +223,11 @@ end # issue #1140 @test DiscreteNonParametric(1:2, [0.5, 0.5]) != - DiscreteNonParametric(1:3, [0.2, 0.4, 0.4]) + DiscreteNonParametric(1:3, [0.2, 0.4, 0.4]) # Different types @test DiscreteNonParametric(1:2, [0.5, 0.5]) == - DiscreteNonParametric([1, 2], [0.5f0, 0.5f0]) + DiscreteNonParametric([1, 2], [0.5f0, 0.5f0]) @test DiscreteNonParametric(1:2, [0.5, 0.5]) ≈ - DiscreteNonParametric([1, 2], [0.5f0, 0.5f0]) + DiscreteNonParametric([1, 2], [0.5f0, 0.5f0]) end diff --git a/test/univariate/discrete/discreteuniform.jl b/test/univariate/discrete/discreteuniform.jl index df06bde7bf..f32f7b5f2d 100644 --- 
a/test/univariate/discrete/discreteuniform.jl +++ b/test/univariate/discrete/discreteuniform.jl @@ -9,7 +9,7 @@ using Test for (a, b) in ((-8, -6), (-8, -5), (-5, 1), (-5, 2), (-2, 3), (-2, 4), (2, 4), (2, 5)) d = DiscreteUniform(a, b) - for x = a:b + for x in a:b @test @inferred(quantile(d, cdf(d, x))) === x end diff --git a/test/univariate/discrete/geometric.jl b/test/univariate/discrete/geometric.jl index fe2ca7ff9e..41a7e95b97 100644 --- a/test/univariate/discrete/geometric.jl +++ b/test/univariate/discrete/geometric.jl @@ -15,8 +15,8 @@ using FiniteDifferences m2 = var(d) + mean(d)^2 @test fdm2(Base.Fix1(mgf, d), 0) ≈ m2 @test fdm2(Base.Fix1(cf, d), 0) ≈ -m2 - test_cgf(Geometric(0.1), (1.0f-1, -1e6)) - test_cgf(Geometric(0.5), (1.0f-1, -1e6)) + test_cgf(Geometric(0.1), (1.0f-1, -1.0e6)) + test_cgf(Geometric(0.5), (1.0f-1, -1.0e6)) end @testset "Support" begin diff --git a/test/univariate/discrete/negativebinomial.jl b/test/univariate/discrete/negativebinomial.jl index 07b8a1e505..0bdcf0181a 100644 --- a/test/univariate/discrete/negativebinomial.jl +++ b/test/univariate/discrete/negativebinomial.jl @@ -7,8 +7,8 @@ using StatsFuns # Currently, most of the tests for NegativeBinomial are in the "ref" folder. # Eventually, we might want to consolidate the tests here -test_cgf(NegativeBinomial(10, 0.5), (-1.0f0, -200.0, -1e6)) -test_cgf(NegativeBinomial(3, 0.1), (-1.0f0, -200.0, -1e6)) +test_cgf(NegativeBinomial(10, 0.5), (-1.0f0, -200.0, -1.0e6)) +test_cgf(NegativeBinomial(3, 0.1), (-1.0f0, -200.0, -1.0e6)) mydiffp(r, p, k) = iszero(k) ? r / p : r / p - k / (1 - p) mydiffr(r, p, k) = @@ -29,13 +29,13 @@ mydiffr(r, p, k) = end @testset "NegativeBinomial r=$r, p=$p, k=$k" for p in exp10.(-10:0) .- eps(), # avoid p==1 since it's not differentiable - r in exp10.(range(-10, stop = 2, length = 25)), - k in (0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024) + r in exp10.(range(-10, stop = 2, length = 25)), + k in (0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024) @test ForwardDiff.derivative(_p -> logpdf(NegativeBinomial(r, _p), k), p) ≈ - mydiffp(r, p, k) rtol = 1e-12 atol = 1e-12 + mydiffp(r, p, k) rtol = 1.0e-12 atol = 1.0e-12 @test ForwardDiff.derivative(_r -> logpdf(NegativeBinomial(_r, p), k), r) ≈ - mydiffr(r, p, k) rtol = 1e-12 atol = 1e-12 + mydiffr(r, p, k) rtol = 1.0e-12 atol = 1.0e-12 end @testset "Check the corner case p==1" begin diff --git a/test/univariate/discrete/poisson.jl b/test/univariate/discrete/poisson.jl index 9172c6b3dc..2de42eb634 100644 --- a/test/univariate/discrete/poisson.jl +++ b/test/univariate/discrete/poisson.jl @@ -2,7 +2,7 @@ using Test, Distributions, OffsetArrays test_cgf(Poisson(1), (1.0f0, 2.0f0, 10.0, 50.0)) test_cgf(Poisson(10), (1.0f0, 2.0f0, 10.0, 50.0)) -test_cgf(Poisson(1e-3), (1.0f0, 2.0f0, 10.0, 50.0)) +test_cgf(Poisson(1.0e-3), (1.0f0, 2.0f0, 10.0, 50.0)) @testset "Poisson suffstats and OffsetArrays" begin a = rand(Poisson(), 11) diff --git a/test/univariate/discrete/poissonbinomial.jl b/test/univariate/discrete/poissonbinomial.jl index 974aee5cf6..97e587b871 100644 --- a/test/univariate/discrete/poissonbinomial.jl +++ b/test/univariate/discrete/poissonbinomial.jl @@ -4,27 +4,27 @@ using ForwardDiff using Test @testset "poissonbinomial" begin - function naive_esf(x::AbstractVector{T}) where {T<:Real} + function naive_esf(x::AbstractVector{T}) where {T <: Real} n = length(x) S = zeros(T, n + 1) - states = hcat(reverse.(digits.(0:(2^n-1), base = 2, pad = n))...)' + states = hcat(reverse.(digits.(0:(2^n - 1), base = 2, pad = n))...)' r_states = 
vec(mapslices(sum, states, dims = 2)) - for r = 0:n + for r in 0:n idx = findall(r_states .== r) - S[r+1] = sum(mapslices(x -> prod(x[x .!= 0]), states[idx, :] .* x', dims = 2)) + S[r + 1] = sum(mapslices(x -> prod(x[x .!= 0]), states[idx, :] .* x', dims = 2)) end return S end - function naive_pb(p::AbstractVector{T}) where {T<:Real} + function naive_pb(p::AbstractVector{T}) where {T <: Real} x = p ./ (1 .- p) naive_esf(x) * prod(1 ./ (1 .+ x)) end # test PoissonBinomial PMF algorithms - x = [3.5118, 0.6219, 0.2905, 0.8450, 1.8648] + x = [3.5118, 0.6219, 0.2905, 0.845, 1.8648] n = length(x) p = x ./ (1 .+ x) @@ -55,22 +55,22 @@ using Test @test @inferred(kurtosis(d)) ≈ kurtosis(dref) @test @inferred(skewness(d)) ≈ skewness(dref) - for t = 0:5 + for t in 0:5 @test @inferred(mgf(d, t)) ≈ mgf(dref, t) @test @inferred(cf(d, t)) ≈ cf(dref, t) end - for i = 0.1:0.1:0.9 + for i in 0.1:0.1:0.9 @test @inferred(quantile(d, i)) ≈ quantile(dref, i) end - for i = 0:n - @test @inferred(pdf(d, i)) ≈ pdf(dref, i) atol = 1e-14 - @test @inferred(pdf(d, i // 1)) ≈ pdf(dref, i) atol = 1e-14 + for i in 0:n + @test @inferred(pdf(d, i)) ≈ pdf(dref, i) atol = 1.0e-14 + @test @inferred(pdf(d, i // 1)) ≈ pdf(dref, i) atol = 1.0e-14 @test @inferred(logpdf(d, i)) ≈ logpdf(dref, i) @test @inferred(logpdf(d, i // 1)) ≈ logpdf(dref, i) for f in (cdf, ccdf, logcdf, logccdf) - @test @inferred(f(d, i)) ≈ f(dref, i) rtol = 1e-6 - @test @inferred(f(d, i // 1)) ≈ f(dref, i) rtol = 1e-6 - @test @inferred(f(d, i + 0.5)) ≈ f(dref, i) rtol = 1e-6 + @test @inferred(f(d, i)) ≈ f(dref, i) rtol = 1.0e-6 + @test @inferred(f(d, i // 1)) ≈ f(dref, i) rtol = 1.0e-6 + @test @inferred(f(d, i + 0.5)) ≈ f(dref, i) rtol = 1.0e-6 end end @@ -89,17 +89,17 @@ using Test # Test against a sum of three Binomial distributions for (n₁, n₂, n₃, p₁, p₂, p₃) in [ - (10, 10, 10, 0.1, 0.5, 0.9), - (1, 10, 100, 0.99, 0.1, 0.05), - (5, 1, 3, 0.01, 0.99, 0.999), - (10, 7, 10, 0.0, 0.9, 0.5), - ] + (10, 10, 10, 0.1, 0.5, 0.9), + (1, 10, 100, 0.99, 0.1, 0.05), + (5, 1, 3, 0.01, 0.99, 0.999), + (10, 7, 10, 0.0, 0.9, 0.5), + ] n = n₁ + n₂ + n₃ p = zeros(n) p[1:n₁] .= p₁ - p[(n₁+1):(n₁+n₂)] .= p₂ - p[(n₁+n₂+1):end] .= p₃ + p[(n₁ + 1):(n₁ + n₂)] .= p₂ + p[(n₁ + n₂ + 1):end] .= p₃ d = PoissonBinomial(p) println(" testing PoissonBinomial [$(n₁) × $(p₁), $(n₂) × $(p₂), $(n₃) × $(p₃)]") b1 = Binomial(n₁, p₁) @@ -112,22 +112,22 @@ using Test @test @inferred(mean(d)) ≈ (mean(b1) + mean(b2) + mean(b3)) @test @inferred(var(d)) ≈ (var(b1) + var(b2) + var(b3)) - for t = 0:5 + for t in 0:5 @test @inferred(mgf(d, t)) ≈ (mgf(b1, t) * mgf(b2, t) * mgf(b3, t)) @test @inferred(cf(d, t)) ≈ (cf(b1, t) * cf(b2, t) * cf(b3, t)) end - for k = 0:n + for k in 0:n m = 0.0 - for i = 0:min(n₁, k) + for i in 0:min(n₁, k) mc = 0.0 - for j = i:min(i+n₂, k) - mc += (k - j <= n₃) && pmf2[j-i+1] * pmf3[k-j+1] + for j in i:min(i + n₂, k) + mc += (k - j <= n₃) && pmf2[j - i + 1] * pmf3[k - j + 1] end - m += pmf1[i+1] * mc + m += pmf1[i + 1] * mc end - @test @inferred(pdf(d, k)) ≈ m atol = 1e-14 - @test @inferred(pdf(d, k // 1)) ≈ m atol = 1e-14 + @test @inferred(pdf(d, k)) ≈ m atol = 1.0e-14 + @test @inferred(pdf(d, k // 1)) ≈ m atol = 1.0e-14 @test @inferred(logpdf(d, k)) ≈ log(m) @test @inferred(logpdf(d, k // 1)) ≈ log(m) end @@ -154,7 +154,7 @@ using Test # Test autodiff using ForwardDiff f = x -> logpdf(PoissonBinomial(x), 0) at = [0.5, 0.5] - @test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1e-6) + @test isapprox(ForwardDiff.gradient(f, at), fdm(f, at), atol = 1.0e-6) # Test 
ChainRules definition for f in (Distributions.poissonbinomial_pdf, Distributions.poissonbinomial_pdf_fft) diff --git a/test/univariate/discrete/soliton.jl b/test/univariate/discrete/soliton.jl index ff5fd3b7f4..c74c8e9ff8 100644 --- a/test/univariate/discrete/soliton.jl +++ b/test/univariate/discrete/soliton.jl @@ -17,9 +17,9 @@ using Distributions @test insupport(Ω, 1) && insupport(Ω, 2) && insupport(Ω, K) @test !insupport(Ω, 0) && !insupport(Ω, 2.1) && !insupport(Ω, K + 1) - K, M, δ, atol = 100, 60, 0.2, 1e-3 + K, M, δ, atol = 100, 60, 0.2, 1.0e-3 Ω = Soliton(K, M, δ, atol) - ds = [d for d = 1:K if pdf(Ω, d) > 0] + ds = [d for d in 1:K if pdf(Ω, d) > 0] @test all(Base.Fix1(pdf, Ω).(ds) .> atol) @test cdf(Ω, 0) ≈ 0 @test cdf(Ω, K) ≈ 1 diff --git a/test/univariate/locationscale.jl b/test/univariate/locationscale.jl index 70cf62f1bd..d72536a977 100644 --- a/test/univariate/locationscale.jl +++ b/test/univariate/locationscale.jl @@ -1,10 +1,10 @@ function test_location_scale( - rng::Union{AbstractRNG,Missing}, - μ::Real, - σ::Real, - ρ::UnivariateDistribution, - dref::UnivariateDistribution, -) + rng::Union{AbstractRNG, Missing}, + μ::Real, + σ::Real, + ρ::UnivariateDistribution, + dref::UnivariateDistribution, + ) d = Distributions.AffineDistribution(μ, σ, ρ) @test params(d) == (μ, σ, ρ) @test eltype(d) === eltype(dref) @@ -42,8 +42,8 @@ function test_location_scale( @testset "$k" for (k, dtest) in d_dict if dtest isa Distributions.AffineDistribution @test typeof(dtest.μ) === typeof(dtest.σ) - @test location(dtest) ≈ μ atol = 1e-15 - @test scale(dtest) ≈ σ atol = 1e-15 + @test location(dtest) ≈ μ atol = 1.0e-15 + @test scale(dtest) ≈ σ atol = 1.0e-15 end end end @@ -74,7 +74,7 @@ function test_location_scale( #### Evaluation & Sampling - @testset "Evaluation & Sampling" begin + return @testset "Evaluation & Sampling" begin @testset "$k" for (k, dtest) in d_dict xs = rand(dref, 5) x = first(xs) @@ -90,9 +90,9 @@ function test_location_scale( @test loglikelihood(dtest, xs) ≈ loglikelihood(dref, xs) @test cdf(dtest, x) ≈ cdf(dref, x) - @test logcdf(dtest, x) ≈ logcdf(dref, x) atol = 1e-14 - @test ccdf(dtest, x) ≈ ccdf(dref, x) atol = 1e-14 - @test logccdf(dtest, x) ≈ logccdf(dref, x) atol = 1e-14 + @test logcdf(dtest, x) ≈ logcdf(dref, x) atol = 1.0e-14 + @test ccdf(dtest, x) ≈ ccdf(dref, x) atol = 1.0e-14 + @test logccdf(dtest, x) ≈ logccdf(dref, x) atol = 1.0e-14 @test quantile(dtest, 0.1) ≈ quantile(dref, 0.1) @test quantile(dtest, 0.5) ≈ quantile(dref, 0.5) @@ -128,12 +128,12 @@ function test_location_scale( end function test_location_scale_normal( - rng::Union{AbstractRNG,Missing}, - μ::Real, - σ::Real, - μD::Real, - σD::Real, -) + rng::Union{AbstractRNG, Missing}, + μ::Real, + σ::Real, + μD::Real, + σD::Real, + ) ρ = Normal(μD, σD) dref = Normal(μ + σ * μD, abs(σ) * σD) @test dref === μ + σ * ρ @@ -141,12 +141,12 @@ function test_location_scale_normal( end function test_location_scale_discretenonparametric( - rng::Union{AbstractRNG,Missing}, - μ::Real, - σ::Real, - support, - probs, -) + rng::Union{AbstractRNG, Missing}, + μ::Real, + σ::Real, + support, + probs, + ) ρ = DiscreteNonParametric(support, probs) dref = DiscreteNonParametric(μ .+ σ .* support, probs) return test_location_scale(rng, μ, σ, ρ, dref) @@ -193,6 +193,6 @@ end @test_logs Distributions.AffineDistribution(1.0, 1, Normal()) @test_deprecated ls_norm = LocationScale(1.0, 1, Normal()) - @test ls_norm isa LocationScale{Float64,Continuous,Normal{Float64}} - @test ls_norm isa 
Distributions.AffineDistribution{Float64,Continuous,Normal{Float64}} + @test ls_norm isa LocationScale{Float64, Continuous, Normal{Float64}} + @test ls_norm isa Distributions.AffineDistribution{Float64, Continuous, Normal{Float64}} end diff --git a/test/univariate/orderstatistic.jl b/test/univariate/orderstatistic.jl index 21d1d8896b..d4e11e0b22 100644 --- a/test/univariate/orderstatistic.jl +++ b/test/univariate/orderstatistic.jl @@ -4,7 +4,7 @@ using StatsBase @testset "OrderStatistic" begin @testset "basic" begin - for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i = 1:n + for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i in 1:n d = OrderStatistic(dist, n, i) @test d isa OrderStatistic if dist isa DiscreteUnivariateDistribution @@ -25,7 +25,7 @@ using StatsBase end @testset "params" begin - for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i = 1:n + for dist in [Uniform(), Normal(), DiscreteUniform(10)], n in [1, 2, 10], i in 1:n d = OrderStatistic(dist, n, i) @test params(d) == (params(dist)..., n, i) @test partype(d) === partype(dist) @@ -34,7 +34,7 @@ using StatsBase @testset "support" begin n = 10 - for i = 1:10 + for i in 1:10 d1 = OrderStatistic(Uniform(), n, i) @test minimum(d1) == 0 @test maximum(d1) == 1 @@ -76,9 +76,9 @@ using StatsBase # test against the exact formula computed using BigFloats @testset for T in (Float32, Float64) @testset for dist in - [Uniform(T(-2), T(1)), Normal(T(3), T(2)), Exponential(T(10))], - n in [1, 10, 100], - i = 1:n + [Uniform(T(-2), T(1)), Normal(T(3), T(2)), Exponential(T(10))], + n in [1, 10, 100], + i in 1:n d = OrderStatistic(dist, n, i) c = factorial(big(n)) / factorial(big(i - 1)) / factorial(big(n - i)) @@ -95,9 +95,9 @@ using StatsBase @testset "discrete" begin # test check that the pdf is the difference of the CDF at adjacent points @testset for dist in - [DiscreteUniform(10, 30), Poisson(100.0), Binomial(20, 0.3)], - n in [1, 10, 100], - i = 1:n + [DiscreteUniform(10, 30), Poisson(100.0), Binomial(20, 0.3)], + n in [1, 10, 100], + i in 1:n d = OrderStatistic(dist, n, i) xs = quantile(dist, 0.01):quantile(dist, 0.99) @@ -113,14 +113,14 @@ using StatsBase @testset "distribution normalizes to 1" begin @testset for dist in [ - Uniform(-2, 1), - Normal(2, 3), - Exponential(5), - DiscreteUniform(10, 40), - Poisson(100), - ], - n in [1, 10, 20], - i = 1:n + Uniform(-2, 1), + Normal(2, 3), + Exponential(5), + DiscreteUniform(10, 40), + Poisson(100), + ], + n in [1, 10, 20], + i in 1:n d = OrderStatistic(dist, n, i) Distributions.expectation(one, d) ≈ 1 @@ -137,12 +137,12 @@ using StatsBase (DiscreteUniform(1, 10), DiscreteUniform(1, 10)), (Poisson(T(20)), Poisson(big(20))), ] - @testset for (dist, bigdist) in dists, n in [10, 100], i = 1:n + @testset for (dist, bigdist) in dists, n in [10, 100], i in 1:n dist isa DiscreteDistribution && T !== Float64 && continue d = OrderStatistic(dist, n, i) # since density is concentrated around the i/n quantile, sample a point # nearby it - x = quantile(dist, clamp(i / n + (rand() - 1 // 2) / 10, 1e-4, 1 - 1e-4)) + x = quantile(dist, clamp(i / n + (rand() - 1 // 2) / 10, 1.0e-4, 1 - 1.0e-4)) p = cdf(bigdist, big(x)) cdf_exp = sum(i:n) do j c = binomial(big(n), big(j)) @@ -169,15 +169,15 @@ using StatsBase xq = @inferred(T, quantile(d, q)) xqc = @inferred(T, cquantile(d, 1 - q)) @test xq ≈ xqc - @test isapprox(xq, T(x); atol = 1e-4) || - (dist isa DiscreteDistribution && xq < x) + @test isapprox(xq, T(x); atol = 1.0e-4) || + (dist isa 
DiscreteDistribution && xq < x) end end end @testset "rand" begin @testset for T in [Float32, Float64], - dist in [Uniform(T(-2), T(1)), Normal(T(1), T(2))] + dist in [Uniform(T(-2), T(1)), Normal(T(1), T(2))] d = OrderStatistic(dist, 10, 5) rng = Random.default_rng() @@ -202,7 +202,7 @@ using StatsBase tol = quantile(Normal(), 1 - α) @testset for dist in [Uniform(), Exponential(), Poisson(20), Binomial(20, 0.3)] - @testset for n in [1, 10, 100], i = 1:n + @testset for n in [1, 10, 100], i in 1:n d = OrderStatistic(dist, n, i) x = rand(d, ndraws) m, v = mean_and_var(x) diff --git a/test/univariate_bounds.jl b/test/univariate_bounds.jl index c0ae9da898..94868add99 100644 --- a/test/univariate_bounds.jl +++ b/test/univariate_bounds.jl @@ -4,11 +4,11 @@ using Test # to make sure that subtypes provides the required behavior without having to add # a dependency to InteractiveUtils function _subtypes( - m::Module, - x::Type, - sts = Base.IdSet{Any}(), - visited = Base.IdSet{Module}(), -) + m::Module, + x::Type, + sts = Base.IdSet{Any}(), + visited = Base.IdSet{Module}(), + ) push!(visited, m) xt = Base.unwrap_unionall(x) if !isa(xt, DataType) diff --git a/test/univariates.jl b/test/univariates.jl index 50711cde27..00321a732a 100644 --- a/test/univariates.jl +++ b/test/univariates.jl @@ -33,6 +33,7 @@ function verify_and_test_drive(jsonfile, selected, n_tsamples::Int) # verification and testing verify_and_test(dtype, d, dct, n_tsamples) end + return end @@ -47,11 +48,11 @@ _json_value(x::AbstractString) = function verify_and_test( - D::Union{Type,Function}, - d::UnivariateDistribution, - dct::Dict, - n_tsamples::Int, -) + D::Union{Type, Function}, + d::UnivariateDistribution, + dct::Dict, + n_tsamples::Int, + ) # verify properties # # Note: properties include all applicable params and stats @@ -70,7 +71,7 @@ function verify_and_test( @test partype(d) === mapfoldl( typeof, (S, T) -> - T <: Distribution ? promote_type(S, partype(T)) : + T <: Distribution ? promote_type(S, partype(T)) : (T <: Nothing ? S : promote_type(S, eltype(T))), pars; init = Union{}, @@ -99,7 +100,7 @@ function verify_and_test( expect_v = _json_value(val) f = eval(Symbol(fname)) @assert isa(f, Function) - @test isapprox(f(d), expect_v; atol = 1e-12, rtol = 1e-8, nans = true) + @test isapprox(f(d), expect_v; atol = 1.0e-12, rtol = 1.0e-8, nans = true) end @test extrema(d) == (minimum(d), maximum(d)) @@ -113,9 +114,9 @@ function verify_and_test( # pdf method is not implemented for StudentizedRange if !isa(d, StudentizedRange) - @test Base.Fix1(pdf, d).(x) ≈ p atol = 1e-16 rtol = 1e-8 + @test Base.Fix1(pdf, d).(x) ≈ p atol = 1.0e-16 rtol = 1.0e-8 @test Base.Fix1(logpdf, d).(x) ≈ lp atol = - isa(d, NoncentralHypergeometric) ? 1e-4 : 1e-12 + isa(d, NoncentralHypergeometric) ? 1.0e-4 : 1.0e-12 end # cdf method is not implemented for NormalInverseGaussian @@ -123,13 +124,13 @@ function verify_and_test( @test isapprox( cdf(d, x), cf; - atol = isa(d, NoncentralHypergeometric) ? 1e-8 : 1e-12, + atol = isa(d, NoncentralHypergeometric) ? 
1.0e-8 : 1.0e-12, ) end end # verify quantiles - if !isa(d, Union{Skellam,VonMises,NormalInverseGaussian}) + if !isa(d, Union{Skellam, VonMises, NormalInverseGaussian}) qts = dct["quans"] for qt in qts q = Float64(qt["q"]) @@ -159,13 +160,13 @@ function verify_and_test( if isa(d, Cosine) n_tsamples = floor(Int, n_tsamples / 10) elseif isa(d, NoncentralBeta) || - isa(d, NoncentralChisq) || - isa(d, NoncentralF) || - isa(d, NoncentralT) + isa(d, NoncentralChisq) || + isa(d, NoncentralF) || + isa(d, NoncentralT) n_tsamples = min(n_tsamples, 100) end - if !isa(d, Union{Skellam,VonMises,NoncentralHypergeometric,NormalInverseGaussian}) + return if !isa(d, Union{Skellam, VonMises, NoncentralHypergeometric, NormalInverseGaussian}) test_distr(d, n_tsamples) end end diff --git a/test/utils.jl b/test/utils.jl index b83536a356..53bb2fdf2d 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -44,7 +44,7 @@ n = 10 areal = randn(n, n) / 2 aimg = randn(n, n) / 2 @testset "For A containing $eltya" for eltya in - (Float32, Float64, ComplexF32, ComplexF64, Int) + (Float32, Float64, ComplexF32, ComplexF64, Int) ainit = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) @@ -53,11 +53,11 @@ aimg = randn(n, n) / 2 notsquare = [ainit ainit] @test !ispossemdef(notsymmetric) @test !ispossemdef(notsquare) - for truerank = 0:n + for truerank in 0:n X = ainit[:, 1:truerank] A = truerank == 0 ? zeros(eltya, n, n) : X * X' @test ispossemdef(A) - for testrank = 0:n + for testrank in 0:n if testrank == truerank @test ispossemdef(A, testrank) else diff --git a/test/vonmises.jl b/test/vonmises.jl index b10bc8c41c..c8dead0a16 100644 --- a/test/vonmises.jl +++ b/test/vonmises.jl @@ -23,7 +23,7 @@ function test_vonmises(μ::Float64, κ::Float64) # Support @test support(d) == RealInterval(d.μ - π, d.μ + π) @test pdf(d, d.μ - 2π) == 0.0 - @test pdf(d, d.μ + 2π) == 0.0 + return @test pdf(d, d.μ + 2π) == 0.0 end From 6fe7afade74b8590d32a621ba7c417c17dde0c2f Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 18:24:44 +0200 Subject: [PATCH 6/8] Add git-blame-ignore-revs for Runic formatting commit --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..cc89156465 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Runic formatting commit +e0864bbf \ No newline at end of file From 2cb35ca184f6e0a7a215d5d5edb6414ac50f7d7d Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 18:24:58 +0200 Subject: [PATCH 7/8] Fix git-blame-ignore-revs with full commit hash --- .git-blame-ignore-revs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index cc89156465..3f9dbfce89 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,2 @@ # Runic formatting commit -e0864bbf \ No newline at end of file +e0864bbfea8e3a766f4d49ec956dc8ea52f19e9e \ No newline at end of file From 6ef4382cac52e19ae133991a68fdd72b3ed67090 Mon Sep 17 00:00:00 2001 From: Mikhail Mikhasenko Date: Mon, 23 Jun 2025 18:28:29 +0200 Subject: [PATCH 8/8] Document .git-blame-ignore-revs file in README --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 405c27f81a..b6ebdcabd1 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,9 @@ The formatting command can be run locally with ```julia julia --project=.formatting 
-e 'using Pkg; Pkg.instantiate(); include(".formatting/format_check.jl")'
 ```
+The `.git-blame-ignore-revs` file lists the hashes of mass formatting commits.
+This allows `git blame` to attribute lines to the authors of the actual code changes rather than to the formatting commit.
+When viewing blame information, use `git blame --ignore-revs-file .git-blame-ignore-revs`.
 
 ### Requirements
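
A brief usage sketch for the blame workflow documented above; the target file in the first command is just an example path from this repository, and the `blame.ignoreRevsFile` setting assumes Git 2.23 or newer:

```sh
# One-off: run blame while skipping the formatting commits listed in the file.
git blame --ignore-revs-file .git-blame-ignore-revs src/univariate/continuous/normal.jl

# Per-clone default, so plain `git blame` consults the file automatically.
# Run from the repository root so the relative path resolves correctly.
git config blame.ignoreRevsFile .git-blame-ignore-revs
```

Note that the ignore file is expected to contain full 40-character commit hashes (the same format as `fsck.skipList`), which is presumably why patch 7/8 expands the abbreviated `e0864bbf` to the full hash.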