upgrade to 0.7 (drop support for 0.6)
* specialfunctions and all(isfinite, x)

* try dropping 0.6

* Fix deprecations

* fix remaining deprecations that I understand

mostly LinearAlgebra rename, 1.->1.0 etc., dims kw arg, eye->I.

* don't test on 0.6 anymore
kleinschmidt authored Jul 10, 2018
1 parent a12b7e6 commit 5b8a08a
Showing 16 changed files with 68 additions and 58 deletions.
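For reference, the recurring 0.6 → 0.7 changes in this commit can be collected into one hypothetical scratch snippet (not part of the commit itself; it assumes a Julia 0.7 environment with SpecialFunctions installed):

    using LinearAlgebra    # Cholesky, I, tr, Diagonal moved here from Base
    using Statistics       # mean, var moved here from Base
    using SpecialFunctions # gamma, lgamma moved here from Base

    A = [2.0 0.5; 0.5 1.0]
    sum(A, dims=2)         # was sum(A, 2): reduction dims are now a keyword
    Matrix(1.0I, 2, 2)     # was eye(2)
    tr(A)                  # was trace(A)
    C = cholesky(A)        # was cholfact(A)
    Matrix(C)              # was C[:U]'*C[:U] (or full(C))
    all(isfinite, A)       # was all(isfinite(A)): isfinite no longer vectorizes
    1.0/2.0                # 1./x is ambiguous with broadcast 1 ./ x on 0.7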
.travis.yml (2 changes: 1 addition & 1 deletion)
@@ -3,7 +3,7 @@ os:
   - linux
   - osx
 julia:
-  - 0.6
+  - 0.7
   - nightly
 notifications:
   email: false

REQUIRE (3 changes: 2 additions & 1 deletion)
@@ -1,4 +1,5 @@
-julia 0.6
+julia 0.7-
 PDMats 0.6.0
 Distributions 0.14.0
 StatsFuns 0.6.0
+SpecialFunctions
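The trailing hyphen in "julia 0.7-" is REQUIRE syntax for accepting pre-release versions, presumably because only 0.7 alpha/beta builds existed when this commit landed (July 2018, before the 0.7.0 release).
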
src/ConjugatePriors.jl (9 changes: 7 additions & 2 deletions)
@@ -2,12 +2,17 @@ __precompile__()

 module ConjugatePriors

+using Statistics
+using LinearAlgebra
+
 using PDMats
 using Distributions
 using StatsFuns
+using SpecialFunctions
+

-import Base: mean
-import Base.LinAlg: Cholesky
+import Statistics: mean
+import LinearAlgebra: Cholesky

 import PDMats: PDMat
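These import changes reflect the 0.7 stdlib split: mean moved from Base to Statistics, and Cholesky from Base.LinAlg to LinearAlgebra. A minimal sketch of extending the relocated function the way this module does (PointMass is a hypothetical type, not part of the package):

    using Statistics
    import Statistics: mean   # was "import Base: mean" on 0.6

    # Toy distribution type, used only to illustrate the import.
    struct PointMass
        x::Float64
    end

    # Method extension now targets the Statistics stdlib function.
    mean(d::PointMass) = d.x

    mean(PointMass(3.0))   # -> 3.0
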
src/dirichlet_multi.jl (12 changes: 6 additions & 6 deletions)
@@ -11,30 +11,30 @@ complete(G::Type{Categorical}, pri::Dirichlet, p::Vector{Float64}) = Categorical

 posterior_canon(pri::Dirichlet, ss::CategoricalStats) = DirichletCanon(pri.alpha + ss.h)

-function posterior_canon{T<:Integer}(pri::Dirichlet, G::Type{Categorical}, x::Array{T})
+function posterior_canon(pri::Dirichlet, G::Type{Categorical}, x::Array{T}) where T<:Integer
     DirichletCanon(add_categorical_counts!(copy(pri.alpha), x))
 end

-function posterior_canon{T<:Integer}(pri::Dirichlet, G::Type{Categorical}, x::Array{T}, w::Array{Float64})
+function posterior_canon(pri::Dirichlet, G::Type{Categorical}, x::Array{T}, w::Array{Float64}) where T<:Integer
     DirichletCanon(add_categorical_counts!(copy(pri.alpha), x, w))
 end

 ### Dirichlet - Multinomial

 posterior_canon(pri::Dirichlet, ss::MultinomialStats) = DirichletCanon(pri.alpha + ss.scnts)

-function posterior_canon{T<:Real}(pri::Dirichlet, G::Type{Multinomial}, x::Matrix{T})
+function posterior_canon(pri::Dirichlet, G::Type{Multinomial}, x::Matrix{T}) where T<:Real
     d = length(pri)
     size(x,1) == d || throw(ArgumentError("Inconsistent argument dimensions."))
-    a = add!(sum(x, 2), pri.alpha)
+    a = add!(sum(x, dims=2), pri.alpha)
     DirichletCanon(vec(a))
 end

-function posterior_canon{T<:Real}(pri::Dirichlet, G::Type{Multinomial}, x::Matrix{T}, w::Array{Float64})
+function posterior_canon(pri::Dirichlet, G::Type{Multinomial}, x::Matrix{T}, w::Array{Float64}) where T<:Real
     d = length(pri)
     size(x) == (d, length(w)) || throw(ArgumentError("Inconsistent argument dimensions."))
     a = copy(pri.alpha)
-    Base.LinAlg.BLAS.gemv!('N', 1.0, map(Float64, x), vec(w), 1.0, a)
+    BLAS.gemv!('N', 1.0, map(Float64, x), vec(w), 1.0, a)
     DirichletCanon(a)
 end
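
The signature rewrites above are the 0.7 syntax change for parametric methods: function f{T}(x::Array{T}) becomes function f(x::Array{T}) where T. A standalone sketch of the new form (counts is a hypothetical function echoing add_categorical_counts!, not taken from the package):

    # 0.6 (deprecated):  function counts{T<:Integer}(x::Array{T}) ... end
    # 0.7 form, with the type parameter in a trailing where clause:
    function counts(x::Array{T}) where T<:Integer
        c = zeros(Int, maximum(x))
        for xi in x
            c[xi] += 1   # tally each observed category
        end
        return c
    end

    counts([1, 2, 2, 3])   # -> [1, 2, 1]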

src/fallbacks.jl (6 changes: 3 additions & 3 deletions)
@@ -3,9 +3,9 @@
 posterior_canon(pri, G::IncompleteFormulation, x) = posterior_canon(pri, suffstats(G, x))
 posterior_canon(pri, G::IncompleteFormulation, x, w) = posterior_canon(pri, suffstats(G, x, w))

-posterior{P<:Distribution}(pri::P, ss::SufficientStats) = Base.convert(P, posterior_canon(pri, ss))
-posterior{P<:Distribution}(pri::P, G::IncompleteFormulation, x) = Base.convert(P, posterior_canon(pri, G, x))
-posterior{P<:Distribution}(pri::P, G::IncompleteFormulation, x, w) = Base.convert(P, posterior_canon(pri, G, x, w))
+posterior(pri::P, ss::SufficientStats) where {P<:Distribution} = Base.convert(P, posterior_canon(pri, ss))
+posterior(pri::P, G::IncompleteFormulation, x) where {P<:Distribution} = Base.convert(P, posterior_canon(pri, G, x))
+posterior(pri::P, G::IncompleteFormulation, x, w) where {P<:Distribution} = Base.convert(P, posterior_canon(pri, G, x, w))

 posterior_rand(pri, ss::SufficientStats) = Base.rand(posterior_canon(pri, ss))
 posterior_rand(pri, G::IncompleteFormulation, x) = Base.rand(posterior_canon(pri, G, x))

src/mvnormal.jl (8 changes: 4 additions & 4 deletions)
@@ -83,11 +83,11 @@ function posterior_canon(prior::NormalInverseWishart, ss::MvNormalStats)
     mu = (kappa0.*mu0 + ss.s) ./ kappa
     nu = nu0 + ss.tw

-    Lam0 = LamC0[:U]'*LamC0[:U]
+    Lam0 = Matrix(LamC0)
     z = prior.zeromean ? ss.m : ss.m - mu0
     Lam = Lam0 + ss.s2 + kappa0*ss.tw/kappa*(z*z')

-    return NormalInverseWishart(mu, kappa, cholfact(Lam), nu)
+    return NormalInverseWishart(mu, kappa, cholesky(Lam), nu)
 end

 const MeanAndCovMat = Tuple{Vector{Float64}, Matrix{Float64}}
@@ -106,11 +106,11 @@ function posterior_canon(prior::NormalWishart, ss::MvNormalStats)
     nu = nu0 + ss.tw
     mu = (kappa0.*mu0 + ss.s) ./ kappa

-    Lam0 = TC0[:U]'*TC0[:U]
+    Lam0 = Matrix(TC0)
     z = prior.zeromean ? ss.m : ss.m - mu0
     Lam = Lam0 + ss.s2 + kappa0*ss.tw/kappa*(z*z')

-    return NormalWishart(mu, kappa, cholfact(Lam), nu)
+    return NormalWishart(mu, kappa, cholesky(Lam), nu)
 end

 complete(G::Type{MvNormal}, pri::NormalWishart, s::MeanAndCovMat) = MvNormal(s[1], inv(PDMat(s[2])))
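
Two renames show up in this file: cholfact is now cholesky, and the full matrix is recovered from a Cholesky factorization with Matrix(C) rather than C[:U]'*C[:U]. A round-trip sketch (assumes Julia 0.7+ with LinearAlgebra loaded):

    using LinearAlgebra

    Lam = [2.0 0.5; 0.5 1.0]   # symmetric positive definite
    C = cholesky(Lam)          # was cholfact(Lam) on 0.6
    C.U' * C.U ≈ Lam           # factors are now fields, not C[:U]
    Matrix(C) ≈ Lam            # rebuilds the original matrix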
src/normal.jl (4 changes: 2 additions & 2 deletions)
@@ -101,12 +101,12 @@ function posterior_canon(prior::NormalInverseGamma, ss::NormalStats)
     # ss.tw contains the number of observations if weight wasn't used to
     # compute the sufficient statistics.

-    vn_inv = 1./v0 + ss.tw
+    vn_inv = 1.0/v0 + ss.tw
     mu = (mu0/v0 + ss.s) / vn_inv  # ss.s = ss.tw*ss.m = n*xbar
     shape = shape0 + 0.5*ss.tw
     scale = scale0 + 0.5*ss.s2 + 0.5/(vn_inv*v0)*ss.tw*(ss.m-mu0).^2

-    return NormalInverseGamma(mu, 1./vn_inv, shape, scale)
+    return NormalInverseGamma(mu, 1.0/vn_inv, shape, scale)
 end

 complete(G::Type{Normal}, pri::NormalInverseGamma, t::NTuple{2,Float64}) = Normal(t[1], sqrt(t[2]))
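The literal rewrite here (1./v0 to 1.0/v0) is needed because on 0.7 a trailing-dot literal before an operator is ambiguous with broadcasting (1 ./ v0). The surrounding code is the Normal-InverseGamma conjugate update; a self-contained sketch of the same algebra, where nig_update is a hypothetical helper taking n observations with mean xbar and centered sum of squares s2:

    # Mirrors the posterior_canon formulas above (ss.tw = n, ss.m = xbar, ss.s2 = s2).
    function nig_update(mu0, v0, shape0, scale0, n, xbar, s2)
        vn_inv = 1.0/v0 + n                  # was written 1./v0 on 0.6
        mu = (mu0/v0 + n*xbar) / vn_inv
        shape = shape0 + 0.5*n
        scale = scale0 + 0.5*s2 + 0.5/(vn_inv*v0)*n*(xbar - mu0)^2
        return (mu, 1.0/vn_inv, shape, scale)
    end

    nig_update(0.0, 1.0, 2.0, 2.0, 10, 0.3, 4.5)   # updated (mu, v, shape, scale)
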
src/normalgamma.jl (8 changes: 4 additions & 4 deletions)
@@ -31,11 +31,11 @@ insupport(::Type{NormalGamma}, x::T, tau2::T) where T<:Real =

 # Probably should guard agains dividing by and taking the log of 0.
 function pdf(d::NormalGamma, x::T, tau2::T) where T<:Real
-    Zinv = d.rate.^d.shape / gamma(d.shape) * sqrt(d.nu / (2.*pi))
-    return Zinv * tau2.^(d.shape-0.5) * exp(-0.5*tau2*(d.nu*(x-d.mu).^2 + 2.*d.rate))
+    Zinv = d.rate.^d.shape / gamma(d.shape) * sqrt(d.nu / (2.0*pi))
+    return Zinv * tau2.^(d.shape-0.5) * exp(-0.5*tau2*(d.nu*(x-d.mu).^2 + 2.0*d.rate))
 end
 function logpdf(d::NormalGamma, x::T, tau2::T) where T<:Real
-    lZinv = d.shape*log(d.rate) - lgamma(d.shape) + 0.5*(log(d.nu) - log(2.*pi))
+    lZinv = d.shape*log(d.rate) - lgamma(d.shape) + 0.5*(log(d.nu) - log(2.0*pi))
     return lZinv + (d.shape-0.5)*log(tau2) - 0.5*tau2*(d.nu*(x-d.mu).^2 + 2*d.rate)
 end

@@ -45,6 +45,6 @@ function rand(d::NormalGamma)
     if tau2 <= zero(Float64)
         tau2 = eps(Float64)
     end
-    mu = rand(Normal(d.mu, sqrt(1./(tau2*d.nu))))
+    mu = rand(Normal(d.mu, sqrt(1.0/(tau2*d.nu))))
     return mu, tau2
 end
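
The 2.*pi fixes are the same trailing-dot ambiguity: 0.7 cannot tell 2.0*pi from broadcast 2 .* pi, so the zero must be written out. Note also that gamma and lgamma, used by these densities, now come from SpecialFunctions (in later SpecialFunctions releases lgamma was renamed loggamma). A quick sketch:

    using SpecialFunctions

    2.0 * pi                        # was 2.*pi, which 0.7 rejects as ambiguous
    lgamma(4.0) ≈ log(gamma(4.0))   # log(6.0); use loggamma on newer versions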
src/normalinversechisq.jl (2 changes: 1 addition & 1 deletion)
@@ -78,4 +78,4 @@ function mode(d::NormalInverseChisq)
     return μ, σ2
 end

-rand(d::NormalInverseChisq) = rand(NormalInverseGamma(d))
+rand(d::NormalInverseChisq) = rand(convert(NormalInverseGamma, d))
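
On 0.7, constructors no longer fall back to convert methods, so a cross-type call like NormalInverseGamma(d::NormalInverseChisq) must be spelled as an explicit convert (the package defines the corresponding convert method elsewhere). The same pattern with hypothetical toy types:

    struct Celsius;    degrees::Float64 end
    struct Fahrenheit; degrees::Float64 end

    # Define the conversion explicitly; on 0.6 a Fahrenheit(::Celsius)
    # constructor call could fall back to a convert method implicitly.
    Base.convert(::Type{Fahrenheit}, c::Celsius) = Fahrenheit(c.degrees * 9/5 + 32)

    convert(Fahrenheit, Celsius(100.0))   # -> Fahrenheit(212.0)
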
src/normalinversegamma.jl (4 changes: 2 additions & 2 deletions)
@@ -32,8 +32,8 @@ insupport(::Type{NormalInverseGamma}, x::T, sig2::T) where T<:Real =
 # Probably should guard agains dividing by and taking the log of 0.

 function pdf(d::NormalInverseGamma, x::T, sig2::T) where T<:Real
-    Zinv = d.scale.^d.shape / gamma(d.shape) / sqrt(d.v0 * 2.*pi)
-    return Zinv * 1./(sqrt(sig2)*sig2.^(d.shape+1.)) * exp(-d.scale/sig2 - 0.5/(sig2*d.v0)*(x-d.mu).^2)
+    Zinv = d.scale.^d.shape / gamma(d.shape) / sqrt(d.v0 * 2.0*pi)
+    return Zinv * 1.0/(sqrt(sig2)*sig2.^(d.shape+1.0)) * exp(-d.scale/sig2 - 0.5/(sig2*d.v0)*(x-d.mu).^2)
 end

 function logpdf(d::NormalInverseGamma, x::T, sig2::T) where T<:Real

src/normalinversewishart.jl (8 changes: 4 additions & 4 deletions)
@@ -35,11 +35,11 @@ end
 function NormalInverseWishart(mu::Vector{U}, kappa::Real,
                               Lambda::Matrix{S}, nu::Real) where {S<:Real, U<:Real}
     T = promote_type(eltype(mu), typeof(kappa), typeof(nu), S)
-    return NormalInverseWishart{T}(Vector{T}(mu), T(kappa), Cholesky{T}(cholfact(Lambda)), T(nu))
+    return NormalInverseWishart{T}(Vector{T}(mu), T(kappa), Cholesky{T}(cholesky(Lambda)), T(nu))
 end

 function insupport(::Type{NormalInverseWishart}, x::Vector{T}, Sig::Matrix{T}) where T<:Real
-    return (all(isfinite(x)) &&
+    return (all(isfinite, x) &&
         size(Sig, 1) == size(Sig, 2) &&
         isApproxSymmmetric(Sig) &&
         size(Sig, 1) == length(x) &&
@@ -77,11 +77,11 @@ function logpdf(niw::NormalInverseWishart, x::Vector{T}, Sig::Matrix{T}) where T
     logp::T = hnu * logdet(Lamchol)
     logp -= hnu * p * log(2.)
     logp -= logmvgamma(p, hnu)
-    logp -= hp * (log(2.*pi) - log(kappa))
+    logp -= hp * (log(2.0*pi) - log(kappa))

     # Inverse-Wishart
     logp -= (hnu + hp + 1.) * logdet(Sig)
-    logp -= 0.5 * trace(Sig \ (Lamchol[:U]' * Lamchol[:U]))
+    logp -= 0.5 * tr(Sig \ Matrix(Lamchol))

     # Normal
     z = niw.zeromean ? x : x - mu
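The all(isfinite(x)) form relied on the deprecated vectorized isfinite(::Array); on 0.7 the predicate is passed directly to all (or broadcast explicitly). A quick sketch:

    x = [1.0, 2.5, Inf]
    all(isfinite, x)      # false; predicate form, allocates no temporary
    all(isfinite.(x))     # equivalent, via explicit broadcasting
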
src/normalwishart.jl (6 changes: 3 additions & 3 deletions)
@@ -34,11 +34,11 @@

 function NormalWishart(mu::Vector{T}, kappa::T,
                        Tmat::Matrix{T}, nu::T) where T<:Real
-    NormalWishart{T}(mu, kappa, cholfact(Tmat), nu)
+    NormalWishart{T}(mu, kappa, cholesky(Tmat), nu)
 end

 function insupport(::Type{NormalWishart}, x::Vector{T}, Lam::Matrix{T}) where T<:Real
-    return (all(isfinite(x)) &&
+    return (all(isfinite, x) &&
         size(Lam, 1) == size(Lam, 2) &&
         isApproxSymmmetric(Lam) &&
         size(Lam, 1) == length(x) &&
@@ -69,7 +69,7 @@ function logpdf(nw::NormalWishart, x::Vector{T}, Lam::Matrix{T}) where T<:Real

     # Wishart (MvNormal contributes 0.5 as well)
     logp += (hnu - hp) * logdet(Lam)
-    logp -= 0.5 * trace(Tchol \ Lam)
+    logp -= 0.5 * tr(Tchol \ Lam)

     # Normal
     z = nw.zeromean ? x : x - mu
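trace was renamed tr in the LinearAlgebra stdlib, and a Cholesky object can sit on the left of a backslash solve, as Tchol \ Lam does above. A sketch (assumes Julia 0.7+):

    using LinearAlgebra

    T0  = [2.0 0.5; 0.5 1.0]
    Lam = [1.0 0.2; 0.2 1.5]
    Tchol = cholesky(T0)
    tr(Tchol \ Lam) ≈ tr(T0 \ Lam)   # was trace(...) on 0.6
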
test/conjugates.jl (2 changes: 1 addition & 1 deletion)
@@ -139,7 +139,7 @@ end
 x = rand(Multinomial(10, [0.2, 0.3, 0.5]), n)
 p = posterior(pri, Multinomial, x)
 @test isa(p, Dirichlet)
-@test p.alpha ≈ pri.alpha + vec(sum(x, 2))
+@test p.alpha ≈ pri.alpha + vec(sum(x, dims=2))

 r = posterior_mode(pri, Multinomial, x)
 @test r ≈ mode(p)

test/conjugates_mvnormal.jl (34 changes: 17 additions & 17 deletions)
@@ -20,14 +20,14 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 p = 4
 X = reshape(Float64[1:12;], p, n)
 w = rand(n)
-Xw = X * diagm(w)
+Xw = X * Diagonal(w)

-Sigma = 0.75 * eye(p) + fill(0.25, 4, 4)
+Sigma = 0.75I + fill(0.25, p, p)
 ss = suffstats(MvNormalKnownCov(Sigma), X)
 ssw = suffstats(MvNormalKnownCov(Sigma), X, w)

-s_t = sum(X, 2)
-ws_t = sum(Xw, 2)
+s_t = sum(X, dims=2)
+ws_t = sum(Xw, dims=2)
 tw_t = length(w)
 wtw_t = sum(w)

@@ -41,18 +41,18 @@
 n = 10
 # n = 100
 mu_true = [2., 3.]
-Sig_true = eye(2)
+Sig_true = Matrix(1.0I, 2, 2)
 Sig_true[1,2] = Sig_true[2,1] = 0.25
 mu0 = [2.5, 2.5]
-Sig0 = eye(2)
+Sig0 = Matrix(1.0I, 2, 2)
 Sig0[1,2] = Sig0[2,1] = 0.5
 X = rand(MultivariateNormal(mu_true, Sig_true), n)
 pri = MultivariateNormal(mu0, Sig0)

 post = posterior((pri, Sig_true), MvNormal, X)
 @test isa(post, FullNormal)

-@test post.μ ≈ inv(inv(Sig0) + n*inv(Sig_true))*(n*inv(Sig_true)*mean(X,2) + inv(Sig0)*mu0)
+@test post.μ ≈ inv(inv(Sig0) + n*inv(Sig_true))*(n*inv(Sig_true)*mean(X,dims=2) + inv(Sig0)*mu0)
 @test post.Σ.mat ≈ inv(inv(Sig0) + n*inv(Sig_true))

 # posterior_sample
@@ -67,17 +67,17 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 @testset "NormalInverseWishart - MvNormal" begin

 mu_true = [2., 2.]
-Sig_true = eye(2)
+Sig_true = Matrix(1.0I, 2, 2)
 Sig_true[1,2] = Sig_true[2,1] = 0.25

 X = rand(MultivariateNormal(mu_true, Sig_true), n)
-Xbar = mean(X,2)
-Xm = X .- mean(X,2)
+Xbar = mean(X,dims=2)
+Xm = X .- mean(X,dims=2)

 mu0 = [2., 3.]
 kappa0 = 3.
 nu0 = 4.
-T0 = eye(2)
+T0 = Matrix(1.0I, 2, 2)
 T0[1,2] = T0[2,1] = .5
 pri = NormalInverseWishart(mu0, kappa0, T0, nu0)

@@ -86,7 +86,7 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 @test post.mu ≈ (kappa0*mu0 + n*Xbar)./(kappa0 + n)
 @test post.kappa ≈ kappa0 + n
 @test post.nu ≈ nu0 + n
-@test (post.Lamchol[:U]'*post.Lamchol[:U]) ≈ T0 + A_mul_Bt(Xm, Xm) + kappa0*n/(kappa0+n)*(Xbar-mu0)*(Xbar-mu0)'
+@test Matrix(post.Lamchol) ≈ T0 + Xm*transpose(Xm) + kappa0*n/(kappa0+n)*(Xbar-mu0)*(Xbar-mu0)'

 ps = posterior_randmodel(pri, MultivariateNormal, X)

@@ -108,7 +108,7 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 @test all(post_nix2.μ .≈ post_niw.mu)
 @test post_nix2.κ ≈ post_niw.kappa
 @test post_nix2.ν ≈ post_niw.nu
-@test all(post_nix2.σ2 .≈ full(post_niw.Lamchol)[1] ./ post_niw.nu)
+@test all(post_nix2.σ2 .≈ Matrix(post_niw.Lamchol)[1] ./ post_niw.nu)

 μ, σ2 = rand(post_nix2)
 @test logpdf(post_nix2, μ, σ2) ≈ logpdf(post_niw, [μ], reshape([σ2], 1, 1))
@@ -122,17 +122,17 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 @testset "NormalWishart - MvNormal" begin

 mu_true = [2., 2.]
-Lam_true = eye(2)
+Lam_true = Matrix(1.0I, 2, 2)
 Lam_true[1,2] = Lam_true[2,1] = 0.25

 X = rand(MvNormal(mu_true, inv(Lam_true)), n)
-Xbar = mean(X,2)
+Xbar = mean(X,dims=2)
 Xm = X .- Xbar

 mu0 = [2., 3.]
 kappa0 = 3.
 nu0 = 4.
-T0 = eye(2)
+T0 = Matrix(1.0I, 2, 2)
 T0[1,2] = T0[2,1] = .5
 pri = NormalWishart(mu0, kappa0, T0, nu0)

@@ -141,7 +141,7 @@ ConjugatePriors.NormalInverseWishart(nix2::NormalInverseChisq) =
 @test post.mu ≈ (kappa0*mu0 + n*Xbar)./(kappa0 + n)
 @test post.kappa ≈ kappa0 + n
 @test post.nu ≈ nu0 + n
-@test (post.Tchol[:U]'*post.Tchol[:U]) ≈ T0 + A_mul_Bt(Xm, Xm) + kappa0*n/(kappa0+n)*(Xbar-mu0)*(Xbar-mu0)'
+@test Matrix(post.Tchol) ≈ T0 + Xm*transpose(Xm) + kappa0*n/(kappa0+n)*(Xbar-mu0)*(Xbar-mu0)'

 ps = posterior_randmodel(pri, MvNormal, X)
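These test updates collect most of the array-API changes in one place: diagm(w) becomes Diagonal(w), eye(p) becomes Matrix(1.0I, p, p) (or just 0.75I + ... when a scaled identity suffices), reductions take dims, full becomes Matrix, and A_mul_Bt(X, X) is written X*transpose(X). A sketch of the identity-matrix idioms (assumes Julia 0.7+):

    using LinearAlgebra

    w = [1.0, 2.0, 3.0]
    D = Diagonal(w)                  # was diagm(w); lazy diagonal matrix
    Matrix(1.0I, 3, 3)               # was eye(3); dense identity
    S = 0.75I + fill(0.25, 3, 3)     # UniformScaling folds into the sum
    X = reshape(Float64[1:6;], 3, 2)
    X * transpose(X)                 # was A_mul_Bt(X, X)
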
test/conjugates_normal.jl (16 changes: 9 additions & 7 deletions)
@@ -1,6 +1,8 @@
 using Distributions
 using ConjugatePriors

+using Random: srand
+
 using ConjugatePriors: NormalGamma, NormalInverseGamma, NormalInverseChisq
 using ConjugatePriors: posterior, posterior_rand, posterior_mode, posterior_randmodel, fit_map

@@ -123,10 +125,10 @@ w = rand(100)
 post = posterior(pri, Normal, x)
 @test isa(post, NormalInverseGamma)

-@test post.mu ≈ (mu0/v0 + n*mean(x))/(1./v0 + n)
-@test post.v0 ≈ 1./(1./v0 + n)
+@test post.mu ≈ (mu0/v0 + n*mean(x))/(1.0/v0 + n)
+@test post.v0 ≈ 1.0/(1.0/v0 + n)
 @test post.shape ≈ shape0 + 0.5*n
-@test post.scale ≈ scale0 + 0.5*(n-1)*var(x) + n./v0./(n + 1./v0)*0.5*(mean(x)-mu0).^2
+@test post.scale ≈ scale0 + 0.5*(n-1)*var(x) + n./v0./(n + 1.0/v0)*0.5*(mean(x)-mu0).^2

 ps = posterior_randmodel(pri, Normal, x)

@@ -154,9 +156,9 @@ w = rand(100)
 ν0 = 10.0

 pri = NormalInverseChisq(μ0, σ20, κ0, ν0)
-pri2 = NormalInverseGamma(pri)
+pri2 = convert(NormalInverseGamma, pri)

-@test NormalInverseChisq(pri2) == pri
+@test convert(NormalInverseChisq, pri2) == pri

 @test mode(pri2) == mode(pri)
 @test mean(pri2) == mean(pri)
@@ -168,7 +170,7 @@ w = rand(100)
 post = posterior(pri, Normal, x)
 post2 = posterior(pri2, Normal, x)
 @test isa(post, NormalInverseChisq)
-@test NormalInverseChisq(post2) == post
+@test convert(NormalInverseChisq, post2) == post

 for _ in 1:10
     x = rand(post)
@@ -189,7 +191,7 @@ w = rand(100)

 mu_true = 2.
 tau2_true = 3.
-x = rand(Normal(mu_true, 1./tau2_true), n)
+x = rand(Normal(mu_true, 1.0/tau2_true), n)

 mu0 = 2.
 nu0 = 3.
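
srand moved out of Base into the Random stdlib on 0.7, hence the new using Random: srand at the top of this test file (on Julia 1.0 it was subsequently renamed Random.seed!). A sketch:

    using Random: srand   # Julia 0.7; on 1.0+ use `using Random` and Random.seed!

    srand(1)              # seed the global RNG for reproducible tests
    rand(3)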