3 changes: 3 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,3 @@
{
"julia.environmentPath": "/Users/au568658/Library/CloudStorage/OneDrive-Aarhusuniversitet/Academ/Projects/Software/ADTests.jl"
}
6 changes: 6 additions & 0 deletions Project.toml
@@ -1,6 +1,8 @@
[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
Chairmarks = "0ca39b1e-fe0b-4e98-acfc-b1656634c4de"
DelayDiffEq = "bcd4f6db-9728-5f36-b5f7-82caef46ccdb"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
@@ -9,11 +11,15 @@ DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8"
Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
MultivariateStats = "6f286f6a-111f-5878-ab1e-185364afe411"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
7 changes: 5 additions & 2 deletions main.jl
@@ -123,8 +123,11 @@ end
@include_model "Effect of model size" "n500"
@include_model "PosteriorDB" "pdb_eight_schools_centered"
@include_model "PosteriorDB" "pdb_eight_schools_noncentered"
@include_model "Integrations with other packages" "metabayesian_MH"
@include_model "Integrations with other packages" "ordinary_diffeq"
@include_model "External libraries" "metabayesian_Turing_AdvancedMH_MH"
@include_model "External libraries" "DifferentialEquations_ODE"
@include_model "External libraries" "DifferentialEquations_DDE"
@include_model "External libraries" "Lux_nn"
@include_model "External libraries" "AbstractGPs_GP"

# The entry point to this script itself begins here
if ARGS == ["--list-model-keys"]
24 changes: 24 additions & 0 deletions models/AbstractGPs_GP.jl
@@ -0,0 +1,24 @@
#=
This is an example of using AbstractGPs.jl with Turing to model a Gaussian process.
The model is adapted from the Turing documentation: https://turinglang.org/docs/tutorials/gaussian-processes-introduction/
=#

using AbstractGPs
using LogExpFunctions

# Load data
distance = [2.,3.,4.,5.,6.]
n = [1443, 694, 455, 353, 272]
y = [1346, 577, 337, 208, 149]

# Make Turing model
@model function AbstractGPs_GP(d, n, y; jitter=1e-4)
v ~ Gamma(2, 1)
l ~ Gamma(4, 1)
f = GP(v * with_lengthscale(SEKernel(), l))
f_latent ~ f(d, jitter)
y ~ product_distribution(Binomial.(n, logistic.(f_latent)))
return (fx=f(d, jitter), f_latent=f_latent, y=y)
end

model = AbstractGPs_GP(distance, n, y)
41 changes: 41 additions & 0 deletions models/DifferentialEquations_DDE.jl
@@ -0,0 +1,41 @@
#=
This is an example of using DifferentialEquations.jl with Turing to model the delayed Lotka–Volterra equations (predator-prey model).
The model is adapted from the Turing documentation: https://turinglang.org/docs/tutorials/bayesian-differential-equations/
=#
using DelayDiffEq: DDEProblem, solve, MethodOfSteps, Tsit5

# SciMLSensitivity is needed for reverse-mode AD on differential equations
import SciMLSensitivity

function delay_lotka_volterra(du, u, h, p, t)
α, β, γ, δ = p
x, y = u
du[1] = α * h(p, t - 1; idxs=1) - β * x * y
du[2] = -γ * y + δ * x * y
return nothing
end
p = (1.5, 1.0, 3.0, 1.0)
u0 = [1.0; 1.0]
tspan = (0.0, 10.0)
h(p, t; idxs::Int) = 1.0
prob_dde = DDEProblem(delay_lotka_volterra, u0, h, tspan, p)
sol_dde = solve(prob_dde; saveat=0.1)
q = 1.7
ddedata = rand.(Poisson.(q .* Array(sol_dde)))

@model function DifferentialEquations_DDE(data, prob)
α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)
q ~ truncated(Normal(1.7, 0.2); lower=0, upper=3)
p = [α, β, γ, δ]
predicted = solve(prob, MethodOfSteps(Tsit5()); p=p, saveat=0.1, abstol=1e-6, reltol=1e-6)
ϵ = 1e-5
for i in eachindex(predicted)
data[:, i] ~ arraydist(Poisson.(q .* predicted[i] .+ ϵ))
end
return nothing
end

model = DifferentialEquations_DDE(ddedata, prob_dde)
39 changes: 39 additions & 0 deletions models/DifferentialEquations_ODE.jl
@@ -0,0 +1,39 @@
#=
This is an example of using DifferentialEquations.jl with Turing to model the Lotka–Volterra equations (predator-prey model).
The model is adapted from the Turing documentation: https://turinglang.org/docs/tutorials/bayesian-differential-equations/
=#
using OrdinaryDiffEq: ODEProblem, solve, Tsit5

# SciMLSensitivity is needed for reverse-mode AD on differential equations
import SciMLSensitivity

function lotka_volterra(du, u, p, t)
α, β, γ, δ = p
x, y = u
du[1] = (α - β * y) * x # prey
du[2] = (δ * x - γ) * y # predator
return nothing
end
u0 = [1.0, 1.0]
p = [1.5, 1.0, 3.0, 1.0]
tspan = (0.0, 10.0)
prob = ODEProblem(lotka_volterra, u0, tspan, p)
sol = solve(prob, Tsit5(); saveat = 0.1)
q = 1.7
odedata = rand.(Poisson.(q * Array(sol)))

@model function DifferentialEquations_ODE(data, prob)
α ~ truncated(Normal(1.5, 0.2); lower = 0.5, upper = 2.5)
β ~ truncated(Normal(1.1, 0.2); lower = 0, upper = 2)
γ ~ truncated(Normal(3.0, 0.2); lower = 1, upper = 4)
δ ~ truncated(Normal(1.0, 0.2); lower = 0, upper = 2)
q ~ truncated(Normal(1.7, 0.2); lower = 0, upper = 3)
p = [α, β, γ, δ]
predicted = solve(prob, Tsit5(); p = p, saveat = 0.1, abstol = 1e-6, reltol = 1e-6)
for i in eachindex(predicted)
data[:, i] ~ product_distribution(Poisson.(q .* predicted[i] .+ 1e-5))
end
return nothing
end

model = DifferentialEquations_ODE(odedata, prob)
79 changes: 79 additions & 0 deletions models/Lux_nn.jl
@@ -0,0 +1,79 @@
#=
This is an example of using Lux.jl with Turing to implement a Bayesian neural network.
The model is adapted from the Turing documentation: https://turinglang.org/docs/tutorials/bayesian-neural-networks/
=#
using Lux
using Random
using LinearAlgebra
using Functors


## Simulate data ##
# Number of points to generate
N = 80
M = round(Int, N / 4)
rng = Random.default_rng()
Random.seed!(rng, 1234)
Comment on lines +15 to +16
Member: I think it's fine to not seed the rng; as long as AD can run on any values, it's not too important which values it runs on. There's of course nothing wrong with seeding it, but it's generally best IMO to strip the model to the bare minimum needed.
Contributor (author): I've removed it everywhere I could. Lux.setup seemed to require it, though. Want me to look for ways to avoid it, or is it okay?
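Aside for readers of the thread above (not part of this PR's diff): Lux.setup takes an rng as its first positional argument, so some rng object is still needed even when no fixed seed is wanted. A minimal sketch of the unseeded variant, with illustrative layer sizes:

using Lux, Random

# Lux.setup draws the initial parameter values from the rng it is given,
# so an rng is required; an unseeded default rng is enough when
# reproducibility does not matter.
nn_example = Chain(Dense(2 => 3, tanh), Dense(3 => 1, tanh))
ps_example, st_example = Lux.setup(Random.default_rng(), nn_example)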


# Generate artificial data
x1s = rand(Float32, M) * 4.5f0;
x2s = rand(Float32, M) * 4.5f0;
xt1s = Array([[x1s[i] + 0.5f0; x2s[i] + 0.5f0] for i in 1:M])
x1s = rand(Float32, M) * 4.5f0;
x2s = rand(Float32, M) * 4.5f0;
append!(xt1s, Array([[x1s[i] - 5.0f0; x2s[i] - 5.0f0] for i in 1:M]))

x1s = rand(Float32, M) * 4.5f0;
x2s = rand(Float32, M) * 4.5f0;
xt0s = Array([[x1s[i] + 0.5f0; x2s[i] - 5.0f0] for i in 1:M])
x1s = rand(Float32, M) * 4.5f0;
x2s = rand(Float32, M) * 4.5f0;
append!(xt0s, Array([[x1s[i] - 5.0f0; x2s[i] + 0.5f0] for i in 1:M]))

# Store all the data for later
xs = [xt1s; xt0s]
ts = [ones(2 * M); zeros(2 * M)]


## Create neural network ##
# Construct a neural network using Lux
nn_initial = Chain(Dense(2 => 3, tanh), Dense(3 => 2, tanh), Dense(2 => 1, σ))

# Initialize the model weights and state
ps, st = Lux.setup(rng, nn_initial)

# Create a regularization term and a Gaussian prior variance term.
alpha = 0.09
sigma = sqrt(1.0 / alpha)

function vector_to_parameters(ps_new::AbstractVector, ps::NamedTuple)
@assert length(ps_new) == Lux.parameterlength(ps)
i = 1
function get_ps(x)
z = reshape(view(ps_new, i:(i + length(x) - 1)), size(x))
i += length(x)
return z
end
return fmap(get_ps, ps)
end

const nn = StatefulLuxLayer{true}(nn_initial, nothing, st)


## Create Turing model ##
# Specify the probabilistic model.
@model function Lux_nn(xs, ts; sigma = sigma, ps = ps, nn = nn)
# Sample the parameters
nparameters = Lux.parameterlength(nn_initial)
parameters ~ MvNormal(zeros(nparameters), Diagonal(abs2.(sigma .* ones(nparameters))))

# Forward NN to make predictions
preds = Lux.apply(nn, xs, f32(vector_to_parameters(parameters, ps)))

# Observe each prediction.
for i in eachindex(ts)
ts[i] ~ Bernoulli(preds[i])
end
end

model = Lux_nn(reduce(hcat, xs), ts)
models/metabayesian_MH.jl → models/metabayesian_Turing_AdvancedMH_MH.jl
@@ -1,7 +1,7 @@
#=
This is a "meta-Bayesian" model, where the generative model includes an inversion of a different generative model.
These types of models are common in cognitive modelling, where systems of interest (e.g. human subjects) are thought to use Bayesian inference to navigate their environment.
Here we use a Metropolis-Hasting sampler implemented with Turing as the inversion of the inner "subjective" model.
Here we use a Metropolis-Hastings sampler implemented with Turing and AdvancedMH as the inversion of the inner "subjective" model.
=#
using Random: Xoshiro

@@ -14,7 +14,7 @@ using Random: Xoshiro
end

# Outer model function
@model function metabayesian_MH(
@model function metabayesian_Turing_AdvancedMH_MH(
observation,
action,
inner_sampler = MH(),
@@ -41,4 +41,4 @@ end
action ~ Normal(subj_mean_expectationₜ, β)
end

model = metabayesian_MH(0.0, 1.0)
model = metabayesian_Turing_AdvancedMH_MH(0.0, 1.0)
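
For readers who do not open the full file: below is a minimal, self-contained sketch of the meta-Bayesian pattern described in the header comment above. It is illustrative only and not the file's actual contents; the inner model, priors, and names (inner_belief, outer_model) are assumptions made for the example.

using Turing
using Statistics: mean

# Inner "subjective" model: the agent's own inference about a latent mean.
@model function inner_belief(obs)
    μ ~ Normal(0, 1)
    obs ~ Normal(μ, 1)
end

# Outer model: invert the inner model with a Metropolis-Hastings sampler,
# then let the agent's posterior mean expectation drive the observed action.
@model function outer_model(observation, action; inner_sampler = MH(), n_samples = 100)
    β ~ truncated(Normal(0, 1); lower = 0)  # action noise
    chain = sample(inner_belief(observation), inner_sampler, n_samples; progress = false)
    subj_mean_expectation = mean(chain[:μ])
    action ~ Normal(subj_mean_expectation, β)
end

sketch_model = outer_model(0.0, 1.0)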
34 changes: 0 additions & 34 deletions models/ordinary_diffeq.jl

This file was deleted.