Skip to content

Commit

Permalink
Merge pull request #75 from Vaibhavdixit02/editbranch2
Browse files Browse the repository at this point in the history
Added first difference to L2Loss and changes in Likelihood tests
  • Loading branch information
ChrisRackauckas authored Apr 9, 2018
2 parents 96c9801 + 52b1e7f commit 553b72b
Show file tree
Hide file tree
Showing 4 changed files with 52 additions and 68 deletions.
34 changes: 28 additions & 6 deletions src/cost_functions.jl
Original file line number Diff line number Diff line change
Expand Up @@ -35,36 +35,58 @@ end

CostVData(t,data;loss_func = L2DistLoss,weight=nothing) = CostVData(t,data,loss_func,weight)

# Squared-error cost of a solution against sampled data, with an optional
# penalty on the first differences of the series (matches the data's
# increments as well as its values).
#
# Fields:
#   t             -- time points the data was sampled at
#   data          -- observed data, indexed as data[state, timepoint]
#   differ_weight -- weight on first-difference residuals; `nothing`
#                    disables the term, a Real applies uniformly, and an
#                    array is applied entrywise as differ_weight[j,i]
#   data_weight   -- entrywise weight on plain residuals; `nothing` means
#                    unweighted
# NOTE(review): the scraped diff had merged the old 3-field struct with this
# one; this is the post-commit 4-field definition.
struct L2Loss{T,D,U,W} <: DECostFunction
    t::T
    data::D
    differ_weight::U
    data_weight::W
end

# Evaluate the L2 loss of `sol` against the stored data.
#
# Cost = Σ_{i,j} w[j,i]·(data[j,i] − sol[j,i])²  (w ≡ 1 when
# `data_weight === nothing`), plus, when `differ_weight` is set, a penalty
# on the mismatch of consecutive differences
# (data[:,i] − data[:,i-1]) vs (sol[:,i] − sol[:,i-1]).
#
# If the solution is shorter than the data (e.g. the integrator exited
# early), missing points are padded with Inf so a failed solve gets an
# infinite cost.
#
# Fixes vs the committed version: the data term previously looped over
# `2:length(sol)`, silently dropping the first time point from the fit even
# when no difference penalty was requested; it now covers every point and
# only the difference term skips i == 1.
function (f::L2Loss)(sol::DESolution)
    data = f.data
    weight = f.data_weight
    diff_weight = f.differ_weight
    # Pad short solutions with Inf so premature integrator exits are
    # penalized rather than indexed out of bounds.
    fill_length = length(f.t) - length(sol)
    for i in 1:fill_length
        push!(sol.u, fill(Inf, size(sol[1])))
    end
    sumsq = 0.0
    @inbounds for i in 1:length(sol)
        # Plain squared residual, optionally weighted entrywise.
        for j in 1:length(sol[i])
            if weight === nothing
                sumsq += (data[j,i] - sol[j,i])^2
            else
                sumsq += ((data[j,i] - sol[j,i])^2) * weight[j,i]
            end
        end
        # First-difference penalty needs a previous point, so start at i == 2.
        if diff_weight !== nothing && i > 1
            for j in 1:length(sol[i])
                differ = (data[j,i] - data[j,i-1] - sol[j,i] + sol[j,i-1])^2
                if typeof(diff_weight) <: Real
                    sumsq += diff_weight * differ
                else
                    sumsq += diff_weight[j,i] * differ
                end
            end
        end
    end
    sumsq
end
# Convenience constructor: `differ_weight` may be given positionally
# (default `nothing`, i.e. no first-difference penalty) and `data_weight`
# by keyword, so `L2Loss(t,data)`, `L2Loss(t,data,0.3)` and
# `L2Loss(t,data,nothing)` all work.
#
# The committed pair of methods was defective: one keyword method passed
# `data_weight` into the `differ_weight` field slot (swapped positional
# order), and the optional-positional method silently overwrote the other's
# zero-argument form.  A single method removes the ambiguity.
# NOTE(review): the overwritten method's positional default was `1`;
# `nothing` is chosen here so a plain `L2Loss(t,data)` is an unweighted
# L2 loss — confirm against callers that rely on the default.
L2Loss(t, data, differ_weight = nothing; data_weight = nothing) =
    L2Loss(t, data, differ_weight, data_weight)

function (f::L2Loss)(sol::AbstractMonteCarloSolution)
mean(f.(sol.u))
Expand Down
61 changes: 2 additions & 59 deletions test/likelihood.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,100 +10,43 @@ tspan = (0.0,10.0)
p = [1.5,1.0]
prob1 = ODEProblem(pf_func,u0,tspan,p)
sol = solve(prob1,Tsit5())

t = collect(linspace(0,10,200))
# Sample the solution at `t` and add small Gaussian noise to each state.
function generate_data(sol,t)
    randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
    convert(Array,randomized)
end
# 100 noisy replicates; fit one Normal per (state, timepoint).
aggregate_data = convert(Array,VectorOfArray([generate_data(sol,t) for i in 1:100]))
distributions = [fit_mle(Normal,aggregate_data[i,j,:]) for i in 1:2, j in 1:200]

obj = build_loss_objective(prob1,Tsit5(),LogLikeLoss(t,distributions),
                           maxiters=10000,verbose=false)

bound1 = Tuple{Float64, Float64}[(0.5, 5),(0.5, 5)]
result = bboptimize(obj;SearchRange = bound1, MaxSteps = 11e3)

# `≈` was lost in extraction; an approximate comparison is intended.
@test result.archive_output.best_candidate ≈ [1.5,1.0] atol = 1e-1

# Lotka-Volterra with one fixed coefficient; fit the two free parameters.
pf_func = function (du,u,p,t)
    du[1] = p[1] * u[1] - p[2] * u[1]*u[2]
    du[2] = -3.0 * u[2] + u[1]*u[2]
end
u0 = [1.0;1.0]
tspan = (0.0,10.0)
p = [1.5,1.0]
prob1 = ODEProblem(pf_func,u0,tspan,p)
t = collect(linspace(0,10,200))
sol = solve(prob1,Tsit5(),saveat=t)

# Sample the solution at `t` and add small Gaussian noise to each state.
function generate_data(sol,t)
    randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
    convert(Array,randomized)
end
aggregate_data = convert(Array,VectorOfArray([generate_data(sol,t) for i in 1:100]))
data_distributions = [fit_mle(Normal,aggregate_data[i,j,:]) for i in 1:2, j in 1:200]
# NOTE(review): axis order here is (timepoint, state), the reverse of
# data_distributions -- confirm LogLikeLoss expects that layout.
diff_distributions = [fit_mle(Normal,aggregate_data[i,j,:]-aggregate_data[i,j-1,:]) for j in 2:200, i in 1:2 ]

obj = build_loss_objective(prob1,Tsit5(),LogLikeLoss(t,data_distributions,diff_distributions),
                           maxiters=10000,verbose=false)

bound1 = Tuple{Float64, Float64}[(0.5, 5),(0.5, 5)]
result = bboptimize(obj;SearchRange = bound1, MaxSteps = 11e3)

# `≈` was lost in extraction; an approximate comparison is intended.
@test result.archive_output.best_candidate ≈ [1.5,1.0] atol = 1e-1

# Same setup as above, but the first-difference likelihood is weighted 0.3.
pf_func = function (du,u,p,t)
    du[1] = p[1] * u[1] - p[2] * u[1]*u[2]
    du[2] = -3.0 * u[2] + u[1]*u[2]
end
u0 = [1.0;1.0]
tspan = (0.0,10.0)
p = [1.5,1.0]
prob1 = ODEProblem(pf_func,u0,tspan,p)
t = collect(linspace(0,10,200))
sol = solve(prob1,Tsit5(),saveat=t)

# Sample the solution at `t` and add small Gaussian noise to each state.
function generate_data(sol,t)
    randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
    convert(Array,randomized)
end
aggregate_data = convert(Array,VectorOfArray([generate_data(sol,t) for i in 1:100]))

data_distributions = [fit_mle(Normal,aggregate_data[i,j,:]) for i in 1:2, j in 1:200]
# NOTE(review): axis order here is (timepoint, state), the reverse of
# data_distributions -- confirm LogLikeLoss expects that layout.
diff_distributions = [fit_mle(Normal,aggregate_data[i,j,:]-aggregate_data[i,j-1,:]) for j in 2:200, i in 1:2 ]

obj = build_loss_objective(prob1,Tsit5(),LogLikeLoss(t,data_distributions,diff_distributions,0.3),
                           maxiters=10000,verbose=false)

bound1 = Tuple{Float64, Float64}[(0.5, 5),(0.5, 5)]
result = bboptimize(obj;SearchRange = bound1, MaxSteps = 11e3)

# `≈` was lost in extraction; an approximate comparison is intended.
@test result.archive_output.best_candidate ≈ [1.5,1.0] atol = 1e-1

# Multivariate variant: fit an MvNormal per timepoint across both states.
pf_func = function (du,u,p,t)
    du[1] = p[1] * u[1] - p[2] * u[1]*u[2]
    du[2] = -3.0 * u[2] + u[1]*u[2]
end
u0 = [1.0;1.0]
tspan = (0.0,10.0)
p = [1.5,1.0]
prob1 = ODEProblem(pf_func,u0,tspan,p)
sol = solve(prob1,Tsit5())

t = collect(linspace(0,10,200))
# Sample the solution at `t` and add small Gaussian noise to each state.
function generate_data(sol,t)
    randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
    convert(Array,randomized)
end
aggregate_data = convert(Array,VectorOfArray([generate_data(sol,t) for i in 1:100]))
# Columns of aggregate_data[:,j,:] are the 100 replicates at timepoint j.
distributions = [fit_mle(MvNormal,aggregate_data[:,j,:]) for j in 1:200]
diff_distributions = [fit_mle(MvNormal,aggregate_data[:,j,:]-aggregate_data[:,j-1,:]) for j in 2:200]

obj = build_loss_objective(prob1,Tsit5(),LogLikeLoss(t,distributions,diff_distributions),
                           maxiters=10000,verbose=false)

bound1 = Tuple{Float64, Float64}[(0.5, 5),(0.5, 5)]
result = bboptimize(obj;SearchRange = bound1, MaxSteps = 11e3)

# `≈` was lost in extraction; an approximate comparison is intended.
@test result.archive_output.best_candidate ≈ [1.5,1.0] atol = 1e-1
19 changes: 19 additions & 0 deletions test/tests_on_odes/l2loss_test.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
using BlackBoxOptim

# One free parameter, no weights at all.
cost_function = build_loss_objective(prob1,Tsit5(),L2Loss(t,data,nothing),
                                     maxiters=10000,verbose=false)
bound1 = Tuple{Float64, Float64}[(1, 2)]
result = bboptimize(cost_function;SearchRange = bound1, MaxSteps = 11e3)
# `≈` was lost in extraction; an approximate comparison is intended.
@test result.archive_output.best_candidate[1] ≈ 1.5 atol=3e-1

# Two free parameters, first-difference weight 0.3.
cost_function = build_loss_objective(prob2,Tsit5(),L2Loss(t,data,0.3),
                                     maxiters=10000,verbose=false)
bound2 = Tuple{Float64, Float64}[(1, 2),(2, 4)]
result = bboptimize(cost_function;SearchRange = bound2, MaxSteps = 11e3)
@test result.archive_output.best_candidate ≈ [1.5;3.0] atol=3e-1

# Four free parameters, default loss.
cost_function = build_loss_objective(prob3,Tsit5(),L2Loss(t,data),
                                     maxiters=10000,verbose=false)
bound3 = Tuple{Float64, Float64}[(1, 2),(0, 2), (2, 4), (0, 2)]
result = bboptimize(cost_function;SearchRange = bound3, MaxSteps = 11e3)
@test result.archive_output.best_candidate ≈ [1.5;1.0;3.0;1.0] atol=5e-1
6 changes: 3 additions & 3 deletions test/tests_on_odes/regularization_test.jl
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
using PenaltyFunctions, Optim

# The scraped diff merged old and new call lines; this keeps the
# post-commit form, which passes `nothing` as the explicit differ_weight.
cost_function_1 = build_loss_objective(prob1,Tsit5(),L2Loss(t,data,nothing),
                                       Regularization(0.6,L2Penalty()),maxiters=10000,verbose=false)
cost_function_2 = build_loss_objective(prob2,Tsit5(),L2Loss(t,data,nothing),verbose=false,
                                       Regularization(0.1,MahalanobisPenalty(eye(2))),maxiters=10000)
cost_function_3 = build_loss_objective(prob3,Tsit5(),L2Loss(t,data,nothing),verbose=false,
                                       Regularization(0.1,MahalanobisPenalty(eye(4))),maxiters=10000)

println("Use Optim BFGS to fit the parameter")
Expand Down

0 comments on commit 553b72b

Please sign in to comment.