Skip to content

Commit

Permalink
Changed the iteration-count check to use a case expression instead of a bool.guard
Browse files — browse the repository at this point in the history
  • Loading branch information
lucaspellegrinelli committed Jun 25, 2024
1 parent 2b6dd58 commit 032ec51
Show file tree
Hide file tree
Showing 3 changed files with 136 additions and 126 deletions.
69 changes: 36 additions & 33 deletions src/gleastsq/internal/methods/gauss_newton.gleam
Original file line number Diff line number Diff line change
Expand Up @@ -65,42 +65,45 @@ fn do_gauss_newton(
tolerance: Float,
lambda_reg: Float,
) {
use <- bool.guard(iterations == 0, Error(NonConverged))
case iterations {
0 -> Error(NonConverged)
_ -> {
let m = list.length(params)
let y_fit = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(y, y_fit)
use j <- result.try(result.replace_error(
jacobian(x, y_fit, func, params, epsilon),
JacobianTaskError,
))

let m = list.length(params)
let y_fit = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(y, y_fit)
use j <- result.try(result.replace_error(
jacobian(x, y_fit, func, params, epsilon),
JacobianTaskError,
))
let jt = nx.transpose(j)
let eye = nx.eye(m) |> nx.multiply(lambda_reg)
let jtj = nx.add(nx.dot(jt, j), eye)
let jt_r = nx.dot(jt, r)

let jt = nx.transpose(j)
let eye = nx.eye(m) |> nx.multiply(lambda_reg)
let jtj = nx.add(nx.dot(jt, j), eye)
let jt_r = nx.dot(jt, r)
use delta_solve <- result.try(result.map_error(
nx.solve(jtj, jt_r),
SolveError,
))
let delta = delta_solve |> nx.to_list_1d
let delta_norm = norm(delta, 2.0)

use delta_solve <- result.try(result.map_error(
nx.solve(jtj, jt_r),
SolveError,
))
let delta = delta_solve |> nx.to_list_1d
let delta_norm = norm(delta, 2.0)
let new_params =
list.zip(params, delta)
|> list.map(fn(p) { p.0 +. p.1 })

let new_params =
list.zip(params, delta)
|> list.map(fn(p) { p.0 +. p.1 })
use <- bool.guard(delta_norm <. tolerance, Ok(new_params))

use <- bool.guard(delta_norm <. tolerance, Ok(new_params))

do_gauss_newton(
x,
y,
func,
new_params,
iterations - 1,
epsilon,
tolerance,
lambda_reg,
)
do_gauss_newton(
x,
y,
func,
new_params,
iterations - 1,
epsilon,
tolerance,
lambda_reg,
)
}
}
}
83 changes: 43 additions & 40 deletions src/gleastsq/internal/methods/levenberg_marquardt.gleam
Original file line number Diff line number Diff line change
Expand Up @@ -88,49 +88,52 @@ fn do_levenberg_marquardt(
damping_increase: Float,
damping_decrease: Float,
) {
use <- bool.guard(iterations == 0, Error(NonConverged))
case iterations {
0 -> Error(NonConverged)
_ -> {
let m = list.length(params)
let y_fit = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(y, y_fit)
use j <- result.try(result.replace_error(
jacobian(x, y_fit, func, params, epsilon),
JacobianTaskError,
))

let m = list.length(params)
let y_fit = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(y, y_fit)
use j <- result.try(result.replace_error(
jacobian(x, y_fit, func, params, epsilon),
JacobianTaskError,
))
let jt = nx.transpose(j)
let lambda_eye = nx.eye(m) |> nx.multiply(damping)
let h_damped = nx.add(nx.dot(jt, j), lambda_eye)
let g = nx.dot(jt, r)

let jt = nx.transpose(j)
let lambda_eye = nx.eye(m) |> nx.multiply(damping)
let h_damped = nx.add(nx.dot(jt, j), lambda_eye)
let g = nx.dot(jt, r)
use delta_solve <- result.try(result.map_error(
nx.solve(h_damped, g),
SolveError,
))
let delta = delta_solve |> nx.to_list_1d
let delta_norm = norm(delta, 2.0)

use delta_solve <- result.try(result.map_error(
nx.solve(h_damped, g),
SolveError,
))
let delta = delta_solve |> nx.to_list_1d
let delta_norm = norm(delta, 2.0)
let new_params =
list.zip(params, delta)
|> list.map(fn(p) { p.0 +. p.1 })

let new_params =
list.zip(params, delta)
|> list.map(fn(p) { p.0 +. p.1 })
use <- bool.guard(delta_norm <. tolerance, Ok(new_params))

use <- bool.guard(delta_norm <. tolerance, Ok(new_params))

let new_y_fit = list.map(x, func(_, new_params)) |> nx.tensor
let new_r = nx.subtract(y, new_y_fit)
let prev_error = nx.sum(nx.pow(r, 2.0)) |> nx.to_number
let new_error = nx.sum(nx.pow(new_r, 2.0)) |> nx.to_number
let impr = new_error <. prev_error
do_levenberg_marquardt(
x,
y,
func,
ternary(impr, new_params, params),
iterations - 1,
epsilon,
tolerance,
damping *. ternary(impr, damping_decrease, damping_increase),
damping_increase,
damping_decrease,
)
let new_y_fit = list.map(x, func(_, new_params)) |> nx.tensor
let new_r = nx.subtract(y, new_y_fit)
let prev_error = nx.sum(nx.pow(r, 2.0)) |> nx.to_number
let new_error = nx.sum(nx.pow(new_r, 2.0)) |> nx.to_number
let impr = new_error <. prev_error
do_levenberg_marquardt(
x,
y,
func,
ternary(impr, new_params, params),
iterations - 1,
epsilon,
tolerance,
damping *. ternary(impr, damping_decrease, damping_increase),
damping_increase,
damping_decrease,
)
}
}
}
110 changes: 57 additions & 53 deletions src/gleastsq/internal/methods/trust_region_reflective.gleam
Original file line number Diff line number Diff line change
Expand Up @@ -185,58 +185,62 @@ fn do_trust_region_reflective(
delta: Float,
lambda_reg: Float,
) {
use <- bool.guard(iterations == 0, Error(NonConverged))
let m = list.length(params)

let f = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(f, y)
use j <- result.try(result.replace_error(
jacobian(x, f, func, params, epsilon),
JacobianTaskError,
))

let lambda_eye = nx.eye(m) |> nx.multiply(lambda_reg)
let jt = nx.transpose(j)
let b = nx.add(nx.dot(jt, j), lambda_eye)
let g = nx.dot(jt, r)

let g_norm = nx.norm(g) |> nx.to_number
use <- bool.guard(g_norm <. tolerance, Ok(params))

use non_bounded_p <- result.try(dogleg(j, g, b, delta))
let p =
dogleg_impose_bounds(
non_bounded_p,
nx.tensor(params),
lower_bounds,
upper_bounds,
)
let rho = rho(x, y, func, params, p, g)

let p_norm = nx.norm(p) |> nx.to_number
use <- bool.guard(p_norm <. tolerance, Ok(params))

let new_delta = case rho {
x if x >. 0.75 -> float.max(delta, 2.0 *. nx.to_number(nx.norm(p)))
x if x <. 0.25 -> delta *. 0.5
_ -> delta
case iterations {
0 -> Error(NonConverged)
_ -> {
let m = list.length(params)

let f = list.map(x, func(_, params)) |> nx.tensor
let r = nx.subtract(f, y)
use j <- result.try(result.replace_error(
jacobian(x, f, func, params, epsilon),
JacobianTaskError,
))

let lambda_eye = nx.eye(m) |> nx.multiply(lambda_reg)
let jt = nx.transpose(j)
let b = nx.add(nx.dot(jt, j), lambda_eye)
let g = nx.dot(jt, r)

let g_norm = nx.norm(g) |> nx.to_number
use <- bool.guard(g_norm <. tolerance, Ok(params))

use non_bounded_p <- result.try(dogleg(j, g, b, delta))
let p =
dogleg_impose_bounds(
non_bounded_p,
nx.tensor(params),
lower_bounds,
upper_bounds,
)
let rho = rho(x, y, func, params, p, g)

let p_norm = nx.norm(p) |> nx.to_number
use <- bool.guard(p_norm <. tolerance, Ok(params))

let new_delta = case rho {
x if x >. 0.75 -> float.max(delta, 2.0 *. nx.to_number(nx.norm(p)))
x if x <. 0.25 -> delta *. 0.5
_ -> delta
}

let new_params =
list.zip(params, nx.to_list_1d(p))
|> list.map(fn(p) { p.0 +. p.1 })

do_trust_region_reflective(
x,
y,
func,
ternary(rho >. 0.0, new_params, params),
lower_bounds,
upper_bounds,
iterations - 1,
epsilon,
tolerance,
new_delta,
lambda_reg,
)
}
}

let new_params =
list.zip(params, nx.to_list_1d(p))
|> list.map(fn(p) { p.0 +. p.1 })

do_trust_region_reflective(
x,
y,
func,
ternary(rho >. 0.0, new_params, params),
lower_bounds,
upper_bounds,
iterations - 1,
epsilon,
tolerance,
new_delta,
lambda_reg,
)
}

0 comments on commit 032ec51

Please sign in to comment.