diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 79ee55403..90f317179 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,8 +13,8 @@ jobs: fail-fast: false matrix: version: - - '1' - - '^1.9.0-0' #^1-0 and ^1.9-0 are not recognised + - '1.8' + - '1.9' #^1-0 and ^1.9-0 are not recognised # - 'nightly' # fails for zygote os: - ubuntu-latest @@ -26,6 +26,10 @@ jobs: include: - os: ubuntu-latest prefix: xvfb-run + # - version: 'nightly' + # os: ubuntu-latest + # arch: x64 + # allow_failure: true steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@latest diff --git a/.gitignore b/.gitignore index 2f122df4c..e8c3ac551 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,4 @@ fevd*.png solution*.png conditional_fcst*.png .CondaPkg/ +estimation__* diff --git a/Project.toml b/Project.toml index 424e11861..406927d03 100644 --- a/Project.toml +++ b/Project.toml @@ -1,15 +1,19 @@ name = "MacroModelling" uuid = "687ffad2-3618-405e-ac50-e0f7b9c75e44" authors = ["Thore Kockerols "] -version = "0.1.19" +version = "0.1.23" [deps] +AbstractDifferentiation = "c29ec348-61ec-40c8-8164-b8c60e9d9f3d" +AdvancedHMC = "0bf59076-c3b1-5ca4-86bd-e02cd72cde3d" AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" BlockTriangularForm = "adeb47b7-70bf-415a-bb24-c358563e873a" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +DifferentiableStateSpaceModels = "beacd9db-9e5e-4956-9b09-459a4b2028df" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb" DynarePreprocessor_jll = "23afba7c-24e5-5ee2-bc2c-b42e07f0492a" FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -21,6 +25,7 @@ LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +MuseInference = "43b88160-90c7-4f71-933b-9d65205cd921" Optim = "429524aa-4258-5aef-a3af-852621145aeb" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" @@ -38,6 +43,10 @@ Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +[weakdeps] +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" +Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" + [compat] AxisKeys = "^0.2" BlockTriangularForm = "^0.1" @@ -60,9 +69,11 @@ Requires = "^1" RuntimeGeneratedFunctions = "^0.5" SpecialFunctions = "^2" SpeedMapping = "^0.3" +StatsPlots = "^0.15" Subscripts = "^0.1" SymPy = "^1" Symbolics = "^5" +Turing = "^0.21" julia = "1.8" [extras] diff --git a/docs/src/tutorials/rbc.md b/docs/src/tutorials/rbc.md index 7768f2906..66110c584 100644 --- a/docs/src/tutorials/rbc.md +++ b/docs/src/tutorials/rbc.md @@ -175,7 +175,7 @@ For simulations this is possible by calling [`simulate`](@ref): simulate(RBC) ``` -which returns the simulated data in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. +which returns the simulated data in levels in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. 
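Since `simulate` now returns levels, its output can be passed straight to the filtering functions later in this diff, whose docstring examples drop the `data_in_levels = false` keyword. A minimal sketch, assuming the tutorial's `RBC` model is already defined; the indexing pattern mirrors the docstring examples in `src/get_functions.jl` further down:

```julia
simulation = simulate(RBC)               # 3-dimensional KeyedArray (Variables × Periods × Shocks), in levels
c_path = simulation([:c], :, :simulate)  # pull out simulated consumption, as in the docstring examples below
get_estimated_shocks(RBC, c_path)        # data is in levels, so the default data_in_levels = true applies
```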
## Conditional forecasts @@ -206,7 +206,7 @@ Note that for the first 4 periods the shock has no predetermined value and is de Finally we can get the conditional forecast: ```@repl tutorial_1 -get_conditional_forecast(RBC, conditions, shocks = shocks) +get_conditional_forecast(RBC, conditions, shocks = shocks, conditions_in_levels = false) ``` The function returns a `KeyedArray` with the values of the endogenous variables and shocks matching the conditions exactly. @@ -214,9 +214,11 @@ The function returns a `KeyedArray` with the values of the endogenous variables We can also plot the conditional forecast. Please note that you need to import the `StatsPlots` packages once before the first plot. In order to plot we can use: ```@repl tutorial_1 -plot_conditional_forecast(RBC, conditions, shocks = shocks) +plot_conditional_forecast(RBC, conditions, shocks = shocks, conditions_in_levels = false) ``` ![RBC conditional forecast](../assets/conditional_fcst__RBC__conditional_forecast__1.png) +and we need to set `conditions_in_levels = false` since the conditions are defined in deviations. + Note that the stars indicate the values the model is conditioned on. diff --git a/docs/src/tutorials/sw03.md b/docs/src/tutorials/sw03.md index b1cd8c66b..a173abf9f 100644 --- a/docs/src/tutorials/sw03.md +++ b/docs/src/tutorials/sw03.md @@ -308,7 +308,7 @@ For simulations this is possible by calling [`simulate`](@ref): simulate(SW03) ``` -which returns the simulated data in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. +which returns the simulated data in levels in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. ## Conditional forecasts @@ -340,7 +340,7 @@ The above shock `Matrix` means that for the first two periods shocks 1, 2, 3, 5, Finally we can get the conditional forecast: ```@repl tutorial_2 -get_conditional_forecast(SW03, conditions, shocks = shocks, variables = [:Y,:pi,:W]) +get_conditional_forecast(SW03, conditions, shocks = shocks, variables = [:Y,:pi,:W], conditions_in_levels = false) ``` The function returns a `KeyedArray` with the values of the endogenous variables and shocks matching the conditions exactly. @@ -348,11 +348,13 @@ The function returns a `KeyedArray` with the values of the endogenous variables We can also plot the conditional forecast. Please note that you need to import the `StatsPlots` packages once before the first plot. ```@repl tutorial_2 -plot_conditional_forecast(SW03,conditions, shocks = shocks, plots_per_page = 6,variables = [:Y,:pi,:W]) +plot_conditional_forecast(SW03,conditions, shocks = shocks, plots_per_page = 6,variables = [:Y,:pi,:W],conditions_in_levels = false) ``` ![SW03 conditional forecast 1](../assets/conditional_fcst__SW03__conditional_forecast__1.png) ![SW03 conditional forecast 2](../assets/conditional_fcst__SW03__conditional_forecast__2.png) +and we need to set `conditions_in_levels = false` since the conditions are defined in deviations. + Note that the stars indicate the values the model is conditioned on. 
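The recurring change in both tutorials is the explicit `conditions_in_levels = false`: the keyword now defaults to treating conditions as levels, while these examples state them as deviations from the non-stochastic steady state. A minimal sketch of the full call, assuming the `RBC` model from the first tutorial; the condition values and the `KeyedArray` construction are illustrative, not the tutorial's exact setup:

```julia
using AxisKeys

# Condition on :c in period 1 and :k in period 2; `nothing` entries are left unconditioned.
conditions = KeyedArray(Matrix{Union{Nothing,Float64}}(undef, 2, 2),
                        Variables = [:c, :k], Periods = 1:2)
conditions[1, 1] = 0.01   # deviation of consumption from its steady state in period 1
conditions[2, 2] = 0.02   # deviation of capital from its steady state in period 2

# No `shocks` keyword: all shocks stay free and are chosen to match the conditions exactly.
get_conditional_forecast(RBC, conditions, conditions_in_levels = false)
```

With `conditions_in_levels = true` (the new default) the values `0.01` and `0.02` would instead be read as levels and have the non-stochastic steady state subtracted internally.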
diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 58cb3c519..5c89f2501 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,8 +3,24 @@ ## High priority - [ ] add balanced growth path handling +- [ ] add JOSS article (see Makie.jl) +- [ ] write docs for (non-linear) solution algorithms +- [ ] for cond forecasting and kalman, get rid of observables input and use axis key of data input +- [ ] for cond forecasting allow fewer shocks than conditions with a warning. should be svd then +- [ ] have parser accept rss | (r[ss] - 1) * 400 = rss +- [ ] when doing calibration with optimiser have better return values when it doesn't find a solution (probably NaN) +- [ ] sampler returned negative std. investigate and come up with solution ensuring sampler can continue +- [ ] automatically adjust plots for different legend widths and heights +- [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies +- [ ] write to mod file for unicode characters. have them take what you would type: \alpha\bar +- [ ] have get_std take variables as an input +- [ ] more informative errors when something goes wrong when writing a model +- [ ] initial state accept keyed array +- [ ] bring solution error into an object of the model so we don't have to pass it on as output +- [ ] check that there is an error if it can't find the SS +- [ ] plot_model_estimates with unconditional forecast at the end - [ ] check if you can do analytic derivatives for higher order derivatives -- [ ] kick out unsused parameters from m.parameters +- [ ] kick out unused parameters from m.parameters - [ ] higher order solution derivs with Zygote - [ ] use cache for gradient calc in estimation (see DifferentiableStateSpaceModels) - [ ] use krylov instead of linearsolve and speed up sparse matrix calcs in implicit diff of higher order funcs @@ -29,7 +45,10 @@ - [ ] figure out combinations for inputs (parameters and variables in different formats for get_irf for example) - [ ] Find any SS by optimising over both SS guesses and parameter inputs - [ ] weed out SS solver and saved objects - + +- [x] fix ss of pruned solution in plotsolution. seems detached +- [x] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) +- [x] add pruning - [x] add other outputs from estimation (smoothed, filter states and shocks) - [x] shorten plot_irf (take inspiration from model estimate) - [x] fix solution plot @@ -96,7 +115,6 @@ - [ ] rewrite first order with riccati equation MatrixEquations.jl - [ ] exploit variable incidence and compression for higher order derivatives - [ ] for estimation use CUDA with st order: linear time iteration starting from last 1st order solution and then LinearSolveCUDA solvers for higher orders. this should bring benefits for large models and HANK models -- [ ] test on highly [nonlinear model](https://www.sciencedirect.com/science/article/pii/S0165188917300970) - [ ] pull request in StatsFuns to have norminv...
accept type numbers and add translation from matlab: norminv to StatsFuns norminvcdf - [ ] more informative errors when declaring equations/ calibration - [ ] unit equation errors @@ -106,6 +124,7 @@ - [ ] print legend for algorithm in last subplot of plot only - [ ] select variables for moments +- [x] test on highly [nonlinear model](https://www.sciencedirect.com/science/article/pii/S0165188917300970) # caldara et al is actually epstein zin wiht stochastic vol - [x] conditional forecasting - [x] find way to recover from failed SS solution which is written to init guess - [x] redo ugly solution for selecting parameters to differentiate for diff --git a/models/SW03.jl b/models/SW03.jl index fbf7c4c35..1505c9b55 100644 --- a/models/SW03.jl +++ b/models/SW03.jl @@ -1,105 +1,162 @@ @model SW03 begin -q[0] + beta * ((1 - tau) * q[1] + epsilon_b[1] * (r_k[1] * z[1] - psi^-1 * r_k[ss] * (-1 + exp(psi * (-1 + z[1])))) * (C[1] - h * C[0])^(-sigma_c)) + -q_f[0] + beta * ((1 - tau) * q_f[1] + epsilon_b[1] * (r_k_f[1] * z_f[1] - psi^-1 * r_k_f[ss] * (-1 + exp(psi * (-1 + z_f[1])))) * (C_f[1] - h * C_f[0])^(-sigma_c)) + -r_k[0] + alpha * epsilon_a[0] * mc[0] * L[0]^(1 - alpha) * (K[-1] * z[0])^(-1 + alpha) + -r_k_f[0] + alpha * epsilon_a[0] * mc_f[0] * L_f[0]^(1 - alpha) * (K_f[-1] * z_f[0])^(-1 + alpha) + -G[0] + T[0] + -G[0] + G_bar * epsilon_G[0] + -G_f[0] + T_f[0] + -G_f[0] + G_bar * epsilon_G[0] + -L[0] + nu_w[0]^-1 * L_s[0] + -L_s_f[0] + L_f[0] * (W_i_f[0] * W_f[0]^-1)^(lambda_w^-1 * (-1 - lambda_w)) + L_s_f[0] - L_f[0] + L_s_f[0] + lambda_w^-1 * L_f[0] * W_f[0]^-1 * (-1 - lambda_w) * (-W_disutil_f[0] + W_i_f[0]) * (W_i_f[0] * W_f[0]^-1)^(-1 + lambda_w^-1 * (-1 - lambda_w)) + Pi_ws_f[0] - L_s_f[0] * (-W_disutil_f[0] + W_i_f[0]) + Pi_ps_f[0] - Y_f[0] * (-mc_f[0] + P_j_f[0]) * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + -Q[0] + epsilon_b[0]^-1 * q[0] * (C[0] - h * C[-1])^(sigma_c) + -Q_f[0] + epsilon_b[0]^-1 * q_f[0] * (C_f[0] - h * C_f[-1])^(sigma_c) + -W[0] + epsilon_a[0] * mc[0] * (1 - alpha) * L[0]^(-alpha) * (K[-1] * z[0])^alpha + -W_f[0] + epsilon_a[0] * mc_f[0] * (1 - alpha) * L_f[0]^(-alpha) * (K_f[-1] * z_f[0])^alpha + -Y_f[0] + Y_s_f[0] + Y_s[0] - nu_p[0] * Y[0] + -Y_s_f[0] + Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + beta * epsilon_b[1] * (C_f[1] - h * C_f[0])^(-sigma_c) - epsilon_b[0] * R_f[0]^-1 * (C_f[0] - h * C_f[-1])^(-sigma_c) + beta * epsilon_b[1] * pi[1]^-1 * (C[1] - h * C[0])^(-sigma_c) - epsilon_b[0] * R[0]^-1 * (C[0] - h * C[-1])^(-sigma_c) + Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) - lambda_p^-1 * Y_f[0] * (1 + lambda_p) * (-mc_f[0] + P_j_f[0]) * P_j_f[0]^(-1 - lambda_p^-1 * (1 + lambda_p)) + epsilon_b[0] * W_disutil_f[0] * (C_f[0] - h * C_f[-1])^(-sigma_c) - omega * epsilon_b[0] * epsilon_L[0] * L_s_f[0]^sigma_l + -1 + xi_p * (pi[0]^-1 * pi[-1]^gamma_p)^(-lambda_p^-1) + (1 - xi_p) * pi_star[0]^(-lambda_p^-1) + -1 + (1 - xi_w) * (w_star[0] * W[0]^-1)^(-lambda_w^-1) + xi_w * (W[-1] * W[0]^-1)^(-lambda_w^-1) * (pi[0]^-1 * pi[-1]^gamma_w)^(-lambda_w^-1) + -Phi - Y_s[0] + epsilon_a[0] * L[0]^(1 - alpha) * (K[-1] * z[0])^alpha + -Phi - Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + epsilon_a[0] * L_f[0]^(1 - alpha) * (K_f[-1] * z_f[0])^alpha + std_eta_b * eta_b[x] - log(epsilon_b[0]) + rho_b * log(epsilon_b[-1]) + -std_eta_L * eta_L[x] - log(epsilon_L[0]) + rho_L * log(epsilon_L[-1]) + std_eta_I * eta_I[x] - log(epsilon_I[0]) + rho_I * log(epsilon_I[-1]) + std_eta_w * eta_w[x] - f_1[0] + f_2[0] + std_eta_a * eta_a[x] - log(epsilon_a[0]) + rho_a * 
log(epsilon_a[-1]) + std_eta_p * eta_p[x] - g_1[0] + g_2[0] * (1 + lambda_p) + std_eta_G * eta_G[x] - log(epsilon_G[0]) + rho_G * log(epsilon_G[-1]) + -f_1[0] + beta * xi_w * f_1[1] * (w_star[0]^-1 * w_star[1])^(lambda_w^-1) * (pi[1]^-1 * pi[0]^gamma_w)^(-lambda_w^-1) + epsilon_b[0] * w_star[0] * L[0] * (1 + lambda_w)^-1 * (C[0] - h * C[-1])^(-sigma_c) * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)) + -f_2[0] + beta * xi_w * f_2[1] * (w_star[0]^-1 * w_star[1])^(lambda_w^-1 * (1 + lambda_w) * (1 + sigma_l)) * (pi[1]^-1 * pi[0]^gamma_w)^(-lambda_w^-1 * (1 + lambda_w) * (1 + sigma_l)) + omega * epsilon_b[0] * epsilon_L[0] * (L[0] * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)))^(1 + sigma_l) + -g_1[0] + beta * xi_p * pi_star[0] * g_1[1] * pi_star[1]^-1 * (pi[1]^-1 * pi[0]^gamma_p)^(-lambda_p^-1) + epsilon_b[0] * pi_star[0] * Y[0] * (C[0] - h * C[-1])^(-sigma_c) + -g_2[0] + beta * xi_p * g_2[1] * (pi[1]^-1 * pi[0]^gamma_p)^(-lambda_p^-1 * (1 + lambda_p)) + epsilon_b[0] * mc[0] * Y[0] * (C[0] - h * C[-1])^(-sigma_c) + -nu_w[0] + (1 - xi_w) * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)) + xi_w * nu_w[-1] * (W[-1] * pi[0]^-1 * W[0]^-1 * pi[-1]^gamma_w)^(-lambda_w^-1 * (1 + lambda_w)) + -nu_p[0] + (1 - xi_p) * pi_star[0]^(-lambda_p^-1 * (1 + lambda_p)) + xi_p * nu_p[-1] * (pi[0]^-1 * pi[-1]^gamma_p)^(-lambda_p^-1 * (1 + lambda_p)) + -K[0] + K[-1] * (1 - tau) + I[0] * (1 - 0.5 * varphi * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])^2) + -K_f[0] + K_f[-1] * (1 - tau) + I_f[0] * (1 - 0.5 * varphi * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])^2) + U[0] - beta * U[1] - epsilon_b[0] * ((1 - sigma_c)^-1 * (C[0] - h * C[-1])^(1 - sigma_c) - omega * epsilon_L[0] * (1 + sigma_l)^-1 * L_s[0]^(1 + sigma_l)) + U_f[0] - beta * U_f[1] - epsilon_b[0] * ((1 - sigma_c)^-1 * (C_f[0] - h * C_f[-1])^(1 - sigma_c) - omega * epsilon_L[0] * (1 + sigma_l)^-1 * L_s_f[0]^(1 + sigma_l)) + -epsilon_b[0] * (C[0] - h * C[-1])^(-sigma_c) + q[0] * (1 - 0.5 * varphi * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])^2 - varphi * I[-1]^-1 * epsilon_I[0] * I[0] * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])) + beta * varphi * I[0]^-2 * epsilon_I[1] * q[1] * I[1]^2 * (-1 + I[0]^-1 * epsilon_I[1] * I[1]) + -epsilon_b[0] * (C_f[0] - h * C_f[-1])^(-sigma_c) + q_f[0] * (1 - 0.5 * varphi * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])^2 - varphi * I_f[-1]^-1 * epsilon_I[0] * I_f[0] * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])) + beta * varphi * I_f[0]^-2 * epsilon_I[1] * q_f[1] * I_f[1]^2 * (-1 + I_f[0]^-1 * epsilon_I[1] * I_f[1]) - std_eta_pi * eta_pi[x] - log(pi_obj[0]) + rho_pi_bar * log(pi_obj[-1]) + log(calibr_pi_obj) * (1 - rho_pi_bar) + -C[0] - I[0] - T[0] + Y[0] - psi^-1 * r_k[ss] * K[-1] * (-1 + exp(psi * (-1 + z[0]))) - -calibr_pi + std_eta_R * eta_R[x] - log(R[ss]^-1 * R[0]) + r_Delta_pi * (-log(pi[ss]^-1 * pi[-1]) + log(pi[ss]^-1 * pi[0])) + r_Delta_y * (-log(Y[ss]^-1 * Y[-1]) + log(Y[ss]^-1 * Y[0]) + log(Y_f[ss]^-1 * Y_f[-1]) - log(Y_f[ss]^-1 * Y_f[0])) + rho * log(R[ss]^-1 * R[-1]) + (1 - rho) * (log(pi_obj[0]) + r_pi * (-log(pi_obj[0]) + log(pi[ss]^-1 * pi[-1])) + r_Y * (log(Y[ss]^-1 * Y[0]) - log(Y_f[ss]^-1 * Y_f[0]))) + -C_f[0] - I_f[0] + Pi_ws_f[0] - T_f[0] + Y_f[0] + L_s_f[0] * W_disutil_f[0] - L_f[0] * W_f[0] - psi^-1 * r_k_f[ss] * K_f[-1] * (-1 + exp(psi * (-1 + z_f[0]))) + epsilon_b[0] * (K[-1] * r_k[0] - r_k[ss] * K[-1] * exp(psi * (-1 + z[0]))) * (C[0] - h * C[-1])^(-sigma_c) + epsilon_b[0] * (K_f[-1] * r_k_f[0] - r_k_f[ss] * K_f[-1] * exp(psi * (-1 + z_f[0]))) * (C_f[0] - h * C_f[-1])^(-sigma_c) + + + # Perceived 
inflation objective + std_eta_pi * eta_pi[x] - log(pi_obj[0]) + rho_pi_bar * log(pi_obj[-1]) + log(calibr_pi_obj) * (1 - rho_pi_bar) + + # Taylor rule + -calibr_pi + std_eta_R * eta_R[x] - log(R[ss]^-1 * R[0]) + r_Delta_pi * (-log(pi[ss]^-1 * pi[-1]) + log(pi[ss]^-1 * pi[0])) + r_Delta_y * (-log(Y[ss]^-1 * Y[-1]) + log(Y[ss]^-1 * Y[0]) + log(Y_f[ss]^-1 * Y_f[-1]) - log(Y_f[ss]^-1 * Y_f[0])) + rho * log(R[ss]^-1 * R[-1]) + (1 - rho) * (log(pi_obj[0]) + r_pi * (-log(pi_obj[0]) + log(pi[ss]^-1 * pi[-1])) + r_Y * (log(Y[ss]^-1 * Y[0]) - log(Y_f[ss]^-1 * Y_f[0]))) + end @parameters SW03 begin - lambda_p = .368 - G_bar = .362 + lambda_p = 0.368 + G_bar = 0.362 lambda_w = 0.5 - Phi = .819 + Phi = 0.819 - alpha = 0.3 - beta = 0.99 + alpha = 0.3 + beta = 0.99 gamma_w = 0.763 gamma_p = 0.469 - h = 0.573 - omega = 1 - psi = 0.169 + h = 0.573 + omega = 1 + psi = 0.169 - r_pi = 1.684 - r_Y = 0.099 - r_Delta_pi = 0.14 - r_Delta_y = 0.159 + r_pi = 1.684 + r_Y = 0.099 + r_Delta_pi = 0.14 + r_Delta_y = 0.159 sigma_c = 1.353 sigma_l = 2.4 - tau = 0.025 - varphi = 6.771 - xi_w = 0.737 - xi_p = 0.908 + tau = 0.025 + varphi = 6.771 + xi_w = 0.737 + xi_p = 0.908 - rho = 0.961 - rho_b = 0.855 - rho_L = 0.889 - rho_I = 0.927 - rho_a = 0.823 - rho_G = 0.949 - rho_pi_bar = 0.924 + rho = 0.961 + rho_b = 0.855 + rho_L = 0.889 + rho_I = 0.927 + rho_a = 0.823 + rho_G = 0.949 + rho_pi_bar = 0.924 std_scaling_factor = 10 - std_eta_b = 0.336 / std_scaling_factor - std_eta_L = 3.52 / std_scaling_factor - std_eta_I = 0.085 / std_scaling_factor - std_eta_a = 0.598 / std_scaling_factor - std_eta_w = 0.6853261 / std_scaling_factor - std_eta_p = 0.7896512 / std_scaling_factor - std_eta_G = 0.325 / std_scaling_factor - std_eta_R = 0.081 / std_scaling_factor - std_eta_pi = 0.017 / std_scaling_factor + std_eta_b = 0.336 / std_scaling_factor + std_eta_L = 3.52 / std_scaling_factor + std_eta_I = 0.085 / std_scaling_factor + std_eta_a = 0.598 / std_scaling_factor + std_eta_w = 0.6853261 / std_scaling_factor + std_eta_p = 0.7896512 / std_scaling_factor + std_eta_G = 0.325 / std_scaling_factor + std_eta_R = 0.081 / std_scaling_factor + std_eta_pi = 0.017 / std_scaling_factor calibr_pi_obj | 1 = pi_obj[ss] calibr_pi | pi[ss] = pi_obj[ss] diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 206fac42e..3f4d60ae7 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -58,7 +58,7 @@ export get_irfs, get_irf, get_IRF, simulate, get_simulation export get_conditional_forecast, plot_conditional_forecast export get_solution, get_first_order_solution, get_perturbation_solution export get_steady_state, get_SS, get_ss, get_non_stochastic_steady_state, get_stochastic_steady_state, get_SSS, steady_state, SS, SSS -export get_moments, get_covariance, get_standard_deviation, get_variance, get_var, get_std, get_cov, var, std, cov +export get_moments, get_statistics, get_covariance, get_standard_deviation, get_variance, get_var, get_std, get_cov, var, std, cov export get_autocorrelation, get_correlation, get_variance_decomposition, get_corr, get_autocorr, get_var_decomp, corr, autocorr export get_fevd, fevd, get_forecast_error_variance_decomposition, get_conditional_variance_decomposition export calculate_jacobian, calculate_hessian, calculate_third_order_derivatives @@ -68,7 +68,7 @@ export plotlyjs_backend, gr_backend export Beta, InverseGamma, Gamma, Normal export translate_mod_file, translate_dynare_file, import_model, import_dynare -export write_mod_file, write_dynare_file, write_to_dynare_file, export_dynare, export_to_dynare, 
export_mod_file +export write_mod_file, write_dynare_file, write_to_dynare_file, write_to_dynare, export_dynare, export_to_dynare, export_mod_file, export_model # Internal export irf, girf @@ -1211,7 +1211,7 @@ end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³; +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³, pruning::Bool; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ @@ -1222,22 +1222,35 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: 1 shock] - sol = speedmapping(state; - m! = (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] + if pruning + pruned_aug_state = copy(aug_state) + + sol = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + end, + tol = tol, maps_limit = 10000) + else + sol = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - end, - tol = tol, maps_limit = 10000) + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + end, + tol = tol, maps_limit = 10000) + end return sol.minimizer, sol.converged - end -function second_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂, SSS, 𝓂::β„³) +function second_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂, SSS, 𝓂::β„³, pruning::Bool) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ shock = zeros(𝓂.timings.nExo) @@ -1245,12 +1258,20 @@ function second_order_stochastic_steady_state_iterative_solution_condition(𝐒 aug_state = [SSS[𝓂.timings.past_not_future_and_mixed_idx] 1 shock] - - 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - SSS + + if pruning + pruned_aug_state = [zeros(𝓂.timings.nPast_not_future_and_mixed) + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 - SSS + else + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - SSS + end end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³, pruning::Bool) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚ = β„±.value.(𝐒₁𝐒₂) @@ -1259,12 +1280,12 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂)' # get f(vs) - val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂) + val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂, pruning) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here - B = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂), S₁Sβ‚‚) - A = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚, x, 𝓂), val) + B = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂, pruning), S₁Sβ‚‚) + A = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚, x, 𝓂, pruning), val) AΜ‚ = RF.lu(A, check = false) @@ -1289,12 +1310,12 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: end -function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false) where M +function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where M SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1302,7 +1323,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂) + state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂, pruning) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1321,7 +1342,7 @@ end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³; +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³, pruning::Bool; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ @@ -1332,21 +1353,36 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ 1 shock] - sol = speedmapping(state; - m! = (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - end, - tol = tol, maps_limit = 10000) + if pruning + pruned_aug_state = copy(aug_state) + + sol = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6 + end, + tol = tol, maps_limit = 10000) + else + sol = speedmapping(state; + m! 
= (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 + end, + tol = tol, maps_limit = 10000) + end + return sol.minimizer, sol.converged end -function third_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂𝐒₃, SSS, 𝓂::β„³) +function third_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂𝐒₃, SSS, 𝓂::β„³, pruning::Bool) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ shock = zeros(𝓂.timings.nExo) @@ -1355,11 +1391,19 @@ function third_order_stochastic_steady_state_iterative_solution_condition(𝐒 1 shock] - 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - SSS + if pruning + pruned_aug_state = [zeros(𝓂.timings.nPast_not_future_and_mixed) + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6 - SSS + else + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - SSS + end end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³, pruning::Bool) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚S₃ = β„±.value.(𝐒₁𝐒₂𝐒₃) @@ -1368,12 +1412,12 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂𝐒₃)' # get f(vs) - val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂) + val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂, pruning) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here - B = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂), S₁Sβ‚‚S₃) - A = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚S₃, x, 𝓂), val) + B = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂, pruning), S₁Sβ‚‚S₃) + A = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚S₃, x, 𝓂, pruning), val) AΜ‚ = RF.lu(A, check = false) @@ -1398,12 +1442,12 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ end -function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false) where M +function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where M SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1415,7 +1459,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂) + state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂, pruning) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1442,33 +1486,43 @@ function solve!(𝓂::β„³; verbose::Bool = false, silent::Bool = false) - @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :third_order] + @assert algorithm ∈ all_available_algorithms if dynamics 𝓂.solution.outdated_algorithms = union(intersect(𝓂.solution.algorithms,[algorithm]),𝓂.solution.outdated_algorithms) 𝓂.solution.algorithms = union(𝓂.solution.algorithms,[algorithm]) end - + write_parameters_input!(𝓂, parameters, verbose = verbose) - if 𝓂.model_hessian == Function[] && algorithm == :second_order + if 𝓂.model_hessian == Function[] && algorithm ∈ [:second_order, :pruned_second_order] start_time = time() write_functions_mapping!(𝓂, 2) if !silent println("Take symbolic derivatives up to second order:\t",round(time() - start_time, digits = 3), " seconds") end - elseif 𝓂.model_third_order_derivatives == Function[] && algorithm == :third_order + elseif 𝓂.model_third_order_derivatives == Function[] && algorithm ∈ [:third_order, :pruned_third_order] start_time = time() write_functions_mapping!(𝓂, 3) if !silent println("Take symbolic derivatives up to third order:\t",round(time() - start_time, digits = 3), " seconds") end end if dynamics - if any([:riccati, :first_order, :second_order, :third_order] .∈ ([algorithm],)) && any([:riccati, :first_order, :second_order, :third_order] .∈ (𝓂.solution.outdated_algorithms,)) + if (any([:riccati, :first_order] .∈ ([algorithm],)) && + any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || + (any([:second_order,:pruned_second_order] .∈ ([algorithm],)) && + any([:second_order,:pruned_second_order] .∈ (𝓂.solution.outdated_algorithms,))) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) 
+ SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) + # @assert solution_error < eps() "Could not find non stochastic steady steady." + βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol_mat, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + @assert solved "Could not find stable first order solution." + state_update₁ = function(state::Vector{Float64}, shock::Vector{Float64}) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end 𝓂.solution.perturbation.first_order = perturbation_solution(sol_mat, state_update₁) @@ -1479,7 +1533,11 @@ function solve!(𝓂::β„³; end - if any([:second_order, :third_order] .∈ ([algorithm],)) && any([:second_order, :third_order] .∈ (𝓂.solution.outdated_algorithms,)) + if (:second_order == algorithm && + :second_order ∈ 𝓂.solution.outdated_algorithms) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1496,7 +1554,34 @@ function solve!(𝓂::β„³; 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:second_order]) end + if (:pruned_second_order == algorithm && + :pruned_second_order ∈ 𝓂.solution.outdated_algorithms) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true) + + @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." + + state_updateβ‚‚ = function(state::Vector{Float64}, shock::Vector{Float64}, pruned_state::Vector{Float64}) + aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + pruned_aug_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2, 𝐒₁ * pruned_aug_state + end + + 𝓂.solution.perturbation.pruned_second_order = higher_order_perturbation_solution(𝐒₂,stochastic_steady_state,state_updateβ‚‚) + + 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:pruned_second_order]) + end + if :third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
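The pruned second-order branch above keeps a first-order companion state and evaluates the quadratic term on that companion only, which is what keeps the pruned recursion from feeding higher-order terms back into itself. A stand-alone sketch of the same step with plain ASCII placeholders (`S1`, `S2` for the first- and second-order solution matrices on the augmented state `[past states; 1; shock]`, `past_idx` for the indices of the predetermined variables); it mirrors the `state_update` closure above but is purely illustrative, not the package's internal API:

```julia
using LinearAlgebra

# One step of a pruned second-order state update (hypothetical helper, not part of the package).
function pruned_second_order_step(state, pruned_state, shock, S1, S2, past_idx)
    aug_state        = [state[past_idx]; 1; shock]          # second-order augmented state
    pruned_aug_state = [pruned_state[past_idx]; 1; shock]   # first-order companion state

    new_state  = S1 * aug_state + S2 * kron(pruned_aug_state, pruned_aug_state) / 2
    new_pruned = S1 * pruned_aug_state                      # the companion evolves at first order only

    return new_state, new_pruned
end
```

Iterating this pair is exactly how the pruned `irf` and `girf` loops further down thread the extra `pruned_state` argument through time; the pruned third-order variant adds an `S3 * kron(kron(p, p), p) / 6` term on the same companion state.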
@@ -1513,7 +1598,31 @@ function solve!(𝓂::β„³; 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:third_order]) end + if :pruned_third_order == algorithm && :pruned_third_order ∈ 𝓂.solution.outdated_algorithms + + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true) + + @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." + + state_update₃ = function(state::Vector{Float64}, shock::Vector{Float64}, pruned_state::Vector{Float64}) + aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + pruned_aug_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6, 𝐒₁ * pruned_aug_state + end + + 𝓂.solution.perturbation.pruned_third_order = higher_order_perturbation_solution(𝐒₃,stochastic_steady_state,state_update₃) + + 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:pruned_third_order]) + end + if any([:quadratic_iteration, :binder_pesaran] .∈ ([algorithm],)) && any([:quadratic_iteration, :binder_pesaran] .∈ (𝓂.solution.outdated_algorithms,)) + SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) @@ -1547,6 +1656,7 @@ function solve!(𝓂::β„³; end end + return nothing end @@ -1879,7 +1989,7 @@ function write_parameters_input!(𝓂::β„³, parameters::Dict{Symbol,Float64}; ve if !all(𝓂.parameter_values[ntrsct_idx] .== collect(values(parameters))) if verbose println("Parameter changes: ") end - 𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + 𝓂.solution.outdated_algorithms = Set(all_available_algorithms) end for i in 1:length(parameters) @@ -1939,7 +2049,7 @@ function write_parameters_input!(𝓂::β„³, parameters::Vector{Float64}; verbose println("Parameters unchanged.") else if !all(parameters .== 𝓂.parameter_values[1:length(parameters)]) - 𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + 𝓂.solution.outdated_algorithms = Set(all_available_algorithms) match_idx = [] for (i, v) in enumerate(parameters) @@ -1973,11 +2083,11 @@ end -function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
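Several hunks above replace the hard-coded algorithm lists with `all_available_algorithms`, whose definition is not part of this diff. A plausible reconstruction, inferred from the previous hard-coded list plus the pruned variants introduced here; treat it as an assumption about the constant, not its actual source:

```julia
# Hypothetical reconstruction; the real definition lives elsewhere in src/MacroModelling.jl.
const all_available_algorithms = [:linear_time_iteration, :riccati, :first_order,
                                  :quadratic_iteration, :binder_pesaran,
                                  :second_order, :pruned_second_order,
                                  :third_order, :pruned_third_order]
```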
@@ -1985,11 +2095,11 @@ function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N end -function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1997,11 +2107,11 @@ function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, para end -function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -2009,11 +2119,11 @@ function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S, end -function SSS_second_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_second_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
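The `SSS_second_order_parameter_derivatives` and `SSS_third_order_parameter_derivatives` wrappers above all follow the same pattern for differentiating with respect to a subset of parameters: copy the full parameter vector, convert it to the Dual element type, overwrite the selected entries, and evaluate. A generic, package-independent sketch of that pattern (all names hypothetical):

```julia
import ForwardDiff

# Jacobian of a vector-valued function of the full parameter vector with respect to a
# subset of parameters, using the copy / convert / overwrite pattern from the wrappers above.
function subset_jacobian(f, full_params::Vector{Float64}, subset_idx::Vector{Int})
    g = p_subset -> begin
        params = convert(Vector{eltype(p_subset)}, copy(full_params))  # promote to the Dual eltype
        params[subset_idx] = p_subset                                  # overwrite the selected entries
        f(params)                                                      # evaluate with Duals flowing through
    end
    return ForwardDiff.jacobian(g, full_params[subset_idx])
end

# e.g. subset_jacobian(p -> [p[1]^2, p[1] * p[2]], [1.0, 2.0, 3.0], [1, 2])
```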
@@ -2256,7 +2366,7 @@ end -function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false) +function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false)::Tuple{Matrix{Float64},Bool} βˆ‡β‚Š = @view βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] βˆ‡β‚€ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed .+ range(1, T.nVars)] βˆ‡β‚‹ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed + T.nVars .+ range(1, T.nPast_not_future_and_mixed)] @@ -2300,9 +2410,12 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ẑ₁₁ = RF.lu(Z₁₁, check = false) if !β„’.issuccess(Ẑ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ẑ₁₁) + Ẑ₁₁ = β„’.svd(Z₁₁, check = false) end + if !β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false + end else eigenselect = abs.(schdcmp.Ξ² ./ schdcmp.Ξ±) .< 1 @@ -2317,16 +2430,16 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ẑ₁₁ = RF.lu(Z₁₁, check = false) if !β„’.issuccess(Ẑ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false end end - + Ŝ₁₁ = RF.lu(S₁₁, check = false) if !β„’.issuccess(Ŝ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ŝ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false end - + D = Z₂₁ / Ẑ₁₁ L = Z₁₁ * (Ŝ₁₁ \ T₁₁) / Ẑ₁₁ @@ -2336,11 +2449,11 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Aβ‚Šα΅€ = @view Aβ‚Š[1:T.nPresent_only,:] AΜƒβ‚€α΅€ = @view Aβ‚€[1:T.nPresent_only, T.present_but_not_only_idx] Aβ‚‹α΅€ = @view Aβ‚‹[1:T.nPresent_only,:] - + AΜ„Μ‚β‚€α΅€ = RF.lu(AΜ„β‚€α΅€, check = false) if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(AΜ„Μ‚β‚€α΅€) + AΜ„Μ‚β‚€α΅€ = β„’.svd(collect(AΜ„β‚€α΅€)) end A = @views vcat(-(AΜ„Μ‚β‚€α΅€ \ (Aβ‚Šα΅€ * D * L + AΜƒβ‚€α΅€ * sol[T.dynamic_order,:] + Aβ‚‹α΅€)), sol) @@ -2348,8 +2461,7 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = return @view(A[T.reorder,:]), true end - -function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, explosive::Bool = false) #::AbstractMatrix{Real}, +function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, explosive::Bool = false) expand = @ignore_derivatives @views [β„’.diagm(ones(T.nVars))[T.future_not_past_and_mixed_idx,:], β„’.diagm(ones(T.nVars))[T.past_not_future_and_mixed_idx,:]] A = @views βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] * expand[1] @@ -2371,45 +2483,41 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, βˆ‡β‚)' - # get f(vs) - val, success = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) + val, solved = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) - if success + if solved # get J(f, vs) * ps (cheating). 
Write your custom rule here B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) - # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] - # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] - + AΜ‚ = RF.lu(A, check = false) - + if !β„’.issuccess(AΜ‚) AΜ‚ = β„’.svd(A) end jvp = -(AΜ‚ \ B) * ps else - jvp = fill(0,length(val),length(βˆ‡β‚)) * ps + jvp = fill(0,length(val),length(βˆ‡Μ‚β‚)) * ps end # pack: SoA -> AoS return reshape(map(val, eachrow(jvp)) do v, p β„±.Dual{Z}(v, p...) # Z is the tag - end,size(val)), success + end,size(val)), solved end -# riccati_AD = ImplicitFunction(riccati_forward, riccati_conditions) - -riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive), (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) +riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) -function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false) where S <: Real +function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) - A, success = riccati(βˆ‡β‚) - # A = riccati_forward(βˆ‡β‚, T = T, explosive = explosive) + A = riccati(βˆ‡β‚) - if !success - return hcat(A, zeros(T.nVars,T.nExo)), success + solved = @ignore_derivatives !(isapprox(sum(abs,A), 0, rtol = eps())) + + if !solved + return hcat(A, zeros(size(A,1),T.nExo)), solved end Jm = @view(β„’.diagm(ones(S,T.nVars))[T.past_not_future_and_mixed_idx,:]) @@ -2420,7 +2528,7 @@ function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive B = -((βˆ‡β‚Š * A * Jm + βˆ‡β‚€) \ βˆ‡β‚‘) - return hcat(A, B), success + return hcat(A, B), solved end @@ -2547,7 +2655,7 @@ end -function calculate_third_order_solution(βˆ‡β‚::AbstractMatrix{<: Real}, #first order derivatives +function calculate_third_order_solution(βˆ‡β‚::AbstractMatrix{<: Real}, #first order derivatives βˆ‡β‚‚::SparseMatrixCSC{<: Real}, #second order derivatives βˆ‡β‚ƒ::SparseMatrixCSC{<: Real}, #third order derivatives 𝑺₁::AbstractMatrix{<: Real}, #first order solution @@ -2677,7 +2785,11 @@ end -function irf(state_update::Function, initial_state::Vector{Float64}, level::Vector{Float64}, T::timings; +function irf(state_update::Function, + initial_state::Vector{Float64}, + level::Vector{Float64}, + pruning::Bool, + T::timings; periods::Int = 40, shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, variables::Symbol_input = :all, @@ -2694,12 +2806,12 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] @assert length(setdiff(shock_input, T.exo)) == 0 "Provided shocks which are not part of the model." 
- + shock_history = zeros(T.nExo, periods) shock_history[indexin(shock_input,T.exo),1:size(shocks)[2]] = shocks @@ -2715,10 +2827,19 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_history = randn(T.nExo,periods) Y = zeros(T.nVars,periods,1) - Y[:,1,1] = state_update(initial_state,shock_history[:,1]) - for t in 1:periods-1 - Y[:,t+1,1] = state_update(Y[:,t,1],shock_history[:,t+1]) + if pruning + Y[:,1,1], pruned_state = state_update(initial_state, shock_history[:,1], initial_state) + + for t in 1:periods-1 + Y[:,t+1,1], pruned_state = state_update(Y[:,t,1], shock_history[:,t+1], pruned_state) + end + else + Y[:,1,1] = state_update(initial_state,shock_history[:,1]) + + for t in 1:periods-1 + Y[:,t+1,1] = state_update(Y[:,t,1],shock_history[:,t+1]) + end end return KeyedArray(Y[var_idx,:,:] .+ level[var_idx]; Variables = T.var[var_idx], Periods = 1:periods, Shocks = [:simulate]) @@ -2727,10 +2848,18 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shck = T.nExo == 0 ? Vector{Float64}(undef, 0) : zeros(T.nExo) - Y[:,1,1] = state_update(initial_state,shck) + if pruning + Y[:,1,1], pruned_state = state_update(initial_state, shck, initial_state) - for t in 1:periods-1 - Y[:,t+1,1] = state_update(Y[:,t,1],shck) + for t in 1:periods-1 + Y[:,t+1,1], pruned_state = state_update(Y[:,t,1], shck, pruned_state) + end + else + Y[:,1,1] = state_update(initial_state,shck) + + for t in 1:periods-1 + Y[:,t+1,1] = state_update(Y[:,t,1],shck) + end end return KeyedArray(Y[var_idx,:,:] .+ level[var_idx]; Variables = T.var[var_idx], Periods = 1:periods, Shocks = [:none]) @@ -2743,10 +2872,18 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_history[ii,1] = negative_shock ? -1 : 1 end - Y[:,1,i] = state_update(initial_state,shock_history[:,1]) + if pruning + Y[:,1,i], pruned_state = state_update(initial_state, shock_history[:,1], initial_state) - for t in 1:periods-1 - Y[:,t+1,i] = state_update(Y[:,t,i],shock_history[:,t+1]) + for t in 1:periods-1 + Y[:,t+1,i], pruned_state = state_update(Y[:,t,i], shock_history[:,t+1],pruned_state) + end + else + Y[:,1,i] = state_update(initial_state,shock_history[:,1]) + + for t in 1:periods-1 + Y[:,t+1,i] = state_update(Y[:,t,i],shock_history[:,t+1]) + end end end @@ -2759,6 +2896,7 @@ end function girf(state_update::Function, initial_state::Vector{Float64}, level::Vector{Float64}, + pruning::Bool, T::timings; periods::Int = 40, shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, @@ -2778,7 +2916,7 @@ function girf(state_update::Function, shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] @@ -2796,11 +2934,17 @@ function girf(state_update::Function, var_idx = parse_variables_input_to_index(variables, T) Y = zeros(T.nVars, periods + 1, length(shock_idx)) + + pruned_initial_state = copy(initial_state) for (i,ii) in enumerate(shock_idx) for draw in 1:draws for i in 1:warmup_periods - initial_state = state_update(initial_state, randn(T.nExo)) + if pruning + initial_state, pruned_initial_state = state_update(initial_state, randn(T.nExo), pruned_initial_state) + else + initial_state = state_update(initial_state, randn(T.nExo)) + end end Y1 = zeros(T.nVars, periods + 1) @@ -2813,14 +2957,24 @@ function girf(state_update::Function, shock_history[ii,1] = negative_shock ? 
-1 : 1 end - Y1[:,1] = state_update(initial_state, baseline_noise) - Y2[:,1] = state_update(initial_state, baseline_noise) + if pruning + Y1[:,1], pruned_state1 = state_update(initial_state, baseline_noise, pruned_initial_state) + Y2[:,1], pruned_state2 = state_update(initial_state, baseline_noise, pruned_initial_state) + else + Y1[:,1] = state_update(initial_state, baseline_noise) + Y2[:,1] = state_update(initial_state, baseline_noise) + end for t in 1:periods baseline_noise = randn(T.nExo) - Y1[:,t+1] = state_update(Y1[:,t],baseline_noise) - Y2[:,t+1] = state_update(Y2[:,t],baseline_noise + shock_history[:,t]) + if pruning + Y1[:,t+1], pruned_state1 = state_update(Y1[:,t], baseline_noise, pruned_state1) + Y2[:,t+1], pruned_state2 = state_update(Y2[:,t], baseline_noise + shock_history[:,t], pruned_state2) + else + Y1[:,t+1] = state_update(Y1[:,t],baseline_noise) + Y2[:,t+1] = state_update(Y2[:,t],baseline_noise + shock_history[:,t]) + end end Y[:,:,i] += Y2 - Y1 @@ -2904,18 +3058,25 @@ end function parse_algorithm_to_state_update(algorithm::Symbol, 𝓂::β„³) if :linear_time_iteration == algorithm state_update = 𝓂.solution.perturbation.linear_time_iteration.state_update - + pruning = false elseif algorithm ∈ [:riccati, :first_order] state_update = 𝓂.solution.perturbation.first_order.state_update - + pruning = false elseif :second_order == algorithm state_update = 𝓂.solution.perturbation.second_order.state_update - + pruning = false + elseif :pruned_second_order == algorithm + state_update = 𝓂.solution.perturbation.pruned_second_order.state_update + pruning = true elseif :third_order == algorithm state_update = 𝓂.solution.perturbation.third_order.state_update + pruning = false + elseif :pruned_third_order == algorithm + state_update = 𝓂.solution.perturbation.pruned_third_order.state_update + pruning = true end - return state_update + return state_update, pruning end @@ -2924,7 +3085,7 @@ function calculate_covariance(parameters::Vector{<: Real}, 𝓂::β„³; verbose::B βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) covar_raw = calculate_covariance_forward(sol,T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars)) @@ -2994,7 +3155,7 @@ function calculate_kalman_filter_loglikelihood(𝓂::β„³, data::AbstractArray{Fl if isnothing(parameters) parameters = 𝓂.parameter_values else - ub = @ignore_derivatives fill(1e12+rand(),length(𝓂.parameters)) + ub = @ignore_derivatives fill(1e12+rand(),length(𝓂.parameters) + length(𝓂.βž•_vars)) lb = @ignore_derivatives -ub for (i,v) in enumerate(𝓂.bounded_vars) @@ -3023,7 +3184,11 @@ function calculate_kalman_filter_loglikelihood(𝓂::β„³, data::AbstractArray{Fl βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + + if !solved + return -Inf + end observables_and_states = @ignore_derivatives sort(union(𝓂.timings.past_not_future_and_mixed_idx,indexin(observables,sort(union(𝓂.aux,𝓂.var,𝓂.exo_present))))) @@ -3095,7 +3260,7 @@ function filter_and_smooth(𝓂::β„³, data_in_deviations::AbstractArray{Float64} βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views sol[:,1:𝓂.timings.nPast_not_future_and_mixed] * 
β„’.diagm(ones(𝓂.timings.nVars))[𝓂.timings.past_not_future_and_mixed_idx,:] diff --git a/src/dynare.jl b/src/dynare.jl index 94b1b5acb..105934921 100644 --- a/src/dynare.jl +++ b/src/dynare.jl @@ -142,7 +142,7 @@ function write_mod_file(m::β„³) [print(io,string(p) * " ") for p in m.parameters_in_equations] - println(io,";\n\n# Parameter definitions:") + println(io,";\n\n% Parameter definitions:") for (i,p) in enumerate(m.parameters) println(io, "\t" * string(p) * "\t=\t" * string(m.parameter_values[i]) * ";") end @@ -196,4 +196,9 @@ write_to_dynare_file = write_mod_file """ See [`write_mod_file`](@ref) """ -write_to_dynare = write_mod_file \ No newline at end of file +write_to_dynare = write_mod_file + +""" +See [`write_mod_file`](@ref) +""" +export_model = write_mod_file \ No newline at end of file diff --git a/src/get_functions.jl b/src/get_functions.jl index 4944d7d2b..7307a6c87 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -32,7 +32,7 @@ end; simulation = simulate(RBC); -get_shock_decomposition(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_shock_decomposition(RBC,simulation([:c],:,:simulate)) # output 3-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables ∈ 4-element Vector{Symbol} @@ -63,7 +63,7 @@ And data, 4Γ—2Γ—40 Array{Float64, 3}: ``` """ function get_shock_decomposition(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, # shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, @@ -71,13 +71,15 @@ function get_shock_decomposition(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -85,7 +87,7 @@ function get_shock_decomposition(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) # var_idx = parse_variables_input_to_index(variables, 𝓂.timings) @@ -129,7 +131,7 @@ end; simulation = simulate(RBC); -get_estimated_shocks(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_shocks(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Shocks ∈ 1-element Vector{Symbol} @@ -140,19 +142,21 @@ And data, 1Γ—40 Matrix{Float64}: ``` """ function get_estimated_shocks(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, data_in_levels::Bool = true, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -160,7 +164,7 @@ function get_estimated_shocks(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 3 : 7]; Shocks = map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.timings.exo), Periods = 1:size(data,2)) end @@ -205,7 +209,7 @@ end; simulation = simulate(RBC); -get_estimated_variables(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_variables(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables ∈ 4-element Vector{Symbol} @@ -219,7 +223,7 @@ And data, 4Γ—40 Matrix{Float64}: ``` """ function get_estimated_variables(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, data_in_levels::Bool = true, @@ -227,13 +231,15 @@ function get_estimated_variables(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -241,7 +247,7 @@ function get_estimated_variables(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(levels ? filtered_and_smoothed[smooth ? 1 : 5] .+ reference_steady_state[1:length(𝓂.var)] : filtered_and_smoothed[smooth ? 1 : 5]; Variables = 𝓂.timings.var, Periods = 1:size(data,2)) end @@ -284,7 +290,7 @@ end; simulation = simulate(RBC); -get_estimated_variable_standard_deviations(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_variable_standard_deviations(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Standard_deviations ∈ 4-element Vector{Symbol} @@ -298,20 +304,22 @@ And data, 4Γ—40 Matrix{Float64}: ``` """ function get_estimated_variable_standard_deviations(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, data_in_levels::Bool = true, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -319,7 +327,7 @@ function get_estimated_variable_standard_deviations(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 2 : 6]; Standard_deviations = 𝓂.timings.var, Periods = 1:size(data,2)) end @@ -342,7 +350,7 @@ Limited to the first order perturbation solution of the model. - `periods` [Default: `40`, Type: `Int`]: the total number of periods is the sum of the argument provided here and the maximum of periods of the shocks or conditions argument. - $PARAMETERS - $VARIABLES -- `conditions_in_levels` [Default: `false`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. +- `conditions_in_levels` [Default: `true`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. 
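The remaining two estimation helpers changed above follow the same conventions; `get_estimated_variables` in addition uses a `levels` switch in its return statement to add the steady state back to the smoothed series. A sketch:

```julia
# smoothed variables and their standard deviations, passing the simulated levels directly
get_estimated_variables(RBC, simulated([:k], :, :simulate))
get_estimated_variable_standard_deviations(RBC, simulated([:k], :, :simulate))
```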
- $LEVELS - $VERBOSE @@ -383,7 +391,7 @@ conditions[2,2] = .02 shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) shocks[1,1] = .05 -get_conditional_forecast(RBC_CME, conditions, shocks = shocks) +get_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables_and_shocks ∈ 9-element Vector{Symbol} @@ -421,10 +429,11 @@ And data, 9Γ—42 Matrix{Float64}: function get_conditional_forecast(𝓂::β„³, conditions::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}}; shocks::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}, Nothing} = nothing, + initial_state::Vector{Float64} = [0.0], periods::Int = 40, parameters = nothing, variables::Symbol_input = :all_including_auxilliary, - conditions_in_levels::Bool = false, + conditions_in_levels::Bool = true, levels::Bool = false, verbose::Bool = false) @@ -478,14 +487,15 @@ function get_conditional_forecast(𝓂::β„³, shocks = Matrix{Union{Nothing,Float64}}(undef,length(𝓂.exo),periods) end - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) - state_update = parse_algorithm_to_state_update(:first_order, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(:first_order, 𝓂) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) + initial_state = initial_state == [0.0] ? zeros(𝓂.timings.nVars) : initial_state - reference_steady_state[1:length(𝓂.var)] var_idx = parse_variables_input_to_index(variables, 𝓂.timings) @@ -515,9 +525,9 @@ function get_conditional_forecast(𝓂::β„³, shocks[free_shock_idx,1] .= 0 - shocks[free_shock_idx,1] = CC \ (conditions[cond_var_idx,1] - state_update(zeros(size(C,1)), Float64[shocks[:,1]...])[cond_var_idx]) + shocks[free_shock_idx,1] = CC \ (conditions[cond_var_idx,1] - state_update(initial_state, Float64[shocks[:,1]...])[cond_var_idx]) - Y[:,1] = state_update(zeros(size(C,1)), Float64[shocks[:,1]...]) + Y[:,1] = state_update(initial_state, Float64[shocks[:,1]...]) for i in 2:size(conditions,2) cond_var_idx = findall(conditions[:,i] .!= nothing) @@ -531,11 +541,12 @@ function get_conditional_forecast(𝓂::β„³, @assert length(free_shock_idx) >= length(cond_var_idx) "Exact matching only possible with more free shocks than conditioned variables. Period " * repr(i) * " has " * repr(length(free_shock_idx)) * " free shock(s) and " * repr(length(cond_var_idx)) * " conditioned variable(s)." - CC = C[cond_var_idx,free_shock_idx] + CC = C[cond_var_idx,free_shock_idx] if length(cond_var_idx) == 1 @assert any(CC .!= 0) "Free shocks have no impact on conditioned variable in period " * repr(i) * "." elseif length(free_shock_idx) == length(cond_var_idx) + CC = RF.lu(CC, check = false) @assert β„’.issuccess(CC) "Numerical stabiltiy issues for restrictions in period " * repr(i) * "." 
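Because the hunk above flips the default of `conditions_in_levels` to `true` and adds the `initial_state` keyword, existing calls that supply conditions as deviations now need the keyword explicitly. A sketch with the two-variable `RBC` model from before; the conditions are given as a plain `Matrix` whose rows are assumed to follow the model's variable ordering (here `k`, then `z`):

```julia
# condition capital to be 1% above its non stochastic steady state in period 1
conditions = Matrix{Union{Nothing,Float64}}(undef, 2, 1)
conditions[1,1] = .01

# the conditions are deviations, so the new default conditions_in_levels = true is switched off;
# the new initial_state keyword (a Vector in levels) can be used to start away from the steady state
get_conditional_forecast(RBC, conditions, conditions_in_levels = false)
```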
@@ -624,7 +635,7 @@ function get_irf(𝓂::β„³, shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] @@ -643,7 +654,7 @@ function get_irf(𝓂::β„³, βˆ‡β‚ = calculate_jacobian(parameters, reference_steady_state, 𝓂) - sol_mat, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) state_update = function(state::Vector, shock::Vector) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end @@ -688,7 +699,7 @@ end """ $(SIGNATURES) -Return impulse response functions (IRFs) of the model in a 3-dimensional KeyedArray +Return impulse response functions (IRFs) of the model in a 3-dimensional KeyedArray. Values are returned in absolute deviations from the (non) stochastic steady state by default. # Arguments - $MODEL @@ -750,31 +761,37 @@ function get_irf(𝓂::β„³; levels::Bool = false, verbose::Bool = false) - write_parameters_input!(𝓂,parameters, verbose = verbose) - - solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) shocks = 𝓂.timings.nExo == 0 ? :none : shocks @assert !(shocks == :none && generalised_irf) "Cannot compute generalised IRFs for model without shocks." - state_update = parse_algorithm_to_state_update(algorithm, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(algorithm, 𝓂) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) if algorithm == :second_order SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state else SSS_delta = zeros(length(𝓂.var)) end if levels if algorithm == :second_order - reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state#[indexin(full_SS,sort(union(𝓂.var,𝓂.exo_present)))] + reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + reference_steady_state = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order - reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state#[indexin(full_SS,sort(union(𝓂.var,𝓂.exo_present)))] + reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + reference_steady_state = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state end end @@ -784,6 +801,7 @@ function get_irf(𝓂::β„³; girfs = girf(state_update, SSS_delta, levels ? reference_steady_state : SSS_delta, + pruning, 𝓂.timings; periods = periods, shocks = shocks, @@ -794,6 +812,7 @@ function get_irf(𝓂::β„³; irfs = irf(state_update, initial_state, levels ? 
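Stripping the `β‚β‚“β‚Ž` suffix from the shock axis keys means a shock series produced by `get_estimated_shocks` can now be fed straight back into `get_irf`. A sketch continuing the example above:

```julia
# the estimated shock series carries the β‚β‚“β‚Ž suffix in its Shocks axis keys
estimated_shocks = get_estimated_shocks(RBC, simulated([:k], :, :simulate))

# get_irf now maps these keys back onto the model's shock names
get_irf(RBC, shocks = estimated_shocks, periods = 0)
```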
reference_steady_state : SSS_delta, + pruning, 𝓂.timings; periods = periods, shocks = shocks, @@ -816,14 +835,14 @@ See [`get_irf`](@ref) get_IRF = get_irf """ -Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. +Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. Function returns values in levels by default. """ -simulate(args...; kwargs...) = get_irf(args...; kwargs..., shocks = :simulate)#[:,:,1] +simulate(args...; kwargs...) = get_irf(args...; levels = true, kwargs..., shocks = :simulate)#[:,:,1] """ -Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. +Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. Function returns values in levels by default. """ -get_simulation(args...; kwargs...) = get_irf(args...; kwargs..., shocks = :simulate)#[:,:,1] +get_simulation(args...; kwargs...) = get_irf(args...; levels = true, kwargs..., shocks = :simulate)#[:,:,1] """ Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. @@ -894,9 +913,9 @@ function get_steady_state(𝓂::β„³; verbose::Bool = false, silent::Bool = true) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) vars_in_ss_equations = sort(collect(setdiff(reduce(union,get_symbols.(𝓂.ss_aux_equations)),union(𝓂.parameters_in_equations,𝓂.βž•_vars)))) @@ -922,6 +941,12 @@ function get_steady_state(𝓂::β„³; if algorithm == :third_order solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) + SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) + SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state else solve!(𝓂, verbose = verbose, dynamics = true, algorithm = :second_order, silent = silent) SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.second_order.stochastic_steady_state#[indexin(sort(union(𝓂.var,𝓂.exo_present)),sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))] @@ -943,19 +968,41 @@ function get_steady_state(𝓂::β„³; if derivatives if stochastic if algorithm == :third_order + dSSS = β„±.jacobian(x->begin SSS = SSS_third_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose) [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] end, 𝓂.parameter_values[param_idx]) return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + + elseif algorithm == :pruned_third_order + + dSSS = β„±.jacobian(x->begin + SSS = SSS_third_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose, pruning = true) + [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] 
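Two user-facing consequences of this hunk are that `simulate`/`get_simulation` now return levels by default and that the pruned perturbation solutions can be requested wherever an `algorithm` is chosen. A sketch:

```julia
# simulation in levels (the new default of the simulate wrapper)
simulate(RBC)

# impulse responses under the pruned third order solution,
# reported as deviations from the stochastic steady state
get_irf(RBC, algorithm = :pruned_third_order)
```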
+ end, 𝓂.parameter_values[param_idx]) + + return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + + elseif algorithm == :pruned_second_order + + dSSS = β„±.jacobian(x->begin + SSS = SSS_second_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose, pruning = true) + [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] + end, 𝓂.parameter_values[param_idx]) + + return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + else + dSSS = β„±.jacobian(x->begin SSS = SSS_second_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose) [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] end, 𝓂.parameter_values[param_idx]) return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + end else # dSS = β„±.jacobian(x->𝓂.SS_solve_func(x, 𝓂),𝓂.parameter_values) @@ -1035,7 +1082,7 @@ get_ss = get_steady_state """ $(SIGNATURES) -Return the linearised solution and the non stochastic steady state (SS) of the model. +Return the solution of the model. In the linear case it returns the linearised solution and the non stochastic steady state (SS) of the model. In the nonlinear case (higher order perturbation) the function returns a multidimensional array with the endogenous variables as the second dimension and the state variables and shocks as the other dimensions. # Arguments - $MODEL @@ -1044,7 +1091,7 @@ Return the linearised solution and the non stochastic steady state (SS) of the m - `algorithm` [Default: `:first_order`, Type: `Symbol`]: algorithm to solve for the dynamics of the model. Only linear algorithms allowed. - $VERBOSE -The returned `KeyedArray` shows the SS, policy and transition functions of the model. The columns show the varibales including auxilliary endogenous and exogenous variables (due to leads and lags > 1). The rows are the SS, followed by the states, and exogenous shocks. +The returned `KeyedArray` shows as columns the endogenous variables inlcuding the auxilliary endogenous and exogenous variables (due to leads and lags > 1). The rows and other dimensions (depending on the chosen perturbation order) include the SS for the linear case only, followed by the states, and exogenous shocks. Subscripts following variable names indicate the timing (e.g. `variableβ‚β‚‹β‚β‚Ž` indicates the variable being in the past). Superscripts indicate leads or lags (e.g. `variableᴸ⁽²⁾` indicates the variable being in lead by two periods). If no super- or subscripts follow the variable name, the variable is in the present. # Examples ```jldoctest @@ -1083,11 +1130,11 @@ function get_solution(𝓂::β„³; algorithm::Symbol = :first_order, verbose::Bool = false) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) - @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran] "This function only works for linear solutions. Choose a respective algorithm." 
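The stochastic steady state can accordingly also be requested for the pruned solutions. A sketch, assuming the existing `stochastic` and `derivatives` keywords of `get_steady_state`:

```julia
# stochastic steady state implied by the pruned second order solution
get_steady_state(RBC, stochastic = true, algorithm = :pruned_second_order, derivatives = false)
```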
+ # @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran] "This function only works for linear solutions. Choose a respective algorithm." - solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) if algorithm == :linear_time_iteration solution_matrix = 𝓂.solution.perturbation.linear_time_iteration.solution_matrix @@ -1097,9 +1144,51 @@ function get_solution(𝓂::β„³; solution_matrix = 𝓂.solution.perturbation.quadratic_iteration.solution_matrix end - KeyedArray([𝓂.solution.non_stochastic_steady_state[1:length(𝓂.var)] solution_matrix]'; - Steady_state__States__Shocks = [:Steady_state; map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], - Variables = 𝓂.var) + if algorithm == :second_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.second_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :pruned_second_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.pruned_second_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :third_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.third_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3,4]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + States__ShocksΒ³ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :pruned_third_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.pruned_third_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3,4]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 
𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + States__ShocksΒ³ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + else + return KeyedArray([𝓂.solution.non_stochastic_steady_state[1:length(𝓂.var)] solution_matrix]'; + Steady_state__States__Shocks = [:Steady_state; map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var) + end end @@ -1136,28 +1225,36 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) if solution_error > tol || isnan(solution_error) - return -Inf + if algorithm == :second_order + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), spzeros(𝓂.var,2), false + elseif algorithm == :third_order + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), spzeros(𝓂.var,2), spzeros(𝓂.var,2), false + else + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), false + end end βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) - if algorithm == :second_order - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.timings.nVars,2), success + if !solved + if algorithm == :second_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), false + elseif algorithm == :third_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), spzeros(𝓂.var,2), false + else + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, false end + end + if algorithm == :second_order βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, true elseif algorithm == :third_order - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.timings.nVars,2), spzeros(𝓂.timings.nVars,2), success - end - βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) @@ -1166,13 +1263,9 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol 𝐒₃ = calculate_third_order_solution(βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃, true else - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, success - end - - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, true end end @@ -1260,15 +1353,15 @@ function get_conditional_variance_decomposition(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) SS_and_pars, _ = 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - 𝑺₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝑺₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views 𝑺₁[:,1:𝓂.timings.nPast_not_future_and_mixed] * 
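With the change above, `get_solution` returns the higher-order solutions as multidimensional `KeyedArray`s whose state/shock dimensions (including the `:Volatility` entry) surround the `Variables` dimension. A sketch:

```julia
# first order: steady state, states and shocks as rows, variables as columns
get_solution(RBC)

# second order: a 3-dimensional KeyedArray including the Volatility perturbation entry
get_solution(RBC, algorithm = :second_order)
```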
β„’.diagm(ones(𝓂.timings.nVars))[indexin(𝓂.timings.past_not_future_and_mixed_idx,1:𝓂.timings.nVars),:] @@ -1380,15 +1473,15 @@ function get_variance_decomposition(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) SS_and_pars, solution_error = 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) variances_by_shock = reduce(hcat,[β„’.diag(calculate_covariance_forward(sol[:,[1:𝓂.timings.nPast_not_future_and_mixed..., 𝓂.timings.nPast_not_future_and_mixed+i]], T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars))) for i in 1:𝓂.timings.nExo]) @@ -1453,9 +1546,9 @@ function get_correlation(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) covar_dcmp, ___, __, _ = calculate_covariance(𝓂.parameter_values, 𝓂, verbose = verbose) @@ -1526,9 +1619,9 @@ function get_autocorrelation(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) covar_dcmp, sol, __, _ = calculate_covariance(𝓂.parameter_values, 𝓂, verbose = verbose) @@ -1628,9 +1721,9 @@ function get_moments(𝓂::β„³; parameter_derivatives::Symbol_input = :all, verbose::Bool = false)#limit output by selecting pars and vars like for plots and irfs!? - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) if parameter_derivatives == :all length_par = length(𝓂.parameters) @@ -1750,10 +1843,12 @@ Function to use when differentiating model moments with repect to parameters. 
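The second-moment functions in this hunk only change how parameter updates are passed on (through `solve!` instead of `write_parameters_input!`), so their call signatures stay the same. A sketch, assuming the Pair-style `parameters` input accepted elsewhere in the package:

```julia
# re-evaluate the variance decomposition under a higher capital share (assumed Pair input)
get_variance_decomposition(RBC, parameters = :Ξ± => 0.3)
get_autocorrelation(RBC)
```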
- $MODEL - $PARAMETER_VALUES # Keyword Arguments -- `non_stochastic_steady_state` [Default: `true`, Type: `Bool`]: switch to return SS of endogenous variables -- `standard_deviation` [Default: `true`, Type: `Bool`]: switch to return standard deviation of endogenous variables -- `variance` [Default: `false`, Type: `Bool`]: switch to return variance of endogenous variables -- `covariance` [Default: `false`, Type: `Bool`]: switch to return covariance matrix of endogenous variables +- `non_stochastic_steady_state` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: switch to return SS of endogenous variables +- `standard_deviation` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the standard deviation of the mentioned variables +- `variance` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the variance of the mentioned variables +- `covariance` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the covariance of the mentioned variables +- `autocorrelation` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the autocorrelation of the mentioned variables +- `autocorrelation_periods` [Default: `1:5`]: periods for which to return the autocorrelation of the mentioned variables - $VERBOSE # Examples @@ -1775,53 +1870,80 @@ end; Ξ² = 0.95 end; -get_moments(RBC, RBC.parameter_values) +get_statistics(RBC, RBC.parameter_values, parameters = RBC.parameters, standard_deviation = RBC.var) # output -2-element Vector{Any}: - [5.936252888048724, 47.39025414828808, 6.884057971014486, 0.0] - [0.026664203785255254, 0.26467737291222343, 0.07393254045396497, 0.010206207261596576] +1-element Vector{Any}: + [0.02666420378525503, 0.26467737291221793, 0.07393254045396483, 0.010206207261596574] ``` """ -function get_moments(𝓂::β„³, parameters::Vector; - non_stochastic_steady_state::Bool = true, - standard_deviation::Bool = true, - variance::Bool = false, - covariance::Bool = false, - verbose::Bool = false) +function get_statistics(𝓂, parameter_values::Vector{T}; + parameters::Vector{Symbol} = Symbol[], + non_stochastic_steady_state::Vector{Symbol} = Symbol[], + standard_deviation::Vector{Symbol} = Symbol[], + variance::Vector{Symbol} = Symbol[], + covariance::Vector{Symbol} = Symbol[], + autocorrelation::Vector{Symbol} = Symbol[], + autocorrelation_periods::U = 1:5, + verbose::Bool = false) where {U,T} + + @assert !(non_stochastic_steady_state == Symbol[]) || !(standard_deviation == Symbol[]) || !(variance == Symbol[]) || !(covariance == Symbol[]) || !(autocorrelation == Symbol[]) "Provide variables for at least one output." 
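Since `get_statistics` replaces the vector-parameter method of `get_moments` and is intended for differentiating moments with respect to parameters, a sketch of that use case follows. The ForwardDiff call is illustrative only; any AD backend the function supports should work the same way:

```julia
import ForwardDiff

# standard deviation of capital as a function of Ξ± and Ξ²
std_k(p) = get_statistics(RBC, p, parameters = [:Ξ±, :Ξ²], standard_deviation = [:k])[1]

# derivative of that standard deviation with respect to the two parameters
ForwardDiff.jacobian(std_k, [0.25, 0.95])
```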
+ + SS_var_idx = indexin(non_stochastic_steady_state, 𝓂.var) + + std_var_idx = indexin(standard_deviation, 𝓂.var) + + var_var_idx = indexin(variance, 𝓂.var) + + covar_var_idx = indexin(covariance, 𝓂.var) + + autocorr_var_idx = indexin(autocorrelation, 𝓂.var) + + other_parameter_values = 𝓂.parameter_values[indexin(setdiff(𝓂.parameters, parameters), 𝓂.parameters)] + + sort_idx = sortperm(vcat(indexin(setdiff(𝓂.parameters, parameters), 𝓂.parameters), indexin(parameters, 𝓂.parameters))) + + all_parameters = vcat(other_parameter_values, parameter_values)[sort_idx] solve!(𝓂, verbose = verbose) - covar_dcmp, __, _, SS_and_pars = calculate_covariance(parameters,𝓂, verbose = verbose) + covar_dcmp, sol, _, SS_and_pars = calculate_covariance(all_parameters,𝓂, verbose = verbose) SS = SS_and_pars[1:end - length(𝓂.calibration_equations)] - if variance + if !(variance == Symbol[]) varrs = convert(Vector{Real},β„’.diag(covar_dcmp)) - if standard_deviation + if !(standard_deviation == Symbol[]) st_dev = sqrt.(varrs) end + elseif !(autocorrelation == Symbol[]) + A = @views sol[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[𝓂.timings.past_not_future_and_mixed_idx,:] + + autocorr = reduce(hcat,[β„’.diag(A ^ i * covar_dcmp ./ β„’.diag(covar_dcmp)) for i in autocorrelation_periods]) else - if standard_deviation + if !(standard_deviation == Symbol[]) st_dev = sqrt.(convert(Vector{Real},β„’.diag(covar_dcmp))) end end ret = [] - if non_stochastic_steady_state - push!(ret,SS) + if !(non_stochastic_steady_state == Symbol[]) + push!(ret,SS[SS_var_idx]) end - if standard_deviation - push!(ret,st_dev) + if !(standard_deviation == Symbol[]) + push!(ret,st_dev[std_var_idx]) end - if variance - push!(ret,varrs) + if !(variance == Symbol[]) + push!(ret,varrs[var_var_idx]) end - if covariance + if !(covariance == Symbol[]) covar_dcmp_sp = sparse(β„’.triu(covar_dcmp)) droptol!(covar_dcmp_sp,eps(Float64)) - push!(ret,covar_dcmp_sp) + push!(ret,covar_dcmp_sp[covar_var_idx,covar_var_idx]) + end + if !(autocorrelation == Symbol[]) + push!(ret,autocorr[autocorr_var_idx,:] ) end return ret diff --git a/src/macros.jl b/src/macros.jl index 699038e7c..53e73902c 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -1,6 +1,8 @@ import MacroTools: postwalk, unblock +const all_available_algorithms = [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order] + """ $(SIGNATURES) @@ -787,11 +789,13 @@ macro model(𝓂,ex) perturbation_solution(SparseMatrixCSC{Float64, Int64}(β„’.I,0,0), x->x), perturbation_solution(SparseMatrixCSC{Float64, Int64}(β„’.I,0,0), x->x), higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), + higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), + higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x) ), Float64[], Set([:first_order]), - Set([:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :third_order]), + Set(all_available_algorithms), true, false ) @@ -1304,7 +1308,7 @@ macro parameters(𝓂,ex...) 
# time_dynamic_derivs = @elapsed write_functions_mapping!(mod.$𝓂, $perturbation_order) - mod.$𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + mod.$𝓂.solution.outdated_algorithms = Set(all_available_algorithms) if !$silent if $perturbation_order == 1 diff --git a/src/plotting.jl b/src/plotting.jl index 105226b9d..a850f3c1e 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -73,7 +73,7 @@ end simulation = simulate(RBC_CME) -plot_model_estimates(RBC_CME, simulation([:k],:,:simulate), data_in_levels = false) +plot_model_estimates(RBC_CME, simulation([:k],:,:simulate)) ``` """ function plot_model_estimates(𝓂::β„³, @@ -102,13 +102,15 @@ function plot_model_estimates(𝓂::β„³, tickfontsize = 8, framestyle = :box) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) var_idx = parse_variables_input_to_index(variables, 𝓂.timings) shock_idx = parse_shocks_input_to_index(shocks,𝓂.timings) @@ -118,7 +120,7 @@ function plot_model_estimates(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) variables_to_plot = filtered_and_smoothed[smooth ? 1 : 5] shocks_to_plot = filtered_and_smoothed[smooth ? 3 : 7] @@ -364,11 +366,11 @@ function plot_irf(𝓂::β„³; tickfontsize = 8, framestyle = :box) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) - state_update = parse_algorithm_to_state_update(algorithm, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(algorithm, 𝓂) NSSS, solution_error = 𝓂.solution.outdated_NSSS ? 
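The plotting counterpart follows the same data conventions as the estimation functions above. A sketch (StatsPlots has to be loaded once before the first plot):

```julia
import StatsPlots

# smoothed variables and shocks implied by the simulated capital series
plot_model_estimates(RBC, simulated([:k], :, :simulate))
```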
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) @@ -381,16 +383,24 @@ function plot_irf(𝓂::β„³; if algorithm == :second_order SSS_delta = reference_steady_state - 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order SSS_delta = reference_steady_state - 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state else SSS_delta = zeros(length(reference_steady_state)) end if algorithm == :second_order reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + reference_steady_state = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + reference_steady_state = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state end initial_state = initial_state == [0.0] ? zeros(𝓂.timings.nVars) - SSS_delta : initial_state[indexin(full_SS, sort(union(𝓂.var,𝓂.exo_present)))] - reference_steady_state @@ -410,9 +420,9 @@ function plot_irf(𝓂::β„³; var_idx = parse_variables_input_to_index(variables, 𝓂.timings) if generalised_irf - Y = girf(state_update, SSS_delta, zeros(𝓂.timings.nVars), 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock)#, warmup_periods::Int = 100, draws::Int = 50, iterations_to_steady_state::Int = 500) + Y = girf(state_update, SSS_delta, zeros(𝓂.timings.nVars), pruning, 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock)#, warmup_periods::Int = 100, draws::Int = 50, iterations_to_steady_state::Int = 500) else - Y = irf(state_update, initial_state, zeros(𝓂.timings.nVars), 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock) .+ SSS_delta[var_idx] + Y = irf(state_update, initial_state, zeros(𝓂.timings.nVars), pruning, 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock) .+ SSS_delta[var_idx] end if shocks isa KeyedArray{Float64} || shocks isa Matrix{Float64} @@ -455,13 +465,17 @@ function plot_irf(𝓂::β„³; title = string(𝓂.timings.var[var_idx[i]]), ylabel = "Level", label = "") + if can_dual_axis StatsPlots.plot!(StatsPlots.twinx(), 100*((Y[i,:,shock] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS], color = :black, label = "") + + StatsPlots.hline!(can_dual_axis ? [SS 0] : [SS], + color = :black, + label = "") end) if !(plot_count % plots_per_page == 0) @@ -737,6 +751,8 @@ Plot the solution of the model (mapping of past states to present variables) aro The (non) stochastic steady state is plotted along with the mapping from the chosen past state to one present variable per plot. All other (non-chosen) states remain in the (non) stochastic steady state. +In the case of pruned solutions the "pruned" state has as a baseline the non stochastic steady state and the "actual" state refers to the stochastic steady state. 
The plot then shows the mapping from `Οƒ` standard deviations added to these two steady states and the present variables. Note that there is no unique mapping between the "pruned" and "actual" states. Furthermore, the mapping of the "actual" state is itself dependend on the "pruned" state so that the plots shown are just one realisation of inifite possible mappings. + # Arguments - $MODEL - `state` [Type: `Symbol`]: state variable to be shown on x-axis. @@ -804,38 +820,36 @@ function plot_solution(𝓂::β„³, @assert state ∈ 𝓂.timings.past_not_future_and_mixed "Invalid state. Choose one from:"*repr(𝓂.timings.past_not_future_and_mixed) - @assert length(setdiff(algorithm isa Symbol ? [algorithm] : algorithm, [:third_order, :second_order, :first_order])) == 0 "Invalid algorithm. Choose any combination of: :third_order, :second_order, :first_order" + @assert length(setdiff(algorithm isa Symbol ? [algorithm] : algorithm, [:third_order, :pruned_third_order, :second_order, :pruned_second_order, :first_order])) == 0 "Invalid algorithm. Choose any combination of: :third_order, :second_order, :first_order" if algorithm isa Symbol - max_algorithm = algorithm - min_algorithm = algorithm + solve!(𝓂, verbose = verbose, algorithm = algorithm, dynamics = true, parameters = parameters) algorithm = [algorithm] else - if :third_order ∈ algorithm - max_algorithm = :third_order - elseif :second_order ∈ algorithm - max_algorithm = :second_order - else - max_algorithm = :first_order - end - - if :first_order ∈ algorithm - min_algorithm = :first_order - elseif :second_order ∈ algorithm - min_algorithm = :second_order + if :third_order ∈ algorithm && :pruned_third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :third_order, dynamics = true, parameters = parameters) + solve!(𝓂, verbose = verbose, algorithm = :pruned_third_order, dynamics = true, parameters = parameters) + elseif :third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :third_order, dynamics = true, parameters = parameters) + elseif :pruned_third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :pruned_third_order, dynamics = true, parameters = parameters) + elseif :second_order ∈ algorithm && :pruned_second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :second_order, dynamics = true, parameters = parameters) + solve!(𝓂, verbose = verbose, algorithm = :pruned_second_order, dynamics = true, parameters = parameters) + elseif :second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :second_order, dynamics = true, parameters = parameters) + elseif :pruned_second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :pruned_second_order, dynamics = true, parameters = parameters) else - min_algorithm = :third_order + solve!(𝓂, verbose = verbose, algorithm = :first_order, dynamics = true, parameters = parameters) end end - solve!(𝓂, verbose = verbose, algorithm = max_algorithm, dynamics = true, parameters = parameters) - SS_and_std = get_moments(𝓂, derivatives = false, parameters = parameters, verbose = verbose) - full_NSSS = sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)) full_NSSS[indexin(𝓂.aux,full_NSSS)] = map(x -> Symbol(replace(string(x), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")), 𝓂.aux) full_SS = [s ∈ 𝓂.exo_present ? 
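To illustrate the pruned-solution comparison described in the docstring addition above, a sketch (the state has to be one of the model's past states, here `k`):

```julia
# compare the state-to-variable mappings of the chosen solutions around
# their respective (stochastic) steady states
plot_solution(RBC, :k, algorithm = [:first_order, :pruned_second_order, :pruned_third_order])
```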
0 : SS_and_std[1](s) for s in full_NSSS] @@ -856,6 +870,7 @@ function plot_solution(𝓂::β„³, legend_plot = StatsPlots.plot(framestyle = :none) + if :first_order ∈ algorithm StatsPlots.plot!(fill(0,1,1), framestyle = :none, @@ -868,12 +883,24 @@ function plot_solution(𝓂::β„³, legend = :inside, label = "2nd order perturbation") end + if :pruned_second_order ∈ algorithm + StatsPlots.plot!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Pruned 2nd order perturbation") + end if :third_order ∈ algorithm StatsPlots.plot!(fill(0,1,1), framestyle = :none, legend = :inside, label = "3rd order perturbation") end + if :pruned_third_order ∈ algorithm + StatsPlots.plot!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Pruned 3rd order perturbation") + end if :first_order ∈ algorithm StatsPlots.scatter!(fill(0,1,1), @@ -882,17 +909,38 @@ function plot_solution(𝓂::β„³, label = "Non Stochastic Steady State") end if :second_order ∈ algorithm + SSS2 = 𝓂.solution.perturbation.second_order.stochastic_steady_state + StatsPlots.scatter!(fill(0,1,1), framestyle = :none, legend = :inside, label = "Stochastic Steady State (2nd order)") end + if :pruned_second_order ∈ algorithm + SSS2p = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state + + StatsPlots.scatter!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Stochastic Steady State (Pruned 2nd order)") + end if :third_order ∈ algorithm + SSS3 = 𝓂.solution.perturbation.third_order.stochastic_steady_state + StatsPlots.scatter!(fill(0,1,1), framestyle = :none, legend = :inside, label = "Stochastic Steady State (3rd order)") end + if :pruned_third_order ∈ algorithm + SSS3p = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state + + StatsPlots.scatter!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Stochastic Steady State (Pruned 3rd order)") + end + StatsPlots.scatter!(fill(0,1,1), label = "", marker = :rect, @@ -904,12 +952,24 @@ function plot_solution(𝓂::β„³, framestyle = :none, legend = :inside) + variable_first_list = [] + variable_second_list = [] + variable_pruned_second_list = [] + variable_third_list = [] + variable_pruned_third_list = [] + has_impact_list = [] for k in vars_to_plot - kk = Symbol(replace(string(k), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")) has_impact = false + + variable_first = [] + variable_second = [] + variable_pruned_second = [] + variable_third = [] + variable_pruned_third = [] + if :first_order ∈ algorithm variable_first = [𝓂.solution.perturbation.first_order.state_update(state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] @@ -919,18 +979,22 @@ function plot_solution(𝓂::β„³, end if :second_order ∈ algorithm - SSS2 = 𝓂.solution.perturbation.second_order.stochastic_steady_state - variable_second = [𝓂.solution.perturbation.second_order.state_update(SSS2 - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] variable_second = [(abs(x) > eps() ? 
x : 0.0) + SS_and_std[1](kk) for x in variable_second] has_impact = has_impact || sum(abs2,variable_second .- sum(variable_second)/length(variable_second))/(length(variable_second)-1) > eps() end + + if :pruned_second_order ∈ algorithm + variable_pruned_second = [𝓂.solution.perturbation.pruned_second_order.state_update(SSS2p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] - if :third_order ∈ algorithm - SSS3 = 𝓂.solution.perturbation.third_order.stochastic_steady_state + variable_pruned_second = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_second] + + has_impact = has_impact || sum(abs2,variable_pruned_second .- sum(variable_pruned_second)/length(variable_pruned_second))/(length(variable_pruned_second)-1) > eps() + end + if :third_order ∈ algorithm variable_third = [𝓂.solution.perturbation.third_order.state_update(SSS3 - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] variable_third = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_third] @@ -938,27 +1002,65 @@ function plot_solution(𝓂::β„³, has_impact = has_impact || sum(abs2,variable_third .- sum(variable_third)/length(variable_third))/(length(variable_third)-1) > eps() end - if !has_impact continue end + if :pruned_third_order ∈ algorithm + variable_pruned_third = [𝓂.solution.perturbation.pruned_third_order.state_update(SSS3p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] + + variable_pruned_third = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_third] + + has_impact = has_impact || sum(abs2,variable_pruned_third .- sum(variable_pruned_third)/length(variable_pruned_third))/(length(variable_pruned_third)-1) > eps() + end + + push!(variable_first_list, variable_first) + push!(variable_second_list, variable_second) + push!(variable_pruned_second_list, variable_pruned_second) + push!(variable_third_list, variable_third) + push!(variable_pruned_third_list, variable_pruned_third) + push!(has_impact_list, has_impact) + + if !has_impact + n_subplots -= 1 + end + end + + + for (i,k) in enumerate(vars_to_plot) + kk = Symbol(replace(string(k), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")) + + if !has_impact_list[i] continue end push!(pp,begin Pl = StatsPlots.plot() if :first_order ∈ algorithm StatsPlots.plot!(state_range .+ SS_and_std[1](state), - variable_first, + variable_first_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end if :second_order ∈ algorithm StatsPlots.plot!(state_range .+ SSS2[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], - variable_second, + variable_second_list[i], + ylabel = string(k)*"β‚β‚€β‚Ž", + xlabel = string(state)*"β‚β‚‹β‚β‚Ž", + label = "") + end + if :pruned_second_order ∈ algorithm + StatsPlots.plot!(state_range .+ SSS2p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], + variable_pruned_second_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end if :third_order ∈ algorithm StatsPlots.plot!(state_range .+ SSS3[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], - variable_third, + variable_third_list[i], + ylabel = string(k)*"β‚β‚€β‚Ž", + xlabel = string(state)*"β‚β‚‹β‚β‚Ž", + label = "") + end + if :pruned_third_order ∈ algorithm + StatsPlots.plot!(state_range .+ 
SSS3p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], + variable_pruned_third_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") @@ -972,10 +1074,18 @@ function plot_solution(𝓂::β„³, StatsPlots.scatter!([SSS2[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS2[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], label = "") end + if :pruned_second_order ∈ algorithm + StatsPlots.scatter!([SSS2p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS2p[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], + label = "") + end if :third_order ∈ algorithm StatsPlots.scatter!([SSS3[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS3[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], label = "") end + if :pruned_third_order ∈ algorithm + StatsPlots.scatter!([SSS3p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS3p[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], + label = "") + end Pl end) @@ -989,7 +1099,7 @@ function plot_solution(𝓂::β„³, p = StatsPlots.plot(ppp, legend_plot, - layout = StatsPlots.grid(2, 1, heights=[0.8, 0.2]), + layout = StatsPlots.grid(2, 1, heights = length(algorithm) > 3 ? [0.65, 0.35] : [0.8, 0.2]), plot_title = "Model: "*𝓂.model_name*" ("*string(pane)*"/"*string(Int(ceil(n_subplots/plots_per_page)))*")" ) @@ -1013,7 +1123,7 @@ function plot_solution(𝓂::β„³, p = StatsPlots.plot(ppp, legend_plot, - layout = StatsPlots.grid(2, 1, heights=[0.8, 0.2]), + layout = StatsPlots.grid(2, 1, heights = length(algorithm) > 3 ? [0.65, 0.35] : [0.8, 0.2]), plot_title = "Model: "*𝓂.model_name*" ("*string(pane)*"/"*string(Int(ceil(n_subplots/plots_per_page)))*")" ) @@ -1047,7 +1157,7 @@ Limited to the first order perturbation solution of the model. - $SHOCK_CONDITIONS - `periods` [Default: `40`, Type: `Int`]: the total number of periods is the sum of the argument provided here and the maximum of periods of the shocks or conditions argument. - $VARIABLES -`conditions_in_levels` [Default: `false`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. +`conditions_in_levels` [Default: `true`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. - $LEVELS - `show_plots` [Default: `true`, Type: `Bool`]: show plots. Separate plots per shocks and varibles depending on number of variables and `plots_per_page`. - `save_plots` [Default: `false`, Type: `Bool`]: switch to save plots using path and extension from `save_plots_path` and `save_plots_format`. 
Separate files per shocks and variables depending on number of variables and `plots_per_page` @@ -1091,7 +1201,7 @@ conditions[2,2] = .02 shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) shocks[1,1] = .05 -plot_conditional_forecast(RBC_CME, conditions, shocks = shocks) +plot_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) # The same can be achieved with the other input formats: # conditions = Matrix{Union{Nothing,Float64}}(undef,7,2) @@ -1114,10 +1224,11 @@ plot_conditional_forecast(RBC_CME, conditions, shocks = shocks) function plot_conditional_forecast(𝓂::β„³, conditions::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}}; shocks::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}, Nothing} = nothing, + initial_state::Vector{Float64} = [0.0], periods::Int = 40, parameters = nothing, variables::Symbol_input = :all_including_auxilliary, - conditions_in_levels::Bool = false, + conditions_in_levels::Bool = true, levels::Bool = false, show_plots::Bool = true, save_plots::Bool = false, @@ -1139,6 +1250,7 @@ function plot_conditional_forecast(𝓂::β„³, Y = get_conditional_forecast(𝓂, conditions, shocks = shocks, + initial_state = initial_state, periods = periods, parameters = parameters, variables = variables, @@ -1218,7 +1330,7 @@ function plot_conditional_forecast(𝓂::β„³, return_plots = [] for i in 1:length(var_idx) - if all(isapprox.(Y[i,:], 0, atol = eps(Float32))) + if all(isapprox.(Y[i,:], 0, atol = eps(Float32))) && !(any(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) n_subplots -= 1 end end @@ -1226,75 +1338,44 @@ function plot_conditional_forecast(𝓂::β„³, for i in 1:length(var_idx) SS = reference_steady_state[i] if !(all(isapprox.(Y[i,:],0,atol = eps(Float32)))) || length(findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) > 0 - if !(plot_count % plots_per_page == 0) - plot_count += 1 - if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? 
[SS 0] : [SS], color = :black, label = "") - end) - end - else - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - end) - end - - end + + if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + end) else - - plot_count = 1 - if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + end) + end + else + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) end) - else + else push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? 
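Analogously to `get_conditional_forecast`, the plotting wrapper now defaults to `conditions_in_levels = true` and marks conditioned values at the level they were supplied in. A sketch reusing the conditions from the earlier example:

```julia
plot_conditional_forecast(RBC, conditions, conditions_in_levels = false)
```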
[SS 0] : [SS],color = :black,label = "") + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") end) end - else - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - end) - end - end + end + + if !(plot_count % plots_per_page == 0) + plot_count += 1 + else + plot_count = 1 shock_string = "Conditional forecast" diff --git a/src/structures.jl b/src/structures.jl index cf9a208e6..4a2ad72ba 100644 --- a/src/structures.jl +++ b/src/structures.jl @@ -165,7 +165,9 @@ mutable struct perturbation linear_time_iteration::perturbation_solution quadratic_iteration::perturbation_solution second_order::higher_order_perturbation_solution + pruned_second_order::higher_order_perturbation_solution third_order::higher_order_perturbation_solution + pruned_third_order::higher_order_perturbation_solution end diff --git a/test/ff_ng.jl b/test/ff_ng.jl new file mode 100644 index 000000000..3d00f4974 --- /dev/null +++ b/test/ff_ng.jl @@ -0,0 +1,335 @@ +using MacroModelling +import Turing, StatsPlots, Random, Statistics +import LinearAlgebra as β„’ + +using HypothesisTests, Distributions + +@model RBC begin + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] +end + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.25 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. 
+end + +# draw shocks +Random.seed!(1) +periods = 20 +shockdist = Turing.TDist(3) # Turing.Beta(10,1) # +shocks = rand(shockdist,1,periods) # shocks = randn(1,periods) + +#shocks /= Statistics.std(shocks) # antithetic shocks +#shocks .-= Statistics.mean(shocks) # antithetic shocks +# Test for non-normality + HypothesisTests.ExactOneSampleKSTest(shocks[1,:],Turing.Normal(0,1)) + StatsPlots.plot(Distributions.Normal(0,1), fill=(0, .5,:blue)) + StatsPlots.density!(shocks') +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect + +# plot simulation +MacroModelling.plot_irf(RBC,shocks = shocks, periods = 0) +#StatsPlots.plot(shocks') +Ξ© = 10^(-5)# eps() +n_samples = 1000 + + +# define loglikelihood model - KF +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) + + # Ξ± = 0.25 + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + # solution = get_solution(m, parameters, algorithm = algorithm) + + # if solution[end] != true + # return Turing.@addlogprob! Inf + # end + # # draw_shocks(m) + # x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + # calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + # long_run_covariance = calculate_covariance_(solution[2]) + + # initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 + + # 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + # Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + # Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + # aug_state = [initial_conditions + # 1 + # Ο΅[:,1]] + + # state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + # for t in 2:size(data, 2) + # aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + # 1 + # Ο΅[:,t]] + + # state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + # end + + # observables_index = sort(indexin(observables, m.timings.var)) + + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + # Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol + + +StatsPlots.plot(samps) +kf_estimated_parameters = Turing.describe(samps)[1].nt.parameters +kf_estimated_means = Turing.describe(samps)[1].nt.mean +kf_estimated_std = Turing.describe(samps)[1].nt.std +kfmean= kf_estimated_means +kfstd = kf_estimated_std +Turing.@model function loglikelihood_scaling_function_ff(m, data, observables, Ξ©) #, kfmean, kfstd + + # Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + # ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + #Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) + + #Ξ± ~ Turing.Uniform(kfmean[1]-2*kfstd[1], kfmean[1]+2*kfstd[1]) + #Ξ² ~ Turing.Uniform(kfmean[2]-2*kfstd[2], kfmean[2]+2*kfstd[2]) + #Ξ΄ ~ Turing.Uniform(kfmean[3]-2*kfstd[3], kfmean[3]+2*kfstd[3]) + #Οƒ ~ Turing.Uniform(0.0, kfmean[4]+2*kfstd[4]) + #ρ ~ Turing.Uniform(0.0, kfmean[5]+2*kfstd[5]) + #Ξ³ ~ Turing.Uniform(0.0, kfmean[6]+2*kfstd[6]) + + + # Ξ± = 0.25 + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + # skewness + shock_distribution = Turing.TDist(3.0) + + # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + # draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling_ff = loglikelihood_scaling_function_ff(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # ,kf_estimated_means, kf_estimated_std # Filter free + +n_samples = 1000 +samps_ff = Turing.sample(loglikelihood_scaling_ff, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +StatsPlots.plot(samps_ff) + +ff_estimated_parameters = Turing.describe(samps_ff)[1].nt.parameters +ff_estimated_means = Turing.describe(samps_ff)[1].nt.mean +ff_estimated_std = Turing.describe(samps_ff)[1].nt.std + + +ff_bias= ( ff_estimated_means[1:6]- RBC.parameter_values[[2, 3, 5, 1, 4,6]]) +kf_bias= ( kf_estimated_means[1:6]- RBC.parameter_values[[2, 3, 5, 1, 4,6]]) + +ff_z = (ff_bias)./ff_estimated_std[1:6] +kf_z = ( kf_bias)./kf_estimated_std[1:6] + +grouplabel = repeat(["KF", "FF"], inner = 6) + +StatsPlots.groupedbar( repeat(kf_estimated_parameters, outer =2) , [kf_bias ff_bias], group = grouplabel, xlabel = "Structural Parameters Biases") +StatsPlots.groupedbar( repeat(kf_estimated_parameters, outer =2), [kf_z ff_z], group = grouplabel, xlabel = "Structural Parameter z-scores") +data = KeyedArray(Array(collect(simulated_data(:k,:,:Shock_matrix)))',row = [:k], col = 1:1:20) + + + +kf_filtered_shocks = MacroModelling.get_estimated_shocks(RBC, data, parameters = kf_estimated_means[[4, 1, 2, 5, 3,6]]) + + +ff_estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], ff_estimated_parameters ) +StatsPlots.plot(ff_estimated_means[ff_estimated_parameters_indices], + ribbon = 1.96 * ff_estimated_std[ff_estimated_parameters_indices], + label = "Posterior mean", + title = "Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") +StatsPlots.plot!(collect(kf_filtered_shocks'), label = "KF filtered shocks") + + +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. 
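+# A minimal toy sketch of the trade-off described above (numbers and the names
+# `toy_deviations` / `Ξ©_try` are made up for illustration only): each period's deviation
+# between data and model state is scored under an MvNormal with covariance Ξ© * I, so
+# shrinking Ξ© inflates the data-fit term (and its gradients) relative to the parameter priors.
+let toy_deviations = [0.001 -0.002 0.0005]   # one observable, three periods
+    for Ξ©_try in (1e-2, 1e-5)
+        data_fit = sum(Turing.logpdf(Turing.MvNormal(Ξ©_try * β„’.I(1)), toy_deviations[:, t]) for t in 1:size(toy_deviations, 2))
+        println("Ξ© = $Ξ©_try  =>  data-fit term = $data_fit")
+    end
+end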
+ +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(samps_ff) + +#Plot true and estimated latents to see how well we backed them out + + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end diff --git a/test/filter_free.jl b/test/filter_free.jl new file mode 100644 index 000000000..546c728bf --- /dev/null +++ b/test/filter_free.jl @@ -0,0 +1,238 @@ +using MacroModelling +import Turing, StatsPlots, Random, Statistics +import LinearAlgebra as β„’ + +@model RBC begin + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] +end + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.25 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. +end + +# draw shocks +periods = 10 +shocks = randn(1,periods) +shocks /= Statistics.std(shocks) # antithetic shocks +shocks .-= Statistics.mean(shocks) # antithetic shocks + +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect + +# plot simulation +plot_irf(RBC,shocks = shocks, periods = 0) +StatsPlots.plot(shocks') + + +# function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) +# abs(Ξ”) > Ο΅ ? 
abs(Ξ”)^p : 0 +# end + +# define loglikelihood model +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) + + # Ξ± = 0.25 + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + # solution = get_solution(m, parameters, algorithm = algorithm) + + # if solution[end] != true + # return Turing.@addlogprob! Inf + # end + # # draw_shocks(m) + # x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + # calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + # long_run_covariance = calculate_covariance_(solution[2]) + + # initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 + + # 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + # Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + # Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + # aug_state = [initial_conditions + # 1 + # Ο΅[:,1]] + + # state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + # for t in 2:size(data, 2) + # aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + # 1 + # Ο΅[:,t]] + + # state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + # end + + # observables_index = sort(indexin(observables, m.timings.var)) + + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +Ξ© = 1e-2#eps() +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman +loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free + +n_samples = 1000 + +# solution = get_solution(RBC, RBC.parameter_values, algorithm = :first_order)[1] + +# simulated_data(:k,:,:Shock_matrix) .- solution[1][observables_index...] 
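+# Note (sketch): the second construction above overwrites the first, so the `Turing.sample`
+# call below estimates whichever variant is assigned last. The two data layouts match the two
+# branches of `loglikelihood_scaling_function`: the Kalman branch (currently active) expects
+# the KeyedArray, while the filter-free branch (currently commented out inside the model)
+# expects a plain nObs x T matrix. To compare both, keep separate model objects and switch
+# the branches inside the model accordingly (`kf_model` / `ff_model` are illustrative names):
+# kf_model = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©)
+# ff_model = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©)
+# samps_kf = Turing.sample(kf_model, Turing.NUTS(), n_samples, progress = true)
+# samps_ff = Turing.sample(ff_model, Turing.NUTS(), n_samples, progress = true)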
+# using AdvancedHMC +# D = 18 + +# n_samples, n_adapts = 1_000, 300 +# metric = DiagEuclideanMetric(D) +# hamiltonian = Hamiltonian(metric, logprob, +# ForwardDiff) + +# integrator = Leapfrog(find_good_stepsize(hamiltonian, +# initial_theta)) + +# proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator) +# adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), +# StepSizeAdaptor(0.8, integrator)) + +# samples, stats = sample(hamiltonian, proposal, initial_theta, n_samples, adaptor, n_adapts; progress=true) + +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. + +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(samps) + +#Plot true and estimated latents to see how well we backed them out +estimated_parameters = Turing.describe(samps)[1].nt.parameters +estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], estimated_parameters) +estimated_means = Turing.describe(samps)[1].nt.mean +estimated_std = Turing.describe(samps)[1].nt.std + + +StatsPlots.plot(estimated_means[estimated_parameters_indices], + ribbon = 1.96 * estimated_std[estimated_parameters_indices], + label = "Posterior mean", + title = "First-Order Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") + + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! 
Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end diff --git a/test/filter_free_muse_inference.jl b/test/filter_free_muse_inference.jl new file mode 100644 index 000000000..d4ab1fb19 --- /dev/null +++ b/test/filter_free_muse_inference.jl @@ -0,0 +1,252 @@ +using MacroModelling +import Turing, StatsPlots, Random, Statistics +import LinearAlgebra as β„’ + +@model RBC begin + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] +end + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.25 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. +end + +# draw shocks +periods = 100 +shocks = randn(1,periods) +shocks /= Statistics.std(shocks) # antithetic shocks +shocks .-= Statistics.mean(shocks) # antithetic shocks + +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect + +# plot simulation +plot_irf(RBC,shocks = shocks, periods = 0) +StatsPlots.plot(shocks') + + +# function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) +# abs(Ξ”) > Ο΅ ? abs(Ξ”)^p : 0 +# end + +# define loglikelihood model +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + Ξ΄ ~ MacroModelling.Normal(0.02, 0.05) + # Ξ³ ~ Turing.Normal(1, 0.05) + Οƒ ~ MacroModelling.Normal(0.01, 0.05) + + # Ξ± ~ Turing.Uniform(0.15, 0.45) + # Ξ² ~ Turing.Uniform(0.92, 0.9999) + # Ξ΄ ~ Turing.Uniform(0.0, 1.0) + # Οƒ ~ Turing.Uniform(0.0, 1.0) + # ρ ~ Turing.Uniform(0.0, 1.0) + # Ξ³ ~ Turing.Uniform(0.0, 1.5) + + Ξ± = 0.25 + Ξ² = 0.95 + # Ξ΄ = 0.02 + # Οƒ = 0.01 + ρ = 0.2 + Ξ³ = 1. 
+ + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + # draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + states ~ Turing.MvNormal(vec(state[observables_index,:] .+ solution[1][observables_index...]), Ξ© * β„’.I(length(state[observables_index,:]))) + + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +Ξ© = 1e-0#eps() +# loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman +loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free + +n_samples = 1000 + +# solution = get_solution(RBC, RBC.parameter_values, algorithm = :first_order)[1] + +# simulated_data(:k,:,:Shock_matrix) .- solution[1][observables_index...] 
+# using AdvancedHMC +# D = 18 + +# n_samples, n_adapts = 1_000, 300 +# metric = DiagEuclideanMetric(D) +# hamiltonian = Hamiltonian(metric, logprob, +# ForwardDiff) + +# integrator = Leapfrog(find_good_stepsize(hamiltonian, +# initial_theta)) + +# proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator) +# adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), +# StepSizeAdaptor(0.8, integrator)) + +# samples, stats = sample(hamiltonian, proposal, initial_theta, n_samples, adaptor, n_adapts; progress=true) + +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +using MuseInference, StatsPlots +import Turing +using AbstractDifferentiation + +model = loglikelihood_scaling | (states = vec(simulated_data(:k,:,:Shock_matrix)'),) +prob = MuseInference.TuringMuseProblem(model, params=(:Ξ΄, :Οƒ)) + +result = muse(prob, (Ξ΄=0.2, Οƒ=0.01,)) + +# get solution +result = muse(prob, (Ξ΄=0.5, Οƒ=0.1)) + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. + +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(samps) + +#Plot true and estimated latents to see how well we backed them out +estimated_parameters = Turing.describe(samps)[1].nt.parameters +estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], estimated_parameters) +estimated_means = Turing.describe(samps)[1].nt.mean +estimated_std = Turing.describe(samps)[1].nt.std + + +StatsPlots.plot(estimated_means[estimated_parameters_indices], + ribbon = 1.96 * estimated_std[estimated_parameters_indices], + label = "Posterior mean", + title = "First-Order Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") + + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! 
Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 018dbf265..b6a991635 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -21,13 +21,6 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = nsss = get_SS(m) if algorithm == :first_order - sols_nv = get_solution(m) - sols = get_solution(m, verbose = true) - new_sols1 = get_solution(m, verbose = true, parameters = m.parameter_values * 1.0001) - new_sols2 = get_solution(m, verbose = true, parameters = (m.parameters[1] => m.parameter_values[1] * 1.0001)) - new_sols3 = get_solution(m, verbose = true, parameters = Tuple(m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001)) - new_sols4 = get_solution(m, verbose = true, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] / 1.0001)) - old_sols = get_solution(m, verbose = true, parameters = old_par_vals) auto_corr_nv = get_autocorrelation(m) auto_corrr = get_autocorrelation(m, verbose = true) @@ -148,53 +141,53 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = Matrix{Union{Nothing, Float64}}(undef,size(new_sub_irfs_all,3),1) shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end conditions = spzeros(size(new_sub_irfs_all,1),2) conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = spzeros(size(new_sub_irfs_all,3),1) 
shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end conditions = KeyedArray(Matrix{Union{Nothing, Float64}}(undef,2,2), Variables = varnames[var_idxs[1:2]], Periods = 1:2) conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = KeyedArray(Matrix{Union{Nothing, Float64}}(undef,1,1), Shocks = [shocknames[1]], Periods = [1]) shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end - cond_fcst = get_conditional_forecast(m, conditions) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) if plots - plot_conditional_forecast(m, conditions, save_plots = false, show_plots = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots 
= false, show_plots = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) end NSSS = get_SS(m,derivatives = false) @@ -206,9 +199,9 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = conditions_lvl[var_idxs[1],1] = .01 + reference_steady_state[var_idxs[1]] conditions_lvl[var_idxs[2],2] = .02 + reference_steady_state[var_idxs[2]] - cond_fcst = get_conditional_forecast(m, conditions_lvl, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], conditions_in_levels = true, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions_lvl, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = varnames[1], levels = true, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = varnames[1], levels = true, verbose = true) # Test filtering and smoothing sol = get_solution(m) @@ -221,8 +214,8 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) - data = simulation(m.var[var_idxs],:,:simulate) - data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] + data_in_levels = simulation(m.var[var_idxs],:,:simulate) + data = data_in_levels .- m.solution.non_stochastic_steady_state[var_idxs] estim_vars1 = get_estimated_variables(m, data, data_in_levels = false, verbose = true) estim_vars2 = get_estimated_variables(m, data_in_levels, verbose = true) @@ -272,10 +265,20 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = estim_decomp2 = get_shock_decomposition(m, data, data_in_levels = false, smooth = false, verbose = true, parameters = old_par_vals) end - if algorithm ∈ [:second_order, :third_order] + if algorithm ∈ [:second_order, :pruned_second_order, :third_order, :pruned_third_order] SSS = get_stochastic_steady_state(m, algorithm = algorithm) end + + # get_solution + sols_nv = get_solution(m, algorithm = algorithm) + sols = get_solution(m, algorithm = algorithm, verbose = true) + new_sols1 = get_solution(m, algorithm = algorithm, verbose = true, parameters = m.parameter_values * 1.0001) + new_sols2 = get_solution(m, algorithm = algorithm, verbose = true, parameters = (m.parameters[1] => m.parameter_values[1] * 1.0001)) + new_sols3 = get_solution(m, algorithm = algorithm, verbose = true, parameters = Tuple(m.parameters[1:2] .=> 
m.parameter_values[1:2] * 1.0001)) + new_sols4 = get_solution(m, algorithm = algorithm, verbose = true, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] / 1.0001)) + old_sols = get_solution(m, algorithm = algorithm, verbose = true, parameters = old_par_vals) + # irfs irfs_nv = get_irf(m, algorithm = algorithm) irfs = get_irf(m, verbose = true, algorithm = algorithm) @@ -303,9 +306,10 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :simulate) new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1]))) new_sub_lvl_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1])), levels = true) - @test isapprox(collect(new_sub_lvl_irfs(:,1,:)), collect(lvl_irfs(:,6,m.exo[1])),rtol = eps(Float32)) - + if algorithm ∈ setdiff(MacroModelling.all_available_algorithms, [:pruned_second_order,:pruned_third_order]) + @test isapprox(collect(new_sub_lvl_irfs(:,1,:)), collect(lvl_irfs(:,6,m.exo[1])),rtol = eps(Float32)) + end new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, variables = m.timings.var[1]) new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, variables = m.timings.var[end-1:end]) @@ -382,9 +386,9 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) - data = simulation(m.var[var_idxs],:,:simulate) - data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] - + data_in_levels = simulation(m.var[var_idxs],:,:simulate) + data = data_in_levels .- m.solution.non_stochastic_steady_state[var_idxs] + plot_model_estimates(m, data, data_in_levels = false) plot_model_estimates(m, data, data_in_levels = false, verbose = true) plot_model_estimates(m, data, data_in_levels = false, verbose = true) diff --git a/test/runtests.jl b/test/runtests.jl index 7a361cca5..a6ea6f2e3 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -24,8 +24,9 @@ include("functionality_tests.jl") @testset verbose = true "FS2000" begin include("models/FS2000.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() @@ -83,8 +84,9 @@ GC.gc() @testset verbose = true "RBC_CME with calibration equations and parameter definitions" begin include("models/RBC_CME_calibration_equations_and_parameter_definitions.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order) - functionality_test(m, algorithm = :third_order) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() @@ -92,8 +94,9 @@ GC.gc() @testset verbose = true "RBC_CME with calibration equations" begin include("models/RBC_CME_calibration_equations.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing 
GC.gc() @@ -101,8 +104,9 @@ GC.gc() @testset verbose = true "RBC_CME" begin include("models/RBC_CME.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() diff --git a/test/test_filterfree.jl b/test/test_filterfree.jl new file mode 100644 index 000000000..87589d650 --- /dev/null +++ b/test/test_filterfree.jl @@ -0,0 +1,214 @@ +using MacroModelling +import Turing +import Turing: NUTS, sample, logpdf +# import Optim, LineSearches, Plots +using Random, CSV, DataFrames, MCMCChains, AxisKeys +import DynamicPPL: logjoint +import LinearAlgebra as β„’ +import ChainRulesCore: @ignore_derivatives, ignore_derivatives + +cd("C:/Users/fm007/Documents/GitHub/MacroModelling.jl/test") +include("models/FS2000.jl") + +FS2000 = m + +# load data +dat = CSV.read("data/FS2000_data.csv", DataFrame) +data = KeyedArray(Array(dat)',Variable = Symbol.("log_".*names(dat)),Time = 1:size(dat)[1]) +data = log.(data) + +# declare observables +observables = sort(Symbol.("log_".*names(dat))) + +# subset observables in data +data = data(observables,:) + +# declare parameters +alp = 0.356 +bet = 0.993 +gam = 0.0085 +mst = 1.0002 +rho = 0.129 +psi = 0.65 +del = 0.01 +z_e_a = 0.035449 +z_e_m = 0.008862 +parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] +# filter data with parameters +filtered_errors = MacroModelling.get_estimated_shocks(FS2000, data; parameters= parameters) # filtered_states = get_estimated_variables(FS2000, data; parameters= parameters) + +# Define DSGE Turing model +Turing.@model function FS2000_filter_free_loglikelihood_function(data, model, observables) + + alp ~ Beta(0.356, 0.02, ΞΌΟƒ = true) + #bet ~ Beta(0.993, 0.002, ΞΌΟƒ = true) + #gam ~ Normal(0.0085, 0.003) + #mst ~ Normal(1.0002, 0.007) + rho ~ Beta(0.129, 0.223, ΞΌΟƒ = true) + #psi ~ Beta(0.65, 0.05, ΞΌΟƒ = true) + #del ~ Beta(0.01, 0.005, ΞΌΟƒ = true) + #z_e_a ~ InverseGamma(0.035449, Inf, ΞΌΟƒ = true) + #z_e_m ~ InverseGamma(0.008862, Inf, ΞΌΟƒ = true) + + #alp = 0.356 + bet = 0.993 + gam = 0.0085 + mst = 1.0002 + #rho = 0.129 + psi = 0.65 + del = 0.01 + z_e_a = 0.035449 + z_e_m = 0.008862 + + # Log likehood function inputs - + # I did not manage to delegate the sampling to another function - Would it be possible to call it in with an include() command? + shock_distribution = Turing.Normal() + algorithm = :first_order + parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] + verbose::Bool = false + tol::AbstractFloat = eps() + filter = :filter_free + +# BEGINNING OF OBJECTIVE FUNCTION + + # draw intial conditions + x0 ~ Turing.filldist(shock_distribution,m.timings.nPast_not_future_and_mixed) # Initial conditions + + # draw errors + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks + + # reshape errors to vector + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # Checks + @assert length(observables) == size(data)[1] "Data columns and number of observables are not identical. Make sure the data contains only the selected observables." + @assert length(observables) <= m.timings.nExo "Cannot estimate model with more observables than exogenous shocks. Have at least as many shocks as observable variables." 
+ + @ignore_derivatives sort!(observables) + @ignore_derivatives solve!(m, verbose = verbose) + + if isnothing(parameters) + parameters = m.parameter_values + else + ub = @ignore_derivatives fill(1e12 + rand(), length(m.parameters) + length(m.βž•_vars)) + lb = @ignore_derivatives - ub + + for (i,v) in enumerate(m.bounded_vars) + if v ∈ m.parameters + @ignore_derivatives lb[i] = m.lower_bounds[i] + @ignore_derivatives ub[i] = m.upper_bounds[i] + end + end + + if min(max(parameters,lb),ub) != parameters + return -Inf + end + end + + SS_and_pars, solution_error = m.SS_solve_func(parameters, m, verbose) + + if solution_error > tol || isnan(solution_error) + return -Inf + end + + NSSS_labels = @ignore_derivatives [sort(union(m.exo_present,m.var))...,m.calibration_equations_parameters...] + + obs_indices = @ignore_derivatives indexin(observables,NSSS_labels) + + data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices] + + observables_and_states = @ignore_derivatives sort(union(m.timings.past_not_future_and_mixed_idx,indexin(observables,sort(union(m.aux,m.var,m.exo_present))))) + + # solve DSGE with parameters + solution = get_solution(m, parameters, algorithm = algorithm) + + # store solution + if algorithm == :first_order + 𝐒₁ = solution[2] + else + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + end + + # Thore: we can probably skip this because it is computationally expensive and should drop out in sampling ... MΓ‘tyΓ‘s: We cannot as the initial condition bias for the erros is important + + # Option 1 - no initial condition sampling - biased errors but faster - no need to compute LR covariance matrix + # x0 = zeros(m.timings.nPast_not_future_and_mixed) + + # Option 2 - initial condition is sampled - unbiased errors - slow as LR covariance is needed. 
+ calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = m.timings.past_not_future_and_mixed_idx) + long_run_covariance = calculate_covariance_(solution[2]) + initial_conditions =long_run_covariance * x0 # x0 + + # Declare states + state = zeros(typeof(Ο΅_draw[1]), m.timings.nVars, size(data, 2) ) + + # propagate the state space + if algorithm == :first_order + + aug_state = [initial_conditions + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + end + elseif algorithm == :second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + elseif algorithm == :pruned_second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + end + + # define data in deviations form SS + data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices] + + # compute observation predictions - without ME + state_deviations = data_in_deviations - state[obs_indices,:] + # make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-4*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) *10^2 + make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-8*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) + # make_sure_state_equals_observable = -sum(abs2,state_deviations) * 1e30 +# END OF OBJECTIVE FUNCTION + + Turing.@addlogprob! 
make_sure_state_equals_observable#calculate_filterfree_loglikelihood(model, data(observables), observables; parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m]) +end + +FS2000_filterfree = FS2000_filter_free_loglikelihood_function(data, FS2000, observables) + +n_samples = 1000 + +samps = sample(FS2000_filterfree, NUTS(), n_samples, progress = true)#, init_params = sol) + +symbol_to_int(s) = parse(Int, string(s)[9:end-1]) +Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:m.timings.nExo*size(data,2)], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) +tmp = Turing.describe(Ο΅_chain) +Ο΅_mean = tmp[1][:, 2] +Ο΅_mean = reshape(Ο΅_mean, m.timings.nExo, Integer(size(Ο΅_mean,1)/m.timings.nExo)) +Ο΅_std = tmp[1][:, 3] +Ο΅_std = reshape(Ο΅_std, m.timings.nExo, Integer(size(Ο΅_std,1)/m.timings.nExo)) + +sum(abs,Ο΅_mean[1,end-20:end]-collect(filtered_errors[1,end-20:end]))<10^-4 diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index 61047fddf..e02e50905 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -67,7 +67,7 @@ get_irf(RBC_CME, algorithm = :third_order) T = timings([:R, :y], [:Pi, :c], [:k, :z_delta], [:A], [:A, :Pi, :c], [:A, :k, :z_delta], [:A, :Pi, :c, :k, :z_delta], [:A], [:k, :z_delta], [:A], [:delta_eps, :eps_z], [:A, :Pi, :R, :c, :k, :y, :z_delta], Symbol[], Symbol[], 2, 1, 3, 3, 5, 7, 2, [3, 6], [1, 2, 4, 5, 7], [1, 2, 4], [2, 3], [1, 5, 7], [1], [1], [5, 7], [5, 6, 1, 7, 3, 2, 4], [3, 4, 5, 1, 2]) -first_order_solution = calculate_first_order_solution(βˆ‡β‚; T = T, explosive = false)# |> Matrix{Float32} +first_order_solution, solved = calculate_first_order_solution(βˆ‡β‚; T = T, explosive = false)# |> Matrix{Float32} second_order_solution = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, @@ -246,99 +246,93 @@ end end - iirrff = irf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff = irf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff[4,1,:],[ -0.00036685520477089503 0.0021720718769730014],rtol = eps(Float32)) - ggiirrff = girf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + ggiirrff = girf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff[4,1,:],ggiirrff[4,1,:],rtol = eps(Float32)) SSS_delta = RBC_CME.solution.non_stochastic_steady_state[1:length(RBC_CME.var)] - RBC_CME.solution.perturbation.second_order.stochastic_steady_state - ggiirrff2 = girf(second_order_state_update, SSS_delta, zeros(T.nVars), T, draws = 1000,warmup_periods = 100) + ggiirrff2 = girf(second_order_state_update, SSS_delta, zeros(T.nVars), false, T, draws = 1000,warmup_periods = 100) @test isapprox(ggiirrff2[4,1,:],[-0.0003668849861768406 0.0021711333455274096],rtol = 1e-3) - iirrff2 = irf(second_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff2 = irf(second_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff2[4,1,:],[-0.0004547347878067665, 0.0020831426377533636],rtol = 1e-6) SSS_delta = RBC_CME.solution.non_stochastic_steady_state[1:length(RBC_CME.var)] - RBC_CME.solution.perturbation.third_order.stochastic_steady_state - ggiirrff3 = girf(third_order_state_update, SSS_delta, zeros(T.nVars), T,draws = 1000,warmup_periods = 100) + ggiirrff3 = girf(third_order_state_update, SSS_delta, zeros(T.nVars), false, T,draws = 1000,warmup_periods = 100) @test isapprox(ggiirrff3[4,1,:],[ -0.00036686142588429404 0.002171120660323429],rtol = 1e-3) - iirrff3 = 
irf(third_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff3 = irf(third_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff3[4,1,:],[-0.00045473149068020854, 0.002083198241302615], rtol = 1e-6) end - @testset verbose = true "NSSS and std derivatives" begin # derivatives of paramteres wrt standard deviations - stdev_deriv = ForwardDiff.jacobian(x -> get_moments(RBC_CME, x)[2], Float64.(RBC_CME.parameter_values)) + stdev_deriv = ForwardDiff.jacobian(x -> get_statistics(RBC_CME, x, parameters = RBC_CME.parameters, standard_deviation = RBC_CME.var)[1], RBC_CME.parameter_values) + stdev_deriv[9] @test isapprox(stdev_deriv[5,6],1.3135107627695757, rtol = 1e-6) - # derivatives of paramteres wrt non stochastic steady state - nsss_deriv = ForwardDiff.jacobian(x -> get_moments(RBC_CME, x)[1], Float64.(RBC_CME.parameter_values)) + nsss_deriv = ForwardDiff.jacobian(x -> get_statistics(RBC_CME, x, parameters = RBC_CME.parameters, non_stochastic_steady_state = RBC_CME.var)[1], RBC_CME.parameter_values) @test isapprox(nsss_deriv[4,1],3.296074644820076, rtol = 1e-6) end @testset verbose = true "Method of moments" begin # Method of moments: with varying steady states and derivatives of steady state numerical solved_vars - sol = Optim.optimize(x -> sum(abs2, get_moments(RBC_CME, vcat(x, RBC_CME.parameter_values[2:end]))[2][[5]] - [.21]), + sol = Optim.optimize(x -> sum(abs2, get_statistics(RBC_CME, x, parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] - [.21]), [0], [1], [.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[2:end]))[2][5],.21,rtol = 1e-6) - - + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] ,[.21], atol = 1e-6) # multiple parameter inputs and targets - sol = Optim.optimize(x -> sum(abs2,get_moments(RBC_CME, vcat(x[1], RBC_CME.parameter_values[2:end-1],x[2]))[2][[2,5]] - [.0008,.21]), + sol = Optim.optimize(x -> sum(abs2,get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1] - [.0008,.21]), [0,0], [1,1], [.006,.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_moments(RBC_CME, vcat(sol.minimizer[1],RBC_CME.parameter_values[2:end-1],sol.minimizer[2]))[2][[2,5]],[.0008,.21],rtol=1e-6) - + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1], [.0008,.21], atol=1e-6) # function combining targets for SS and St.Dev. 
- function get_variances_optim(x,p) - out = get_moments(RBC_CME, vcat(x,p)) - sum(abs2,[out[1][6] - 1.45, out[2][5] - .2]) + function get_variances_optim(x) + out = get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.45, out[2][1] - .2]) end - out = get_variances_optim([.157,.999],RBC_CME.parameter_values[3:end]) + out = get_variances_optim([.157,.999]) - out = get_moments(RBC_CME, vcat([.157,.999],RBC_CME.parameter_values[3:end])) - sum(abs2,[out[1][6] - 1.4, out[2][5] - .21]) + out = get_statistics(RBC_CME, [.157,.999], parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.4, out[2][1] - .21]) - sol = Optim.optimize(x -> get_variances_optim(x,RBC_CME.parameter_values[3:end]), + sol = Optim.optimize(x -> get_variances_optim(x), [0,0.95], [1,1], [.16, .999], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox([get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[3:end]))[1][6] - get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[3:end]))[2][5]],[1.45,.2],rtol = 1e-6) + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]),[[1.45],[.2]],atol = 1e-6) # function combining targets for SS, St.Dev., and parameter - function get_variances_optim2(x,p) - out = get_moments(RBC_CME, vcat(x,p)) - sum(abs2,[out[1][6] - 1.45, out[2][5] - .2, x[3] - .02]) + function get_variances_optim2(x) + out = get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:3], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.45, out[2][1] - .2, x[3] - .02]) end - out = get_variances_optim2([.157,.999,.022],RBC_CME.parameter_values[4:end]) + out = get_variances_optim2([.157,.999,.022]) - sol = Optim.optimize(x -> get_variances_optim2(x, RBC_CME.parameter_values[4:end]), + sol = Optim.optimize(x -> get_variances_optim2(x), [0,0.95,0], [1,1,1], [.16, .999,.022], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox([get_moments(RBC_CME, vcat(sol.minimizer, RBC_CME.parameter_values[4:end]))[1][6] - get_moments(RBC_CME, vcat(sol.minimizer, RBC_CME.parameter_values[4:end]))[2][5] - sol.minimizer[3]],[1.45,.2,.02],rtol = 1e-6) + @test isapprox([get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:3], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sol.minimizer[3]],[[1.45],[.2],.02],atol = 1e-6) end RBC_CME = nothing @@ -394,7 +388,7 @@ RBC_CME = nothing - data = simulate(RBC_CME, levels = true)[:,:,1] + data = simulate(RBC_CME)[:,:,1] observables = [:c,:k] @test isapprox(420.25039827148197,calculate_kalman_filter_loglikelihood(RBC_CME,data(observables),observables),rtol = 1e-5)