Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Specify bounds on multivariate distributions #1479

Open
wants to merge 10 commits into
base: master
Choose a base branch
from
1 change: 1 addition & 0 deletions Documentation/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ Release Date: TBD
- Fixes bug in `AgentPopulation` that caused discretization of distributions to not work. [1275](https://github.com/econ-ark/HARK/pull/1275)
- Adds support for distributions, booleans, and callables as parameters in the `Parameters` class. [1387](https://github.com/econ-ark/HARK/pull/1387)
- Removes a specific way of accounting for "employment" in the idiosyncratic-shocks income process. [1473](https://github.com/econ-ark/HARK/pull/1473)
- Improved tracking of the bounds of support for distributions, and (some) solvers now respect those bounds when computing the "worst outcome". [1479](https://github.com/econ-ark/HARK/pull/1479)

### 0.15.1

Expand Down
37 changes: 32 additions & 5 deletions HARK/Calibration/Income/IncomeProcesses.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,14 +75,27 @@ class LognormPermIncShk(DiscreteDistribution):

def __init__(self, sigma, n_approx, neutral_measure=False, seed=0):
# Construct an auxiliary discretized normal
logn_approx = MeanOneLogNormal(sigma).discretize(
lognormal_dstn = MeanOneLogNormal(sigma)
logn_approx = lognormal_dstn.discretize(
n_approx if sigma > 0.0 else 1, method="equiprobable", tail_N=0
)

limit = {
"dist": lognormal_dstn,
"method": "equiprobable",
"N": n_approx,
"endpoints": False,
"infimum": logn_approx.limit["infimum"],
"supremum": logn_approx.limit["supremum"],
}

# Change the pmv if necessary
if neutral_measure:
logn_approx.pmv = (logn_approx.atoms * logn_approx.pmv).flatten()

super().__init__(pmv=logn_approx.pmv, atoms=logn_approx.atoms, seed=seed)
super().__init__(
pmv=logn_approx.pmv, atoms=logn_approx.atoms, limit=limit, seed=seed
)


class MixtureTranIncShk(DiscreteDistribution):
Expand Down Expand Up @@ -111,15 +124,22 @@ class MixtureTranIncShk(DiscreteDistribution):
"""

def __init__(self, sigma, UnempPrb, IncUnemp, n_approx, seed=0):
dstn_approx = MeanOneLogNormal(sigma).discretize(
dstn_orig = MeanOneLogNormal(sigma)
dstn_approx = dstn_orig.discretize(
n_approx if sigma > 0.0 else 1, method="equiprobable", tail_N=0
)

if UnempPrb > 0.0:
dstn_approx = add_discrete_outcome_constant_mean(
dstn_approx, p=UnempPrb, x=IncUnemp
)

super().__init__(pmv=dstn_approx.pmv, atoms=dstn_approx.atoms, seed=seed)
super().__init__(
pmv=dstn_approx.pmv,
atoms=dstn_approx.atoms,
limit=dstn_approx.limit,
seed=seed,
)


class MixtureTranIncShk_HANK(DiscreteDistribution):
Expand Down Expand Up @@ -174,7 +194,12 @@ def __init__(
# Rescale the transitory shock values to account for new features
TranShkMean_temp = (1.0 - tax_rate) * labor * wage
dstn_approx.atoms *= TranShkMean_temp
super().__init__(pmv=dstn_approx.pmv, atoms=dstn_approx.atoms, seed=seed)
super().__init__(
pmv=dstn_approx.pmv,
atoms=dstn_approx.atoms,
limit=dstn_approx.limit,
seed=seed,
)


class BufferStockIncShkDstn(DiscreteDistributionLabeled):
Expand Down Expand Up @@ -240,6 +265,7 @@ def __init__(
var_names=["PermShk", "TranShk"],
pmv=joint_dstn.pmv,
atoms=joint_dstn.atoms,
limit=joint_dstn.limit,
seed=seed,
)

Expand Down Expand Up @@ -319,6 +345,7 @@ def __init__(
var_names=["PermShk", "TranShk"],
pmv=joint_dstn.pmv,
atoms=joint_dstn.atoms,
limit=joint_dstn.limit,
seed=seed,
)

Expand Down
34 changes: 26 additions & 8 deletions HARK/ConsumptionSaving/ConsIndShockModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -395,31 +395,45 @@ def solve_one_period_ConsPF(
return solution_now


def calc_worst_inc_prob(inc_shk_dstn):
def calc_worst_inc_prob(inc_shk_dstn, use_infimum=True):
"""Calculate the probability of the worst income shock.

Args:
inc_shk_dstn (DiscreteDistribution): Distribution of shocks to income.
use_infimum (bool): Indicator for whether to try to use the infimum of the limiting (true) income distribution.
"""
probs = inc_shk_dstn.pmv
perm, tran = inc_shk_dstn.atoms
income = perm * tran
worst_inc = np.min(income)
if use_infimum:
worst_inc = np.prod(inc_shk_dstn.limit["infimum"])
else:
worst_inc = np.min(income)
return np.sum(probs[income == worst_inc])


def calc_boro_const_nat(m_nrm_min_next, inc_shk_dstn, rfree, perm_gro_fac):
def calc_boro_const_nat(
m_nrm_min_next, inc_shk_dstn, rfree, perm_gro_fac, use_infimum=True
):
"""Calculate the natural borrowing constraint.

Args:
m_nrm_min_next (float): Minimum normalized market resources next period.
inc_shk_dstn (DiscreteDstn): Distribution of shocks to income.
rfree (float): Risk free interest factor.
perm_gro_fac (float): Permanent income growth factor.
use_infimum (bool): Indicator for whether to use the infimum of the limiting (true) income distribution
"""
perm, tran = inc_shk_dstn.atoms
temp_fac = (perm_gro_fac * np.min(perm)) / rfree
return (m_nrm_min_next - np.min(tran)) * temp_fac
if use_infimum:
perm_min, tran_min = inc_shk_dstn.limit["infimum"]
else:
perm, tran = inc_shk_dstn.atoms
perm_min = np.min(perm)
tran_min = np.min(tran)

temp_fac = (perm_gro_fac * perm_min) / rfree
boro_cnst_nat = (m_nrm_min_next - tran_min) * temp_fac
return boro_cnst_nat


def calc_m_nrm_min(boro_const_art, boro_const_nat):
Expand Down Expand Up @@ -823,7 +837,7 @@ def solve_one_period_ConsKinkedR(
DiscFacEff = DiscFac * LivPrb # "effective" discount factor

# Calculate the probability that we get the worst possible income draw
WorstIncPrb = calc_worst_inc_prob(IncShkDstn)
WorstIncPrb = calc_worst_inc_prob(IncShkDstn, use_infimum=False)
# WorstIncPrb is the "Weierstrass p" concept: the odds we get the WORST thing
Ex_IncNext = expected(lambda x: x["PermShk"] * x["TranShk"], IncShkDstn)
hNrmNow = calc_human_wealth(solution_next.hNrm, PermGroFac, Rsave, Ex_IncNext)
Expand All @@ -835,7 +849,11 @@ def solve_one_period_ConsKinkedR(

# Calculate the minimum allowable value of money resources in this period
BoroCnstNat = calc_boro_const_nat(
solution_next.mNrmMin, IncShkDstn, Rboro, PermGroFac
solution_next.mNrmMin,
IncShkDstn,
Rboro,
PermGroFac,
use_infimum=False,
)
# Set the minimum allowable (normalized) market resources based on the natural
# and artificial borrowing constraints
Expand Down
4 changes: 2 additions & 2 deletions HARK/ConsumptionSaving/tests/test_ConsNewKeynesianModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def test_calc_tran_matrix(self):
AggC = np.dot(gridc.flatten(), vecDstn) # Aggregate Consumption
AggA = np.dot(grida.flatten(), vecDstn) # Aggregate Assets

self.assertAlmostEqual(AggA[0], 0.82984, places=4)
self.assertAlmostEqual(AggA[0], 0.82960, places=4)
self.assertAlmostEqual(AggC[0], 1.00780, places=4)


Expand All @@ -52,6 +52,6 @@ def test_calc_jacobian(self):
Agent.compute_steady_state()
CJAC_Perm, AJAC_Perm = Agent.calc_jacobian("PermShkStd", 50)

self.assertAlmostEqual(CJAC_Perm.T[30][29], -0.10503, places=HARK_PRECISION)
self.assertAlmostEqual(CJAC_Perm.T[30][29], -0.10508, places=HARK_PRECISION)
self.assertAlmostEqual(CJAC_Perm.T[30][30], 0.10316, places=HARK_PRECISION)
self.assertAlmostEqual(CJAC_Perm.T[30][31], 0.09059, places=HARK_PRECISION)
26 changes: 14 additions & 12 deletions HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,17 +50,17 @@ def test_ConsIndShockSolverBasic(self):

self.assertAlmostEqual(
LifecycleExample.solution[0].cFunc(1).tolist(),
0.75062,
0.75074,
places=HARK_PRECISION,
)
self.assertAlmostEqual(
LifecycleExample.solution[1].cFunc(1).tolist(),
0.75864,
0.75876,
places=HARK_PRECISION,
)
self.assertAlmostEqual(
LifecycleExample.solution[2].cFunc(1).tolist(),
0.76812,
0.76824,
places=HARK_PRECISION,
)

Expand Down Expand Up @@ -223,13 +223,15 @@ def test_infinite_horizon(self):
IndShockExample.solve()

self.assertAlmostEqual(
IndShockExample.solution[0].mNrmStE, 1.54882, places=HARK_PRECISION
)
self.assertAlmostEqual(
IndShockExample.solution[0].cFunc.functions[0].x_list[0],
-0.25018,
places=HARK_PRECISION,
IndShockExample.solution[0].mNrmStE, 1.54765, places=HARK_PRECISION
)
# self.assertAlmostEqual(
# IndShockExample.solution[0].cFunc.functions[0].x_list[0],
# -0.25018,
# places=HARK_PRECISION,
# )
# This test is commented out because it was trivialized by revisions to the "worst income shock" code.
# The bottom x value of the unconstrained consumption function will definitely be zero, so this is pointless.

IndShockExample.track_vars = ["aNrm", "mNrm", "cNrm", "pLvl"]
IndShockExample.initialize_sim()
Expand Down Expand Up @@ -297,7 +299,7 @@ def test_lifecyle(self):

self.assertAlmostEqual(
LifecycleExample.solution[5].cFunc(3).tolist(),
2.12998,
2.13004,
places=HARK_PRECISION,
)

Expand Down Expand Up @@ -371,15 +373,15 @@ def test_cyclical(self):

self.assertAlmostEqual(
CyclicalExample.solution[3].cFunc(3).tolist(),
1.59584,
1.59597,
places=HARK_PRECISION,
)

CyclicalExample.initialize_sim()
CyclicalExample.simulate()

self.assertAlmostEqual(
CyclicalExample.state_now["aLvl"][1], 3.32431, places=HARK_PRECISION
CyclicalExample.state_now["aLvl"][1], 3.31950, places=HARK_PRECISION
)


Expand Down
4 changes: 2 additions & 2 deletions HARK/ConsumptionSaving/tests/test_modelcomparisons.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def setUp(self):
test_dictionary["UnempPrb"] = 0.0
test_dictionary["T_cycle"] = 1
test_dictionary["T_retire"] = 0
test_dictionary["BoroCnstArt"] = None
test_dictionary["BoroCnstArt"] = 0.0

InfiniteType = IndShockConsumerType(**test_dictionary)
InfiniteType.cycles = 0
Expand Down Expand Up @@ -79,7 +79,7 @@ def diffFunc(m):
m
) - self.InfiniteType.cFunc[0](m)

points = np.arange(0.5, mNrmMinInf + aXtraMin, mNrmMinInf + aXtraMax)
points = np.arange(0.5, mNrmMinInf + aXtraMax, mNrmMinInf + aXtraMin)
difference = diffFunc(points)
max_difference = np.max(np.abs(difference))

Expand Down
Loading
Loading