Commit

Merge pull request #1396 from MridulS/upgrade_ruff

Simplify pre-commit usage

mnwhite authored Mar 11, 2024
2 parents de61ed1 + bca2586 commit bbb07a5
Showing 47 changed files with 282 additions and 285 deletions.
44 changes: 6 additions & 38 deletions .pre-commit-config.yaml
@@ -2,49 +2,17 @@ exclude: Documentation/example_notebooks/

repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.1.4
rev: v0.3.2
hooks:
- id: ruff
types_or: [jupyter]
types_or: [ python, pyi, jupyter ]
args:
- --fix
- id: ruff-format
args: [--check]
types_or: [jupyter]

- repo: https://github.com/psf/black
rev: 23.7.0
hooks:
- id: black
exclude: ^examples/

- repo: https://github.com/asottile/pyupgrade
rev: v3.10.1
hooks:
- id: pyupgrade
args: ["--py38-plus"]
exclude: ^examples/

- repo: https://github.com/asottile/blacken-docs
rev: 1.15.0
hooks:
- id: blacken-docs
exclude: ^examples/

- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
name: isort (python)
args: ["--profile", "black", "--filter-files", "--skip", "__init__.py"]
exclude: ^examples/

- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.1
hooks:
- id: prettier
exclude: ^examples/
types_or: [ python, pyi, jupyter ]

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.5.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
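With this merge, the dedicated black, pyupgrade, blacken-docs, isort, and prettier hooks are dropped; ruff (with --fix) and ruff-format at rev v0.3.2 handle Python, stub, and notebook files, alongside the pre-commit-hooks basics at v4.5.0. The sketch below reconstructs what the simplified .pre-commit-config.yaml plausibly looks like after the change; it is assembled from the hunk above rather than copied from the committed file, so treat the exact layout and any omitted options (such as the old --check argument to ruff-format) as assumptions.

```yaml
# Hypothetical reconstruction of the simplified config after this PR (not the verbatim file).
exclude: Documentation/example_notebooks/

repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.3.2
    hooks:
      - id: ruff                      # lint Python, stub, and notebook files and apply safe fixes
        types_or: [python, pyi, jupyter]
        args:
          - --fix
      - id: ruff-format               # drop-in formatter replacing the separate black hook
        types_or: [python, pyi, jupyter]

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
```

Assuming this reconstruction is accurate, the hooks can be exercised locally with `pre-commit run --all-files`, the same entry point the old multi-tool configuration used; which ruff rules stand in for isort and pyupgrade depends on the project's ruff settings, which are outside this diff.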
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsAggShockModel.py
@@ -4,6 +4,7 @@
basic solver. Also includes a subclass of Market called CobbDouglas economy,
used for solving "macroeconomic" models with aggregate shocks.
"""

from copy import deepcopy

import numpy as np
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsGenIncProcessModel.py
@@ -4,6 +4,7 @@
ConsIndShockModel by explicitly tracking persistent income as a state variable,
and allows (log) persistent income to follow an AR1 process rather than random walk.
"""

import numpy as np

from HARK import AgentType, make_one_period_oo_solver
8 changes: 2 additions & 6 deletions HARK/ConsumptionSaving/ConsIndShockModel.py
@@ -2568,9 +2568,7 @@ def calc_transition_matrix(self, shk_dstn=None):
if not hasattr(shk_dstn, "pmv"):
shk_dstn = self.IncShkDstn

self.cPol_Grid = (
[]
) # List of consumption policy grids for each period in T_cycle
self.cPol_Grid = [] # List of consumption policy grids for each period in T_cycle
self.aPol_Grid = [] # List of asset policy grids for each period in T_cycle
self.tran_matrix = [] # List of transition matrices

@@ -2951,9 +2949,7 @@ def J_from_F(F):
else:
peturbed_list = [getattr(self, shk_param) + dx] + (
params["T_cycle"] - 1
) * [
getattr(self, shk_param)
] # Sequence of interest rates the agent
) * [getattr(self, shk_param)] # Sequence of interest rates the agent

setattr(ZerothColAgent, shk_param, peturbed_list) # Set attribute to agent

4 changes: 3 additions & 1 deletion HARK/ConsumptionSaving/ConsLabeledModel.py
@@ -897,7 +897,9 @@ class ConsRiskyAssetLabeledSolver(ConsIndShockLabeledSolver):
"""

solution_next: ConsumerSolutionLabeled # solution to next period's problem
ShockDstn: DiscreteDistributionLabeled # distribution of shocks to income and returns
ShockDstn: (
DiscreteDistributionLabeled # distribution of shocks to income and returns
)
LivPrb: float # survival probability
DiscFac: float # intertemporal discount factor
CRRA: float # coefficient of relative risk aversion
20 changes: 10 additions & 10 deletions HARK/ConsumptionSaving/ConsLaborModel.py
@@ -8,6 +8,7 @@
productivity shocks. Agents choose their quantities of labor and consumption after
observing both of these shocks, so the transitory shock is a state variable.
"""

import sys
from copy import copy

@@ -343,7 +344,6 @@ def uPinv(X):


class LaborIntMargConsumerType(IndShockConsumerType):

"""
A class representing agents who make a decision each period about how much
to consume vs save and how much labor to supply (as a fraction of their time).
@@ -737,13 +737,13 @@ def plot_LbrFunc(self, t, bMin=None, bMax=None, ShkSet=None):
init_labor_intensive["LbrCostCoeffs"] = [-1.0]
init_labor_intensive["WageRte"] = [1.0]
init_labor_intensive["IncUnemp"] = 0.0
init_labor_intensive[
"TranShkCount"
] = 15 # Crank up permanent shock count - Number of points in discrete approximation to transitory income shocks
init_labor_intensive["TranShkCount"] = (
15 # Crank up permanent shock count - Number of points in discrete approximation to transitory income shocks
)
init_labor_intensive["PermShkCount"] = 16 # Crank up permanent shock count
init_labor_intensive[
"aXtraCount"
] = 200 # May be important to have a larger number of gridpoints (than 48 initially)
init_labor_intensive["aXtraCount"] = (
200 # May be important to have a larger number of gridpoints (than 48 initially)
)
init_labor_intensive["aXtraMax"] = 80.0
init_labor_intensive["BoroCnstArt"] = None

@@ -791,6 +791,6 @@ def plot_LbrFunc(self, t, bMin=None, bMax=None, ShkSet=None):
init_labor_lifecycle["LbrCostCoeffs"] = np.array([-2.0, 0.4])
init_labor_lifecycle["T_cycle"] = 10
# init_labor_lifecycle['T_retire'] = 7 # IndexError at line 774 in interpolation.py.
init_labor_lifecycle[
"T_age"
] = 11 # Make sure that old people die at terminal age and don't turn into newborns!
init_labor_lifecycle["T_age"] = (
11 # Make sure that old people die at terminal age and don't turn into newborns!
)
8 changes: 2 additions & 6 deletions HARK/ConsumptionSaving/ConsMarkovModel.py
@@ -470,9 +470,7 @@ def calc_EndOfPrdvP(self):
np.logical_and(self.possible_transitions[:, j], which_states)
): # only consider a future state if one of the relevant states could transition to it
EndOfPrdvP_all[j, :] = self.EndOfPrdvPfunc_list[j](aGrid)
if (
self.CubicBool
): # Add conditional end-of-period (marginal) marginal value to the arrays
if self.CubicBool: # Add conditional end-of-period (marginal) marginal value to the arrays
EndOfPrdvPP_all[j, :] = self.EndOfPrdvPfunc_list[j].derivativeX(
aGrid
)
@@ -606,9 +604,7 @@ def make_solution(self, cNrm, mNrm):
solution_cond = ConsumerSolution(
cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow
)
if (
self.CubicBool
): # Add the state-conditional marginal marginal value function (if desired)
if self.CubicBool: # Add the state-conditional marginal marginal value function (if desired)
solution_cond = self.add_vPPfunc(solution_cond)

# Add the current-state-conditional solution to the overall period solution
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsMedModel.py
@@ -1,6 +1,7 @@
"""
Consumption-saving models that also include medical spending.
"""

from copy import deepcopy

import numpy as np
2 changes: 1 addition & 1 deletion HARK/ConsumptionSaving/ConsPortfolioFrameModel.py
@@ -161,7 +161,7 @@ def birth_pLvlNow(self, N):
{
"mean": init_portfolio["RiskyAvg"],
"std": init_portfolio["RiskyStd"],
}
},
# seed=self.RNG.integers(0, 2 ** 31 - 1) : TODO: Seed logic
).discretize(init_portfolio["RiskyCount"], method="equiprobable"),
aggregate=True,
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsPortfolioModel.py
@@ -3,6 +3,7 @@
agents who must allocate their resources among consumption, saving in a risk-free
asset (with a low return), and saving in a risky asset (with higher average return).
"""

from copy import deepcopy

import numpy as np
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsPrefShockModel.py
@@ -6,6 +6,7 @@
2) A combination of (1) and ConsKinkedR, demonstrating how to construct a new model
by inheriting from multiple classes.
"""

import numpy as np

from HARK import make_one_period_oo_solver
3 changes: 2 additions & 1 deletion HARK/ConsumptionSaving/ConsRepAgentModel.py
@@ -4,6 +4,7 @@
take a heterogeneous agents approach. In RA models, all attributes are either
time invariant or exist on a short cycle; models must be infinite horizon.
"""

import numpy as np

from HARK.ConsumptionSaving.ConsIndShockModel import (
@@ -12,7 +13,7 @@
init_idiosyncratic_shocks,
)
from HARK.ConsumptionSaving.ConsMarkovModel import MarkovConsumerType
from HARK.distribution import MarkovProcess, Uniform
from HARK.distribution import MarkovProcess
from HARK.interpolation import LinearInterp, MargValueFuncCRRA

__all__ = ["RepAgentConsumerType", "RepAgentMarkovConsumerType"]
1 change: 1 addition & 0 deletions HARK/ConsumptionSaving/ConsRiskyAssetModel.py
@@ -5,6 +5,7 @@
simulation methods. It is meant as a container of methods for dealing with
risky assets that will be useful to models what will inherit from it.
"""

from dataclasses import dataclass

import numpy as np
9 changes: 5 additions & 4 deletions HARK/ConsumptionSaving/ConsRiskyContribModel.py
@@ -23,6 +23,7 @@
}
"""

from copy import deepcopy

import numpy as np
@@ -425,9 +426,9 @@ def sim_one_period(self):
# Advance time for all agents
self.t_age = self.t_age + 1 # Age all consumers by one period
self.t_cycle = self.t_cycle + 1 # Age all consumers within their cycle
self.t_cycle[
self.t_cycle == self.T_cycle
] = 0 # Resetting to zero for those who have reached the end
self.t_cycle[self.t_cycle == self.T_cycle] = (
0 # Resetting to zero for those who have reached the end
)

def get_states_Reb(self):
"""
@@ -513,7 +514,7 @@ def get_states_Sha(self):

# Post-states are assets after rebalancing

if not "tau" in self.time_vary:
if "tau" not in self.time_vary:
mNrmTilde, nNrmTilde = rebalance_assets(
self.controls["dfrac"],
self.state_now["mNrm"],
7 changes: 4 additions & 3 deletions HARK/ConsumptionSaving/TractableBufferStockModel.py
@@ -20,6 +20,7 @@
Despite the non-standard solution method, the iterative process can be embedded
in the HARK framework, as shown below.
"""

from copy import copy

import numpy as np
@@ -621,9 +622,9 @@ def sim_birth(self, which_agents):
self.shocks["eStateNow"][which_agents] = 1.0
# How many periods since each agent was born
self.t_age[which_agents] = 0
self.t_cycle[
which_agents
] = 0 # Which period of the cycle each agent is currently in
self.t_cycle[which_agents] = (
0 # Which period of the cycle each agent is currently in
)
return None

def sim_death(self):
5 changes: 4 additions & 1 deletion HARK/ConsumptionSaving/tests/test_ConsAggShockModel.py
@@ -23,7 +23,10 @@ def setUp(self):

# Make agents heterogeneous in their discount factor
self.agents = distribute_params(
agent, "DiscFac", 3, Uniform(bot=0.90, top=0.94) # Impatient agents
agent,
"DiscFac",
3,
Uniform(bot=0.90, top=0.94), # Impatient agents
)

# Make an economy with those agents living in it
6 changes: 3 additions & 3 deletions HARK/ConsumptionSaving/tests/test_ConsMarkovModel.py
@@ -56,9 +56,9 @@ def setUp(self):

init_serial_unemployment = copy(init_idiosyncratic_shocks)
init_serial_unemployment["MrkvArray"] = [MrkvArray]
init_serial_unemployment[
"UnempPrb"
] = 0.0 # to make income distribution when employed
init_serial_unemployment["UnempPrb"] = (
0.0 # to make income distribution when employed
)
init_serial_unemployment["global_markov"] = False
self.model = MarkovConsumerType(**init_serial_unemployment)
self.model.cycles = 0
10 changes: 4 additions & 6 deletions HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py
@@ -169,9 +169,9 @@ def test_GICRawFails(self):
GICRaw_fail_dictionary = dict(self.base_params)
GICRaw_fail_dictionary["Rfree"] = 1.08
GICRaw_fail_dictionary["PermGroFac"] = [1.00]
GICRaw_fail_dictionary[
"cycles"
] = 0 # cycles=0 makes this an infinite horizon consumer
GICRaw_fail_dictionary["cycles"] = (
0 # cycles=0 makes this an infinite horizon consumer
)

GICRawFailExample = IndShockConsumerType(**GICRaw_fail_dictionary)

@@ -896,9 +896,7 @@ def test_calc_tran_matrix(self):
asset = example1.aPol_Grid # Normalized Asset Policy Grid

example1.calc_ergodic_dist()
vecDstn = (
example1.vec_erg_dstn
) # Distribution of market resources and permanent income as a vector (m*p)x1 vector where
vecDstn = example1.vec_erg_dstn # Distribution of market resources and permanent income as a vector (m*p)x1 vector where

# Compute Aggregate Consumption and Aggregate Assets
gridc = np.zeros((len(c), len(p)))
6 changes: 3 additions & 3 deletions HARK/ConsumptionSaving/tests/test_IndShockConsumerTypeFast.py
@@ -127,9 +127,9 @@ def test_GICRawFails(self):
GICRaw_fail_dictionary = dict(self.base_params)
GICRaw_fail_dictionary["Rfree"] = 1.08
GICRaw_fail_dictionary["PermGroFac"] = [1.00]
GICRaw_fail_dictionary[
"cycles"
] = 0 # cycles=0 makes this an infinite horizon consumer
GICRaw_fail_dictionary["cycles"] = (
0 # cycles=0 makes this an infinite horizon consumer
)

GICRawFailExample = IndShockConsumerTypeFast(**GICRaw_fail_dictionary)

6 changes: 4 additions & 2 deletions HARK/ConsumptionSaving/tests/test_SmallOpenEconomy.py
@@ -1,7 +1,6 @@
import copy
import unittest

import numpy as np

from HARK import distribute_params
from HARK.ConsumptionSaving.ConsAggShockModel import (
@@ -20,7 +19,10 @@ def test_small_open(self):

# Make agents heterogeneous in their discount factor
agents = distribute_params(
agent, "DiscFac", 3, Uniform(bot=0.90, top=0.94) # Impatient agents
agent,
"DiscFac",
3,
Uniform(bot=0.90, top=0.94), # Impatient agents
)

# Make an economy with those agents living in it
8 changes: 3 additions & 5 deletions HARK/ConsumptionSaving/tests/test_modelInits.py
@@ -2,7 +2,6 @@
This file tests whether HARK's models are initialized correctly.
"""


# Bring in modules we need
import unittest
from copy import copy
@@ -18,7 +17,6 @@
init_lifecycle,
)
from HARK.ConsumptionSaving.ConsMarkovModel import MarkovConsumerType
from HARK.utilities import plot_funcs, plot_funcs_der


class testInitialization(unittest.TestCase):
@@ -92,9 +90,9 @@ def test_MarkovConsumerType(self):
# Make a consumer with serially correlated unemployment, subject to boom and bust cycles
init_serial_unemployment = copy(init_idiosyncratic_shocks)
init_serial_unemployment["MrkvArray"] = [MrkvArray]
init_serial_unemployment[
"UnempPrb"
] = 0.0 # to make income distribution when employed
init_serial_unemployment["UnempPrb"] = (
0.0 # to make income distribution when employed
)
init_serial_unemployment["global_markov"] = False
SerialUnemploymentExample = MarkovConsumerType(**init_serial_unemployment)
except:
