diff --git a/README.md b/README.md index 8d5303ca..773a99a4 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ Grid-based demographic prisoner's dilemma model, demonstrating how simple rules ### [Epstein Civil Violence Model](https://github.com/projectmesa/mesa-examples/tree/main/examples/epstein_civil_violence) -Joshua Epstein's [model](http://www.uvm.edu/~pdodds/files/papers/others/2002/epstein2002a.pdf) of how a decentralized uprising can be suppressed or reach a critical mass of support. +Joshua Epstein's [model](http://www.uvm.edu/~pdodds/files/papers/others/2002/epstein2002a.pdf) of how a decentralized uprising can be suppressed or reach a critical mass of support. This model simulates the interactions between citizens and law enforcement in a grid space, considering factors such as citizen density, cop density, vision, legitimacy, and arrest probability. ### [Forest Fire Model](https://github.com/projectmesa/mesa-examples/tree/main/examples/forest_fire) @@ -85,7 +85,7 @@ This is Epstein & Axtell's Sugarscape model with Traders, a detailed description ### [Wolf-Sheep Predation Model](https://github.com/projectmesa/mesa-examples/tree/main/examples/wolf_sheep) -Implementation of an ecological model of predation and reproduction, based on the NetLogo [Wolf Sheep Predation](http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation) model. +Implementation of an ecological model of predation and reproduction, based on the NetLogo [Wolf Sheep Predation](http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation) model. This model simulates the dynamics of predator-prey interactions between wolves and sheep, including factors such as energy expenditure, reproduction, and grass regrowth. ## Continuous Space Examples diff --git a/examples/bank_reserves/Readme.md b/examples/bank_reserves/Readme.md index 27570d20..c38c7bec 100644 --- a/examples/bank_reserves/Readme.md +++ b/examples/bank_reserves/Readme.md @@ -8,7 +8,7 @@ The model demonstrates the following Mesa features: - MultiGrid for creating shareable space for agents - DataCollector for collecting data on individual model runs - Slider for adjusting initial model parameters - - ModularServer for visualization of agent interaction + - Solara and ABMSimulator for visualization of agent interaction and simulation. - Agent object inheritance - Using a BatchRunner to collect data on multiple combinations of model parameters @@ -25,7 +25,7 @@ To install the dependencies use pip and the requirements.txt in this directory. To run the model interactively, use `mesa runserver` in this directory: ``` - $ mesa runserver + $ solara run app.py ``` Then open your browser to [http://127.0.0.1:8521/](http://127.0.0.1:8521/), select the model parameters, press Reset, then Start. @@ -46,9 +46,8 @@ To update the parameters to test other parameter sweeps, edit the list of parame * ``bank_reserves/random_walker.py``: This defines a class that inherits from the Mesa Agent class. The main purpose is to provide a method for agents to move randomly one cell at a time. * ``bank_reserves/agents.py``: Defines the People and Bank classes. * ``bank_reserves/model.py``: Defines the Bank Reserves model and the DataCollector functions. -* ``bank_reserves/server.py``: Sets up the interactive visualization server. -* ``run.py``: Launches a model visualization server. -* ``batch_run.py``: Basically the same as model.py, but includes a Mesa BatchRunner. The result of the batch run will be a .csv file with the data from every step of every run. 
+* ``bank_reserves/app.py``: Sets up the interactive visualization server and launches it using solara. +* ``bank_reserves/batch_run.py``: Basically the same as model.py, but includes a Mesa BatchRunner. The result of the batch run will be a .csv file with the data from every step of every run. ## Further Reading diff --git a/examples/bank_reserves/bank_reserves/__init__.py b/examples/bank_reserves/bank_reserves/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/bank_reserves/bank_reserves/agents.py b/examples/bank_reserves/bank_reserves/agents.py index 5d81a65c..b80c014d 100644 --- a/examples/bank_reserves/bank_reserves/agents.py +++ b/examples/bank_reserves/bank_reserves/agents.py @@ -1,18 +1,45 @@ -""" -The following code was adapted from the Bank Reserves model included in Netlogo -Model information can be found at: -http://ccl.northwestern.edu/netlogo/models/BankReserves -Accessed on: November 2, 2017 -Author of NetLogo code: - Wilensky, U. (1998). NetLogo Bank Reserves model. - http://ccl.northwestern.edu/netlogo/models/BankReserves. - Center for Connected Learning and Computer-Based Modeling, - Northwestern University, Evanston, IL. -""" - import mesa -from .random_walk import RandomWalker + +class RandomWalker(mesa.Agent): + """ + Class implementing random walker methods in a generalized manner. + Not intended to be used on its own, but to inherit its methods to multiple + other agents. + """ + + grid = None + x = None + y = None + # use a Moore neighborhood + moore = True + + def __init__(self, unique_id, pos, model, moore=True): + """ + grid: The MultiGrid object in which the agent lives. + x: The agent's current x coordinate + y: The agent's current y coordinate + moore: If True, may move in all 8 directions. + Otherwise, only up, down, left, right. + """ + super().__init__(unique_id, model) + self.pos = pos + self.moore = moore + + def random_move(self): + """ + Step one cell in any allowable direction. + """ + # Pick the next cell from the adjacent cells. 
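+        # Note: the third positional argument (True) is include_center, so the agent's
+        # own cell is returned as well; the empty-cell filter below removes it again.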
+ next_moves = self.model.grid.get_neighborhood(self.pos, self.moore, True) + # Filter out occupied cells + empty_moves = [ + move for move in next_moves if self.model.grid.is_cell_empty(move) + ] + if empty_moves: + next_move = self.random.choice(empty_moves) + # Now move: + self.model.grid.move_agent(self, next_move) class Bank(mesa.Agent): @@ -42,9 +69,9 @@ def bank_balance(self): # subclass of RandomWalker, which is subclass to Mesa Agent class Person(RandomWalker): - def __init__(self, unique_id, model, moore, bank, rich_threshold): + def __init__(self, unique_id, pos, model, moore, bank, rich_threshold): # init parent class with required parameters - super().__init__(unique_id, model, moore=moore) + super().__init__(unique_id, pos, model, moore=moore) # the amount each person has in savings self.savings = 0 # total loan amount person has outstanding @@ -168,7 +195,7 @@ def take_out_loan(self, amount): outstanding loans""" self.loans += amount self.wallet += amount - # decresae the amount the bank can loan right now + # decrease the amount the bank can loan right now self.bank.bank_to_loan -= amount # increase the bank's outstanding loans self.bank.bank_loans += amount diff --git a/examples/bank_reserves/bank_reserves/app.py b/examples/bank_reserves/bank_reserves/app.py new file mode 100644 index 00000000..a7c407e1 --- /dev/null +++ b/examples/bank_reserves/bank_reserves/app.py @@ -0,0 +1,77 @@ +import solara +from mesa.visualization.solara_viz import SolaraViz, make_text +from mesa.experimental.devs.simulator import ABMSimulator +from .model import BankReserves +from .agents import Person, Bank + + +def agent_portrayal(agent): + if isinstance(agent, Person): + color = "tab:blue" + elif isinstance(agent, Bank): + color = "tab:green" + else: + color = "tab:purple" # Fallback color + + return { + "color": color, + "size": 50, + } + + +def get_rich_poor_ratio(model): + if model.schedule is None: + return "Rich/Poor Ratio: N/A" + rich_count = sum( + isinstance(agent, Person) and agent.savings > model.rich_threshold + for agent in model.schedule.agents + ) + poor_count = sum( + isinstance(agent, Person) and agent.loans > 10 + for agent in model.schedule.agents + ) + ratio = rich_count / poor_count if poor_count > 0 else float("inf") + return f"Rich/Poor Ratio: {ratio:.2f}" + + +# Define the SolaraViz visualization +page = SolaraViz( + model_class=BankReserves, + model_params={ + "height": 20, + "width": 20, + "init_people": 2, + "rich_threshold": 10, + "reserve_percent": 50, + }, + measures=[ + make_text(get_rich_poor_ratio), + ], + name="Bank Reserves Model", + agent_portrayal=agent_portrayal, +) + + +@solara.component +def App(): + solara.Title("Bank Reserves Model") + solara.Markdown("# Bank Reserves Model") + solara.Markdown("This is a visualization of the Bank Reserves Model.") + + # Add color legend + solara.Markdown(""" + ## Color Legend + - Blue: Persons + - Green: Bank + - Purple: Other + """) + + page.show() + + +if __name__ == "__main__": + model = BankReserves(seed=15) + simulator = ABMSimulator() + simulator.setup(model) + simulator.run_for(time_delta=100) + App() diff --git a/examples/bank_reserves/batch_run.py b/examples/bank_reserves/bank_reserves/batch_run.py similarity index 76% rename from examples/bank_reserves/batch_run.py rename to examples/bank_reserves/bank_reserves/batch_run.py index 4a0115bb..853bdc02 100644 --- a/examples/bank_reserves/batch_run.py +++ b/examples/bank_reserves/bank_reserves/batch_run.py @@ -25,74 +25,58 @@ """ import itertools - import mesa 
import numpy as np import pandas as pd -from bank_reserves.agents import Bank, Person +from .agents import Bank, Person # Start of datacollector functions def get_num_rich_agents(model): """list of rich agents""" - rich_agents = [a for a in model.schedule.agents if a.savings > model.rich_threshold] - # return number of rich agents return len(rich_agents) def get_num_poor_agents(model): """list of poor agents""" - poor_agents = [a for a in model.schedule.agents if a.loans > 10] - # return number of poor agents return len(poor_agents) def get_num_mid_agents(model): """list of middle class agents""" - mid_agents = [ a for a in model.schedule.agents if a.loans < 10 and a.savings < model.rich_threshold ] - # return number of middle class agents return len(mid_agents) def get_total_savings(model): """list of amounts of all agents' savings""" - agent_savings = [a.savings for a in model.schedule.agents] - # return the sum of agents' savings return np.sum(agent_savings) def get_total_wallets(model): """list of amounts of all agents' wallets""" - agent_wallets = [a.wallet for a in model.schedule.agents] - # return the sum of all agents' wallets return np.sum(agent_wallets) def get_total_money(model): """sum of all agents' wallets""" - wallet_money = get_total_wallets(model) - # sum of all agents' savings savings_money = get_total_savings(model) - # return sum of agents' wallets and savings for total money return wallet_money + savings_money def get_total_loans(model): """list of amounts of all agents' loans""" - agent_loans = [a.loans for a in model.schedule.agents] - # return sum of all agents' loans return np.sum(agent_loans) @@ -105,17 +89,10 @@ def track_run(model): class BankReservesModel(mesa.Model): - # id generator to track run number in batch run data id_gen = itertools.count(1) - - # grid height grid_h = 20 - # grid width grid_w = 20 - """init parameters "init_people", "rich_threshold", and "reserve_percent" - are all set via Slider""" - def __init__( self, height=grid_h, @@ -131,10 +108,8 @@ def __init__( self.init_people = init_people self.schedule = mesa.time.RandomActivation(self) self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True) - # rich_threshold is the amount of savings a person needs to be considered "rich" self.rich_threshold = rich_threshold self.reserve_percent = reserve_percent - # see datacollector functions above self.datacollector = mesa.DataCollector( model_reporters={ "Rich": get_num_rich_agents, @@ -147,30 +122,22 @@ def __init__( "Model Params": track_params, "Run": track_run, }, - agent_reporters={"Wealth": "wealth"}, + agent_reporters={"wealth": "wealth"}, ) - # create a single bank for the model self.bank = Bank(1, self, self.reserve_percent) - # create people for the model according to number of people set by user for i in range(self.init_people): - # set x coordinate as a random number within the width of the grid x = self.random.randrange(self.width) - # set y coordinate as a random number within the height of the grid y = self.random.randrange(self.height) p = Person(i, (x, y), self, True, self.bank, self.rich_threshold) - # place the Person object on the grid at coordinates (x, y) self.grid.place_agent(p, (x, y)) - # add the Person object to the model schedule self.schedule.add(p) self.running = True def step(self): - # collect data self.datacollector.collect(self) - # tell all the agents in the model to run their step function self.schedule.step() def run_model(self): @@ -178,20 +145,55 @@ def run_model(self): self.step() -# parameter lists 
for each parameter to be tested in batch run br_params = { "init_people": [25, 100], "rich_threshold": [5, 10], - "reserve_percent": 5, + "reserve_percent": [5, 10, 20, 50], } +def custom_batch_run( + model_cls, parameters, iterations, max_steps, model_reporters, agent_reporters +): + results = [] + for params in ( + dict(zip(parameters, x)) for x in itertools.product(*parameters.values()) + ): + for i in range(iterations): + model = model_cls(**params) + for step in range(max_steps): + model.step() + model_data = { + var: reporter(model) for var, reporter in model_reporters.items() + } + agent_data = [ + {var: getattr(agent, var) for var in agent_reporters} + for agent in model.schedule.agents + ] + results.append({**model_data, "Agent Data": agent_data}) + return results + + def main(): - # The existing batch run logic here - data = mesa.batch_run( + data = custom_batch_run( BankReservesModel, br_params, + iterations=5, + max_steps=100, + model_reporters={ + "Rich": get_num_rich_agents, + "Poor": get_num_poor_agents, + "Middle Class": get_num_mid_agents, + "Savings": get_total_savings, + "Wallets": get_total_wallets, + "Money": get_total_money, + "Loans": get_total_loans, + "Model Params": track_params, + "Run": track_run, + }, + agent_reporters={"wealth": "wealth"}, ) + br_df = pd.DataFrame(data) br_df.to_csv("BankReservesModel_Data.csv") diff --git a/examples/bank_reserves/bank_reserves/model.py b/examples/bank_reserves/bank_reserves/model.py index 421854fb..68be3ade 100644 --- a/examples/bank_reserves/bank_reserves/model.py +++ b/examples/bank_reserves/bank_reserves/model.py @@ -1,111 +1,64 @@ -""" -The following code was adapted from the Bank Reserves model included in Netlogo -Model information can be found at: -http://ccl.northwestern.edu/netlogo/models/BankReserves -Accessed on: November 2, 2017 -Author of NetLogo code: - Wilensky, U. (1998). NetLogo Bank Reserves model. - http://ccl.northwestern.edu/netlogo/models/BankReserves. - Center for Connected Learning and Computer-Based Modeling, - Northwestern University, Evanston, IL. -""" - import mesa -import numpy as np +from mesa import Model +from mesa.time import RandomActivation +from mesa.space import SingleGrid +from mesa.experimental.devs.simulator import ABMSimulator from .agents import Bank, Person -""" -If you want to perform a parameter sweep, call batch_run.py instead of run.py. -For details see batch_run.py in the same directory as run.py. 
-""" - # Start of datacollector functions def get_num_rich_agents(model): """return number of rich agents""" - - rich_agents = [a for a in model.schedule.agents if a.savings > model.rich_threshold] + rich_agents = [ + a + for a in model.schedule.agents + if isinstance(a, Person) and a.savings > model.rich_threshold + ] return len(rich_agents) def get_num_poor_agents(model): """return number of poor agents""" - - poor_agents = [a for a in model.schedule.agents if a.loans > 10] + poor_agents = [ + a for a in model.schedule.agents if isinstance(a, Person) and a.loans > 10 + ] return len(poor_agents) def get_num_mid_agents(model): """return number of middle class agents""" - mid_agents = [ a for a in model.schedule.agents - if a.loans < 10 and a.savings < model.rich_threshold + if isinstance(a, Person) and a.loans <= 10 and a.savings <= model.rich_threshold ] return len(mid_agents) def get_total_savings(model): """sum of all agents' savings""" - - agent_savings = [a.savings for a in model.schedule.agents] - # return the sum of agents' savings - return np.sum(agent_savings) + return sum(a.savings for a in model.schedule.agents if isinstance(a, Person)) def get_total_wallets(model): """sum of amounts of all agents' wallets""" - - agent_wallets = [a.wallet for a in model.schedule.agents] - # return the sum of all agents' wallets - return np.sum(agent_wallets) + return sum(a.wallet for a in model.schedule.agents if isinstance(a, Person)) def get_total_money(model): - # sum of all agents' wallets - wallet_money = get_total_wallets(model) - # sum of all agents' savings - savings_money = get_total_savings(model) - # return sum of agents' wallets and savings for total money - return wallet_money + savings_money + return get_total_wallets(model) + get_total_savings(model) def get_total_loans(model): - # list of amounts of all agents' loans - agent_loans = [a.loans for a in model.schedule.agents] - # return sum of all agents' loans - return np.sum(agent_loans) + return sum(a.loans for a in model.schedule.agents if isinstance(a, Person)) class BankReserves(mesa.Model): - """ - This model is a Mesa implementation of the Bank Reserves model from NetLogo. - It is a highly abstracted, simplified model of an economy, with only one - type of agent and a single bank representing all banks in an economy. People - (represented by circles) move randomly within the grid. If two or more people - are on the same grid location, there is a 50% chance that they will trade with - each other. If they trade, there is an equal chance of giving the other agent - $5 or $2. A positive trade balance will be deposited in the bank as savings. - If trading results in a negative balance, the agent will try to withdraw from - its savings to cover the balance. If it does not have enough savings to cover - the negative balance, it will take out a loan from the bank to cover the - difference. The bank is required to keep a certain percentage of deposits as - reserves and the bank's ability to loan at any given time is a function of - the amount of deposits, its reserves, and its current total outstanding loan - amount. 
- """ - - # grid height grid_h = 20 - # grid width grid_w = 20 - """init parameters "init_people", "rich_threshold", and "reserve_percent" - are all set via Slider""" - def __init__( self, height=grid_h, @@ -113,17 +66,18 @@ def __init__( init_people=2, rich_threshold=10, reserve_percent=50, + seed=None, ): - super().__init__() + super().__init__(seed=seed) self.height = height self.width = width self.init_people = init_people - self.schedule = mesa.time.RandomActivation(self) - self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True) - # rich_threshold is the amount of savings a person needs to be considered "rich" self.rich_threshold = rich_threshold self.reserve_percent = reserve_percent - # see datacollector functions above + + self.grid = SingleGrid(self.width, self.height, torus=True) + self.schedule = RandomActivation(self) + self.datacollector = mesa.DataCollector( model_reporters={ "Rich": get_num_rich_agents, @@ -134,30 +88,42 @@ def __init__( "Money": get_total_money, "Loans": get_total_loans, }, - agent_reporters={"Wealth": lambda x: getattr(x, "wealth", None)}, + agent_reporters={ + "Wealth": lambda x: getattr(x, "wealth", None) + if isinstance(x, Person) + else None + }, ) - # create a single bank for the model + # Create the bank and place it on the grid + bank_pos = (self.width // 2, self.height // 2) # Place bank at the center self.bank = Bank(1, self, self.reserve_percent) + self.grid.place_agent(self.bank, bank_pos) + # Note: We're not adding the bank to the schedule anymore - # create people for the model according to number of people set by user + # Create people for i in range(self.init_people): - # set x, y coords randomly within the grid - x = self.random.randrange(self.width) - y = self.random.randrange(self.height) - p = Person(i, self, True, self.bank, self.rich_threshold) - # place the Person object on the grid at coordinates (x, y) - self.grid.place_agent(p, (x, y)) - # add the Person object to the model schedule - self.schedule.add(p) + self.create_person(i + 2) self.running = True self.datacollector.collect(self) + def create_person(self, unique_id): + x = self.random.randrange(self.width) + y = self.random.randrange(self.height) + pos = (x, y) + while not self.grid.is_cell_empty(pos): + x = self.random.randrange(self.width) + y = self.random.randrange(self.height) + pos = (x, y) + + person = Person(unique_id, pos, self, True, self.bank, self.rich_threshold) + self.grid.remove_agent(person) + self.grid.place_agent(person, pos) + self.schedule.add(person) + def step(self): - # tell all the agents in the model to run their step function self.schedule.step() - # collect data self.datacollector.collect(self) def run_model(self): diff --git a/examples/bank_reserves/bank_reserves/random_walk.py b/examples/bank_reserves/bank_reserves/random_walk.py deleted file mode 100644 index 884b24bd..00000000 --- a/examples/bank_reserves/bank_reserves/random_walk.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Citation: -The following code is a copy from random_walk.py at -https://github.com/projectmesa/mesa/blob/main/examples/wolf_sheep/wolf_sheep/random_walk.py -Accessed on: November 2, 2017 -Original Author: Jackie Kazil - -Generalized behavior for random walking, one grid cell at a time. -""" - -import mesa - - -class RandomWalker(mesa.Agent): - """ - Class implementing random walker methods in a generalized manner. - Not intended to be used on its own, but to inherit its methods to multiple - other agents. 
- """ - - grid = None - x = None - y = None - # use a Moore neighborhood - moore = True - - def __init__(self, unique_id, model, moore=True): - """ - grid: The MultiGrid object in which the agent lives. - x: The agent's current x coordinate - y: The agent's current y coordinate - moore: If True, may move in all 8 directions. - Otherwise, only up, down, left, right. - """ - super().__init__(unique_id, model) - self.moore = moore - - def random_move(self): - """ - Step one cell in any allowable direction. - """ - # Pick the next cell from the adjacent cells. - next_moves = self.model.grid.get_neighborhood(self.pos, self.moore, True) - next_move = self.random.choice(next_moves) - # Now move: - self.model.grid.move_agent(self, next_move) diff --git a/examples/bank_reserves/bank_reserves/server.py b/examples/bank_reserves/bank_reserves/server.py deleted file mode 100644 index 79a4c97a..00000000 --- a/examples/bank_reserves/bank_reserves/server.py +++ /dev/null @@ -1,90 +0,0 @@ -import mesa - -from .agents import Person -from .model import BankReserves - -""" -Citation: -The following code was adapted from server.py at -https://github.com/projectmesa/mesa/blob/main/examples/wolf_sheep/wolf_sheep/server.py -Accessed on: November 2, 2017 -Author of original code: Taylor Mutch -""" - -# The colors here are taken from Matplotlib's tab10 palette -# Green -RICH_COLOR = "#2ca02c" -# Red -POOR_COLOR = "#d62728" -# Blue -MID_COLOR = "#1f77b4" - - -def person_portrayal(agent): - if agent is None: - return - - portrayal = {} - - # update portrayal characteristics for each Person object - if isinstance(agent, Person): - portrayal["Shape"] = "circle" - portrayal["r"] = 0.5 - portrayal["Layer"] = 0 - portrayal["Filled"] = "true" - - color = MID_COLOR - - # set agent color based on savings and loans - if agent.savings > agent.model.rich_threshold: - color = RICH_COLOR - if agent.savings < 10 and agent.loans < 10: - color = MID_COLOR - if agent.loans > 10: - color = POOR_COLOR - - portrayal["Color"] = color - - return portrayal - - -# dictionary of user settable parameters - these map to the model __init__ parameters -model_params = { - "init_people": mesa.visualization.Slider( - "People", 25, 1, 200, description="Initial Number of People" - ), - "rich_threshold": mesa.visualization.Slider( - "Rich Threshold", - 10, - 1, - 20, - description="Upper End of Random Initial Wallet Amount", - ), - "reserve_percent": mesa.visualization.Slider( - "Reserves", - 50, - 1, - 100, - description="Percent of deposits the bank has to hold in reserve", - ), -} - -# set the portrayal function and size of the canvas for visualization -canvas_element = mesa.visualization.CanvasGrid(person_portrayal, 20, 20, 500, 500) - -# map data to chart in the ChartModule -chart_element = mesa.visualization.ChartModule( - [ - {"Label": "Rich", "Color": RICH_COLOR}, - {"Label": "Poor", "Color": POOR_COLOR}, - {"Label": "Middle Class", "Color": MID_COLOR}, - ] -) - -# create instance of Mesa ModularServer -server = mesa.visualization.ModularServer( - BankReserves, - [canvas_element, chart_element], - "Bank Reserves Model", - model_params=model_params, -) diff --git a/examples/bank_reserves/run.py b/examples/bank_reserves/run.py deleted file mode 100644 index 64a572ee..00000000 --- a/examples/bank_reserves/run.py +++ /dev/null @@ -1,3 +0,0 @@ -from bank_reserves.server import server - -server.launch(open_browser=True) diff --git a/examples/epstein_civil_violence/requirements.txt b/examples/epstein_civil_violence/requirements.txt index 
da2b9972..5aca876b 100644
--- a/examples/epstein_civil_violence/requirements.txt
+++ b/examples/epstein_civil_violence/requirements.txt
@@ -1,3 +1,4 @@
 jupyter
 matplotlib
 mesa~=2.0
+enum
\ No newline at end of file
diff --git a/examples/epstein_civil_violence/test_epstein_civil_violence.py b/examples/epstein_civil_violence/test_epstein_civil_violence.py
new file mode 100644
index 00000000..4a3798c8
--- /dev/null
+++ b/examples/epstein_civil_violence/test_epstein_civil_violence.py
@@ -0,0 +1,6 @@
+from epstein_civil_violence.epstein_civil_violence import EpsteinCivilViolence
+
+
+def test_epstein_civil_violence():
+    model = EpsteinCivilViolence(seed=15)
+    assert model is not None
diff --git a/examples/epstein_civil_violence_experimental/Readme.md b/examples/epstein_civil_violence_experimental/Readme.md
new file mode 100644
index 00000000..09556b13
--- /dev/null
+++ b/examples/epstein_civil_violence_experimental/Readme.md
@@ -0,0 +1,51 @@
+# Epstein Civil Violence Model
+
+This is an experimental implementation of the Epstein Civil Violence Model, which simulates the dynamics of civil violence with agents representing citizens and cops.
+
+## Summary
+
+This model is based on Joshua Epstein's simulation of how civil unrest grows and is suppressed. Citizen agents wander the grid randomly and are endowed with individual risk aversion and hardship levels; there is also a universal regime legitimacy value. There are also Cop agents, who work on behalf of the regime. Cops arrest Citizens who are actively rebelling; Citizens decide whether to rebel based on their hardship, the regime legitimacy, and their perceived probability of arrest.
+
+The model generates mass uprisings as a self-reinforcing process: if enough agents are rebelling, the probability of any individual agent being arrested falls, making more agents likely to join the uprising. However, the more rebelling Citizens the Cops arrest, the less likely additional agents are to join. This model also uses the experimental ABMSimulator feature.
+
+## Installation
+
+To install the dependencies, use pip and the requirements.txt in this directory, e.g.
+
+```
+    # First, we clone the mesa-examples repo
+    $ git clone https://github.com/projectmesa/mesa-examples.git
+    $ cd mesa-examples
+    # Then we cd to the example directory
+    $ cd examples/epstein_civil_violence_experimental
+    $ pip install -r requirements.txt
+```
+
+## How to Run
+
+You can run the model interactively by launching `app.py` with Solara:
+
+```
+    solara run app.py
+```
+
+Then open your browser to [http://127.0.0.1:8765/](http://127.0.0.1:8765/).
+
+## Files
+
+* `epstein_civil_violence_experimental/agent.py`: Defines the Citizen and Cop agent classes.
+* `epstein_civil_violence_experimental/model.py`: Defines the Epstein Civil Violence model itself, including the initialization of the grid, agents, and data collection.
+* `epstein_civil_violence_experimental/app.py`: Sets up the interactive SolaraViz visualization for the model and, when run as a script, performs a headless run using ABMSimulator.
+* `epstein_civil_violence_experimental/requirements.txt`: Lists the dependencies required to run the model.
+* `epstein_civil_violence_experimental/Readme.md`: Provides an overview and instructions for the model.
+
+## Further Reading
+
+This model is adapted from:
+
+[Epstein, J. “Modeling civil violence: An agent-based computational approach”, Proceedings of the National Academy of Sciences, Vol. 99, Suppl.
3, May 14, 2002](http://www.pnas.org/content/99/suppl.3/7243.short) + +A similar model is also included with NetLogo: + +Wilensky, U. (2004). NetLogo Rebellion model. http://ccl.northwestern.edu/netlogo/models/Rebellion. Center for Connected Learning and Computer-Based Modeling, Northwestern University, Evanston, IL. diff --git a/examples/epstein_civil_violence_experimental/__init__.py b/examples/epstein_civil_violence_experimental/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/epstein_civil_violence_experimental/agent.py b/examples/epstein_civil_violence_experimental/agent.py new file mode 100644 index 00000000..81873b07 --- /dev/null +++ b/examples/epstein_civil_violence_experimental/agent.py @@ -0,0 +1,116 @@ +import enum +import math + +from mesa import Agent + + +class AgentState(enum.IntEnum): + QUIESCENT = 0 + ARRESTED = 1 + ACTIVE = 2 + + +class EpsteinAgent(Agent): + def __init__(self, unique_id, model, vision, movement): + super().__init__(unique_id, model) + self.vision = vision + self.movement = movement + + +class Citizen(EpsteinAgent): + def __init__( + self, + unique_id, + model, + vision, + movement, + hardship, + regime_legitimacy, + risk_aversion, + threshold, + arrest_prob_constant, + ): + super().__init__(unique_id, model, vision, movement) + self.hardship = hardship + self.regime_legitimacy = regime_legitimacy + self.risk_aversion = risk_aversion + self.threshold = threshold + self.condition = AgentState.QUIESCENT + self.grievance = self.hardship * (1 - self.regime_legitimacy) + self.arrest_probability = None + self.arrest_prob_constant = arrest_prob_constant + self.jail_time_remaining = 0 + + def step(self): + if self.condition == AgentState.ARRESTED: + self.jail_time_remaining -= 1 + if self.jail_time_remaining <= 0: + self.release_from_jail() + return + + self.update_neighbors() + self.update_estimated_arrest_probability() + net_risk = self.risk_aversion * self.arrest_probability + if self.grievance - net_risk > self.threshold: + self.condition = AgentState.ACTIVE + else: + self.condition = AgentState.QUIESCENT + if self.movement and self.empty_neighbors: + new_pos = self.random.choice(self.empty_neighbors) + self.model.grid.move_agent(self, new_pos) + + def update_neighbors(self): + self.neighborhood = self.model.grid.get_neighborhood( + self.pos, moore=True, radius=self.vision + ) + self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood) + self.empty_neighbors = [ + c for c in self.neighborhood if self.model.grid.is_cell_empty(c) + ] + + def update_estimated_arrest_probability(self): + cops_in_vision = len([c for c in self.neighbors if isinstance(c, Cop)]) + actives_in_vision = 1.0 # citizen counts herself + for c in self.neighbors: + if isinstance(c, Citizen) and c.condition == AgentState.ACTIVE: + actives_in_vision += 1 + self.arrest_probability = 1 - math.exp( + -1 * self.arrest_prob_constant * (cops_in_vision / actives_in_vision) + ) + + def sent_to_jail(self, jail_time): + self.model.schedule.remove(self) + self.condition = AgentState.ARRESTED + self.jail_time_remaining = jail_time + + def release_from_jail(self): + self.model.schedule.add(self) + self.condition = AgentState.QUIESCENT + + +class Cop(EpsteinAgent): + def __init__(self, unique_id, model, vision, movement, max_jail_term): + super().__init__(unique_id, model, vision, movement) + self.max_jail_term = max_jail_term + + def step(self): + self.update_neighbors() + active_neighbors = [] + for agent in self.neighbors: + if isinstance(agent, Citizen) 
and agent.condition == AgentState.ACTIVE: + active_neighbors.append(agent) + if active_neighbors: + arrestee = self.random.choice(active_neighbors) + arrestee.sent_to_jail(self.random.randint(0, self.max_jail_term)) + if self.movement and self.empty_neighbors: + new_pos = self.random.choice(self.empty_neighbors) + self.model.grid.move_agent(self, new_pos) + + def update_neighbors(self): + self.neighborhood = self.model.grid.get_neighborhood( + self.pos, moore=True, radius=self.vision + ) + self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood) + self.empty_neighbors = [ + c for c in self.neighborhood if self.model.grid.is_cell_empty(c) + ] diff --git a/examples/epstein_civil_violence_experimental/app.py b/examples/epstein_civil_violence_experimental/app.py new file mode 100644 index 00000000..36b3fd8c --- /dev/null +++ b/examples/epstein_civil_violence_experimental/app.py @@ -0,0 +1,84 @@ +import solara +from mesa.visualization.solara_viz import SolaraViz, make_text +from mesa.experimental.devs.simulator import ABMSimulator +from model import EpsteinCivilViolence +from .agent import AgentState, Citizen, Cop + + +def agent_portrayal(agent): + if isinstance(agent, Citizen): + if agent.condition == AgentState.QUIESCENT: + color = "tab:blue" + elif agent.condition == AgentState.ACTIVE: + color = "tab:red" + else: # ARRESTED + color = "tab:gray" + elif isinstance(agent, Cop): + color = "tab:green" + else: + color = "tab:purple" # Fallback color + + return { + "color": color, + "size": 50, + } + + +def get_citizen_cop_ratio(model): + if model.schedule is None: + return "Citizen/Cop Ratio: N/A" + citizen_count = sum(isinstance(agent, Citizen) for agent in model.schedule.agents) + cop_count = sum(isinstance(agent, Cop) for agent in model.schedule.agents) + ratio = citizen_count / cop_count if cop_count > 0 else float("inf") + return f"Citizen/Cop Ratio: {ratio:.2f}" + + +# Define the SolaraViz visualization +page = SolaraViz( + model_class=EpsteinCivilViolence, + model_params={ + "width": 40, + "height": 40, + "citizen_density": 0.7, + "cop_density": 0.074, + "citizen_vision": 7, + "cop_vision": 7, + "legitimacy": 0.8, + "max_jail_term": 1000, + "active_threshold": 0.1, + "arrest_prob_constant": 2.3, + "movement": True, + "max_iters": 1000, + }, + measures=[ + make_text(get_citizen_cop_ratio), + ], + name="Epstein Civil Violence Model", + agent_portrayal=agent_portrayal, +) + + +@solara.component +def App(): + solara.Title("Epstein Civil Violence Model") + solara.Markdown("# Epstein Civil Violence Model") + solara.Markdown("This is a visualization of the Epstein Civil Violence Model.") + + # Add color legend + solara.Markdown(""" + ## Color Legend + - Blue: Quiescent Citizens + - Red: Active Citizens + - Gray: Arrested Citizens + - Green: Cops + """) + + page.show() + + +if __name__ == "__main__": + model = EpsteinCivilViolence(seed=15) + simulator = ABMSimulator() + simulator.setup(model) + simulator.run_for(time_delta=100) + App() diff --git a/examples/epstein_civil_violence_experimental/model.py b/examples/epstein_civil_violence_experimental/model.py new file mode 100644 index 00000000..9ed98e88 --- /dev/null +++ b/examples/epstein_civil_violence_experimental/model.py @@ -0,0 +1,93 @@ +import mesa +from mesa.space import SingleGrid +from mesa.time import RandomActivation +from .agent import Citizen, Cop, AgentState + + +class EpsteinCivilViolence(mesa.Model): + def __init__( + self, + width=40, + height=40, + citizen_density=0.7, + cop_density=0.074, + citizen_vision=7, + 
cop_vision=7, + legitimacy=0.8, + max_jail_term=1000, + active_threshold=0.1, + arrest_prob_constant=2.3, + movement=True, + max_iters=1000, + seed=None, + ): + super().__init__(seed) + if cop_density + citizen_density > 1: + raise ValueError("Cop density + citizen density must be less than 1") + + self.width = width + self.height = height + self.citizen_density = citizen_density + self.cop_density = cop_density + + self.max_iters = max_iters + + self.grid = SingleGrid(self.width, self.height, torus=True) + self.schedule = RandomActivation(self) + + for _, pos in self.grid.coord_iter(): + if self.random.random() < self.cop_density: + agent = Cop( + self.next_id(), + self, + cop_vision, + movement, + max_jail_term, + ) + elif self.random.random() < (self.cop_density + self.citizen_density): + agent = Citizen( + self.next_id(), + self, + citizen_vision, + movement, + hardship=self.random.random(), + regime_legitimacy=legitimacy, + risk_aversion=self.random.random(), + threshold=active_threshold, + arrest_prob_constant=arrest_prob_constant, + ) + else: + continue + self.grid.place_agent(agent, pos) + self.schedule.add(agent) + + self.datacollector = mesa.DataCollector( + {"unhappy": "unhappy", "happy": "happy"} + ) + self.datacollector.collect(self) + + self.running = True + + @property + def unhappy(self): + num_unhappy = 0 + for agent in self.schedule.agents: + if isinstance(agent, Citizen) and agent.condition == AgentState.ACTIVE: + num_unhappy += 1 + return num_unhappy + + @property + def happy(self): + return len(self.schedule.agents) - self.unhappy + + def step(self): + self.schedule.step() + self.datacollector.collect(self) + + if not self.unhappy: + self.running = False + + self.active_agents = self.schedule.agents + + def step(self): + self.schedule.step() diff --git a/examples/epstein_civil_violence_experimental/requirements.txt b/examples/epstein_civil_violence_experimental/requirements.txt new file mode 100644 index 00000000..f72b2225 --- /dev/null +++ b/examples/epstein_civil_violence_experimental/requirements.txt @@ -0,0 +1,4 @@ +mesa +solara +numpy +matplotlib diff --git a/examples/wolf_sheep_experimental/Readme.md b/examples/wolf_sheep_experimental/Readme.md new file mode 100644 index 00000000..d3f6d7a9 --- /dev/null +++ b/examples/wolf_sheep_experimental/Readme.md @@ -0,0 +1,59 @@ +# Wolf-Sheep Experimental Model + +This is an experimental implementation of the Wolf-Sheep Predation Model, which simulates the dynamics of predator-prey interactions between wolves and sheep, including factors such as energy expenditure, reproduction, and grass regrowth. + +## Summary + +## Summary + +This model consists of three agent types: wolves, sheep, and grass patches. The wolves and sheep wander around the grid at random, expending energy as they move. Sheep eat fully grown grass to replenish energy, while wolves eat sheep if they occupy the same cell. Both species reproduce asexually with a certain probability if they have sufficient energy, splitting their energy with offspring. The grass regrows after a set time if enabled in the model. Agents die if their energy depletes. + +The model tests and demonstrates several Mesa concepts and features: + - MultiGrid for spatial representation. + - Multiple agent types (wolves, sheep, grass patches). + - Agent portrayal with different shapes and colors. + - Agents inheriting behavior (random movement) from an abstract parent. + - Model composition using multiple files. + - Dynamic agent addition and removal from the schedule. 
+ - Data collection and visualization using SolaraViz.
+ - Experimental feature: ABMSimulator.
+
+## Installation
+
+To install the dependencies, use pip and the requirements.txt in this directory, e.g.
+
+```
+    # First, we clone the mesa-examples repo
+    $ git clone https://github.com/projectmesa/mesa-examples.git
+    $ cd mesa-examples
+    # Then we cd to the example directory
+    $ cd examples/wolf_sheep_experimental
+    $ pip install -r requirements.txt
+```
+
+## How to Run
+
+You can run the model interactively by launching `app.py` with Solara:
+
+```
+    solara run app.py
+```
+
+Then open your browser to [http://127.0.0.1:8765/](http://127.0.0.1:8765/) and press Reset, then Run.
+
+## Files
+
+* `wolf_sheep_experimental/agents.py`: Defines the Wolf, Sheep, and GrassPatch agent classes.
+* `wolf_sheep_experimental/model.py`: Defines the Wolf-Sheep Predation model itself, including the initialization of the grid, agents, and data collection.
+* `wolf_sheep_experimental/app.py`: Sets up the interactive SolaraViz visualization for the model and, when run as a script, performs a timed headless run using ABMSimulator.
+* `wolf_sheep_experimental/requirements.txt`: Lists the dependencies required to run the model.
+* `wolf_sheep_experimental/Readme.md`: Provides an overview and instructions for the model.
+
+## Further Reading
+
+This model is closely based on the NetLogo Wolf-Sheep Predation Model:
+
+Wilensky, U. (1997). NetLogo Wolf Sheep Predation model. http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation. Center for Connected Learning and Computer-Based Modeling, Northwestern University, Evanston, IL.
+
+See also the [Lotka–Volterra equations](https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations) for an example of a classic differential-equation model with similar dynamics.
diff --git a/examples/wolf_sheep_experimental/__init__.py b/examples/wolf_sheep_experimental/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/wolf_sheep_experimental/agents.py b/examples/wolf_sheep_experimental/agents.py
new file mode 100644
index 00000000..155f45ea
--- /dev/null
+++ b/examples/wolf_sheep_experimental/agents.py
@@ -0,0 +1,129 @@
+import mesa
+
+
+class RandomWalker(mesa.Agent):
+    """
+    Class implementing random walker methods in a generalized manner.
+
+    Not intended to be used on its own, but to inherit its methods to multiple
+    other agents.
+    """
+
+    def __init__(self, unique_id, model, moore=True):
+        """
+        grid: The MultiGrid object in which the agent lives.
+        x: The agent's current x coordinate
+        y: The agent's current y coordinate
+        moore: If True, may move in all 8 directions.
+               Otherwise, only up, down, left, right.
+        """
+        super().__init__(unique_id, model)
+        self.moore = moore
+
+    def random_move(self):
+        """
+        Step one cell in any allowable direction.
+        """
+        if self.pos is not None:
+            # Pick the next cell from the adjacent cells.
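+            # Note: unlike the bank_reserves walker, occupied cells are not filtered
+            # out here; the wolf-sheep model uses a MultiGrid, so stacking is allowed.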
+ next_moves = self.model.grid.get_neighborhood(self.pos, self.moore, True) + next_move = self.random.choice(next_moves) + # Now move: + self.model.grid.move_agent(self, next_move) + + +class GrassPatch(mesa.Agent): + def __init__(self, unique_id, model, fully_grown, countdown): + super().__init__(unique_id, model) + self.fully_grown = fully_grown + self.countdown = countdown + + def step(self): + if not self.fully_grown: + if self.countdown <= 0: + self.fully_grown = True + self.countdown = self.model.grass_regrowth_time + else: + self.countdown -= 1 + + +class Animal(RandomWalker): + def __init__(self, unique_id, model, moore, energy, p_reproduce, energy_from_food): + super().__init__(unique_id, model, moore) + self.energy = energy + self.p_reproduce = p_reproduce + self.energy_from_food = energy_from_food + self.is_alive = True + + def spawn_offspring(self): + self.energy /= 2 + offspring = self.__class__( + self.model.next_id(), + self.model, + self.moore, + self.energy, + self.p_reproduce, + self.energy_from_food, + ) + self.model.grid.place_agent(offspring, self.pos) + self.model.schedule.add(offspring) + + def feed(self): + pass + + def die(self): + if self.is_alive: + self.is_alive = False + if self.pos is not None: + self.model.grid.remove_agent(self) + try: + self.model.schedule.remove(self) + except KeyError: + pass # Agent was already removed from schedule + + def step(self): + if not self.is_alive: + return + self.random_move() + self.energy -= 1 + + self.feed() + + if self.energy < 0: + self.die() + elif self.random.random() < self.p_reproduce: + self.spawn_offspring() + + +class Sheep(Animal): + """ + A sheep that walks around, reproduces (asexually) and gets eaten. + """ + + def feed(self): + if self.pos is not None: + # If there is grass available, eat it + agents = self.model.grid.get_cell_list_contents([self.pos]) + grass_patch = next( + (obj for obj in agents if isinstance(obj, GrassPatch)), None + ) + if grass_patch and grass_patch.fully_grown: + self.energy += self.energy_from_food + grass_patch.fully_grown = False + + +class Wolf(Animal): + """ + A wolf that walks around, reproduces (asexually) and eats sheep. 
+ """ + + def feed(self): + if self.pos is not None: + agents = self.model.grid.get_cell_list_contents([self.pos]) + sheep = [obj for obj in agents if isinstance(obj, Sheep)] + if len(sheep) > 0: + sheep_to_eat = self.random.choice(sheep) + self.energy += self.energy_from_food + + # Kill the sheep + sheep_to_eat.die() diff --git a/examples/wolf_sheep_experimental/app.py b/examples/wolf_sheep_experimental/app.py new file mode 100644 index 00000000..d917b209 --- /dev/null +++ b/examples/wolf_sheep_experimental/app.py @@ -0,0 +1,85 @@ +import time +import solara +from mesa.visualization.solara_viz import SolaraViz, make_text +from mesa.experimental.devs.simulator import ABMSimulator +from .model import WolfSheep +from .agents import Sheep, Wolf, GrassPatch + + +def agent_portrayal(agent): + if isinstance(agent, Sheep): + portrayal = { + "color": "tab:blue", + "size": 50, + } + elif isinstance(agent, Wolf): + portrayal = { + "color": "tab:red", + "size": 50, + } + elif isinstance(agent, GrassPatch): + color = "tab:green" if agent.fully_grown else "tab:brown" + portrayal = { + "color": color, + "size": 50, + } + return portrayal + + +def get_wolf_sheep_ratio(model): + wolf_count = sum(isinstance(agent, Wolf) for agent in model.schedule.agents) + sheep_count = sum(isinstance(agent, Sheep) for agent in model.schedule.agents) + ratio = wolf_count / sheep_count if sheep_count > 0 else float("inf") + return f"Wolf/Sheep Ratio: {ratio:.2f}" + + +model_params = { + "width": 20, + "height": 20, + "initial_sheep": 100, + "initial_wolves": 50, + "sheep_reproduce": 0.04, + "wolf_reproduce": 0.05, + "wolf_gain_from_food": 20, + "grass": True, + "grass_regrowth_time": 30, + "sheep_gain_from_food": 4, +} + +page = SolaraViz( + model_class=WolfSheep, + model_params=model_params, + measures=[ + make_text(get_wolf_sheep_ratio), + ], + name="Wolf-Sheep Predation Model", + agent_portrayal=agent_portrayal, +) + + +@solara.component +def App(): + solara.Title("Wolf-Sheep Predation Model") + solara.Markdown("# Wolf-Sheep Predation Model") + solara.Markdown("This is a visualization of the Wolf-Sheep Predation Model.") + + # Add color legend + solara.Markdown(""" + ## Color Legend + - Blue: Sheep + - Red: Wolves + - Green: Fully grown grass + - Brown: Eaten grass (regrowing) + """) + + page.show() + + +if __name__ == "__main__": + model = WolfSheep(25, 25, 60, 40, 0.2, 0.1, 20) + simulator = ABMSimulator() + simulator.setup(model) + start_time = time.perf_counter() + simulator.run_for(time_delta=100) + print("Time:", time.perf_counter() - start_time) + App() diff --git a/examples/wolf_sheep_experimental/model.py b/examples/wolf_sheep_experimental/model.py new file mode 100644 index 00000000..6f08c1af --- /dev/null +++ b/examples/wolf_sheep_experimental/model.py @@ -0,0 +1,122 @@ +""" +Wolf-Sheep Predation Model +================================ + +Replication of the model found in NetLogo: + Wilensky, U. (1997). NetLogo Wolf Sheep Predation model. + http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation. + Center for Connected Learning and Computer-Based Modeling, + Northwestern University, Evanston, IL. 
+""" + +from mesa import Model +from mesa.datacollection import DataCollector +from mesa.space import MultiGrid +from mesa.time import RandomActivation +from mesa.experimental.devs.simulator import ABMSimulator + +from .agents import GrassPatch, Sheep, Wolf + + +class WolfSheep(Model): + """Wolf-Sheep Predation Model""" + + def __init__( + self, + width=20, + height=20, + initial_sheep=100, + initial_wolves=50, + sheep_reproduce=0.04, + wolf_reproduce=0.05, + wolf_gain_from_food=20, + grass=False, + grass_regrowth_time=30, + sheep_gain_from_food=4, + ): + super().__init__() + self.width = width + self.height = height + self.initial_sheep = initial_sheep + self.initial_wolves = initial_wolves + self.sheep_reproduce = sheep_reproduce + self.wolf_reproduce = wolf_reproduce + self.wolf_gain_from_food = wolf_gain_from_food + self.grass = grass + self.grass_regrowth_time = grass_regrowth_time + self.sheep_gain_from_food = sheep_gain_from_food + + self.schedule = RandomActivation(self) + self.grid = MultiGrid(self.width, self.height, torus=True) + + self.datacollector = DataCollector( + model_reporters={"Wolf/Sheep Ratio": get_wolf_sheep_ratio}, + agent_reporters={"Energy": "energy"}, + ) + + self.simulator = ABMSimulator() + self.simulator.setup(self) + + self._init_population() + + self.running = True + self.datacollector.collect(self) + + def _init_population(self): + # Create sheep + for i in range(self.initial_sheep): + x = self.random.randrange(self.width) + y = self.random.randrange(self.height) + energy = self.random.randrange(2 * self.sheep_gain_from_food) + sheep = Sheep( + self.next_id(), + self, + True, + energy, + self.sheep_reproduce, + self.sheep_gain_from_food, + ) + self.grid.place_agent(sheep, (x, y)) + self.schedule.add(sheep) + + # Create wolves + for i in range(self.initial_wolves): + x = self.random.randrange(self.width) + y = self.random.randrange(self.height) + energy = self.random.randrange(2 * self.wolf_gain_from_food) + wolf = Wolf( + self.next_id(), + self, + True, + energy, + self.wolf_reproduce, + self.wolf_gain_from_food, + ) + self.grid.place_agent(wolf, (x, y)) + self.schedule.add(wolf) + + # Create grass patches + if self.grass: + for agent, (x, y) in self.grid.coord_iter(): + fully_grown = self.random.choice([True, False]) + if fully_grown: + countdown = self.grass_regrowth_time + else: + countdown = self.random.randrange(self.grass_regrowth_time) + patch = GrassPatch(self.next_id(), self, fully_grown, countdown) + self.grid.place_agent(patch, (x, y)) + self.schedule.add(patch) + + def step(self): + self.schedule.step() + self.datacollector.collect(self) + + def run_model(self, step_count=200): + self.simulator.run_for(time_delta=step_count) + + +def get_wolf_sheep_ratio(model): + wolf_count = sum(isinstance(agent, Wolf) for agent in model.schedule.agents) + sheep_count = sum(isinstance(agent, Sheep) for agent in model.schedule.agents) + ratio = wolf_count / sheep_count if sheep_count > 0 else float("inf") + return ratio diff --git a/examples/wolf_sheep_experimental/requirements.txt b/examples/wolf_sheep_experimental/requirements.txt new file mode 100644 index 00000000..982e4066 --- /dev/null +++ b/examples/wolf_sheep_experimental/requirements.txt @@ -0,0 +1,4 @@ +mesa +solara +numpy +matplotlib \ No newline at end of file diff --git a/examples/wolf_sheep_experimental/resources/sheep.png b/examples/wolf_sheep_experimental/resources/sheep.png new file mode 100644 index 00000000..dfb81b0e Binary files /dev/null and 
b/examples/wolf_sheep_experimental/resources/sheep.png differ diff --git a/examples/wolf_sheep_experimental/resources/wolf.png b/examples/wolf_sheep_experimental/resources/wolf.png new file mode 100644 index 00000000..5357b855 Binary files /dev/null and b/examples/wolf_sheep_experimental/resources/wolf.png differ
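
A quick way to sanity-check the refactored `bank_reserves` package in this diff, outside of Solara, is a short headless run. The sketch below is illustrative only and not part of the diff: it assumes it is executed from `examples/bank_reserves` so that the `bank_reserves` package is importable, uses Mesa's standard `DataCollector.get_model_vars_dataframe()`, and passes parameter values taken from the defaults shown in `model.py`.

```
# Illustrative smoke test (not part of the diff): drive BankReserves headlessly.
from bank_reserves.model import BankReserves

model = BankReserves(init_people=25, rich_threshold=10, reserve_percent=50, seed=42)

for _ in range(100):
    model.step()  # schedule.step() plus DataCollector.collect(), per model.py

# Model-level reporters ("Rich", "Poor", "Middle Class", ...) accumulate per step.
df = model.datacollector.get_model_vars_dataframe()
print(df.tail())
```

The same pattern should work for the experimental Wolf-Sheep and Epstein models, whose `app.py` entry points already perform a similar headless run via `ABMSimulator` when executed directly.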