diff --git a/app.py b/app.py
index f2f8759..37cc736 100644
--- a/app.py
+++ b/app.py
@@ -8,28 +8,31 @@
 import glob
 import json
 from pathlib import Path
+from src import motivation_model as mm
+import jupedsim as jps
 import numpy as np
 import pandas as pd
 import pedpy
 import streamlit as st
 from jupedsim.internal.notebook_utils import animate, read_sqlite_file
-from src.logger_config import init_logger
-import simulation
+
+from simulation import main
+from src.analysis import run
 from src.inifile_parser import (
     parse_fps,
-    parse_time_step,
+    parse_motivation_strategy,
     parse_number_agents,
     parse_simulation_time,
+    parse_time_step,
 )
+from src.logger_config import init_logger
 from src.ui import (
+    init_sidebar,
     ui_motivation_parameters,
     ui_simulation_parameters,
     ui_velocity_model_parameters,
-    init_sidebar,
 )
 from src.utilities import delete_txt_files, load_json, save_json
-from src.analysis import run
-import jupedsim as jps
 
 
 def read_data(output_file: str) -> pd.DataFrame:
@@ -133,7 +136,6 @@ def read_data(output_file: str) -> pd.DataFrame:
     if Path(OUTPUT_FILE).exists():
         Path(OUTPUT_FILE).unlink()
     msg.empty()
-    msg.code("Running simulation ...")
     with open(CONFIG_FILE, "r", encoding="utf8") as f:
         json_str = f.read()
         data = json.loads(json_str)
@@ -141,10 +143,15 @@ def read_data(output_file: str) -> pd.DataFrame:
         time_step = parse_time_step(data)
         number_agents = parse_number_agents(data)
         simulation_time = parse_simulation_time(data)
+        strategy = parse_motivation_strategy(data)
 
-        with st.spinner("Simulating ..."):
+        msg.code(
+            f"Running simulation with {number_agents} agents. Strategy: <{strategy}>..."
+        )
+
+        with st.spinner("Simulating..."):
             if fps and time_step:
-                evac_time = simulation.main(
+                evac_time = main(
                     number_agents,
                     fps,
                     time_step,
@@ -165,5 +172,32 @@ def read_data(output_file: str) -> pd.DataFrame:
             anm = animate(trajectory_data, walkable_area, every_nth_frame=int(fps))
             st.plotly_chart(anm)
 
+    if c3.button("Plot"):
+        strategy = data["motivation_parameters"]["motivation_strategy"]
+        width = float(data["motivation_parameters"]["width"])
+        height = float(data["motivation_parameters"]["height"])
+        max_value = float(data["motivation_parameters"]["max_value"])
+        min_value = float(data["motivation_parameters"]["min_value"])
+        seed = data["motivation_parameters"]["seed"]
+        number_agents = float(parse_number_agents(data))
+        if strategy == "default":
+            motivation_strategy = mm.DefaultMotivationStrategy(
+                width=width, height=height
+            )
+        elif strategy == "EVC":
+            motivation_strategy = mm.EVCStrategy(
+                width=width,
+                height=height,
+                max_reward=number_agents,
+                seed=seed,
+                max_value=max_value,
+                min_value=min_value,
+            )
+
+        figs = motivation_strategy.plot()
+        with st.expander("Plot model", expanded=True):
+            for fig in figs:
+                st.pyplot(fig)
+
 if tab == "Analysis":
     run()
diff --git a/files/bottleneck.json b/files/bottleneck.json
index 12dc4c7..11c72b9 100644
--- a/files/bottleneck.json
+++ b/files/bottleneck.json
@@ -51,7 +51,6 @@
     },
     "motivation_parameters": {
         "motivation_strategy": "default",
-        "active": 0,
         "normal_v_0": 1.2,
         "normal_time_gap": 1.0,
         "width": "1.0",
diff --git a/simulation.py b/simulation.py
index ea09d40..5ac1d1e 100644
--- a/simulation.py
+++ b/simulation.py
@@ -15,7 +15,6 @@
 from src import motivation_model as mm
 from src.inifile_parser import (
-    is_motivation_active,
     parse_accessible_areas,
     parse_destinations,
     parse_distribution_polygons,
@@ -125,12 +124,10 @@ def init_simulation(
         door_point2=(motivation_doors[0][1][0], motivation_doors[0][1][1]),
         normal_v_0=normal_v_0,
         normal_time_gap=normal_time_gap,
-        active=is_motivation_active(_data),
         motivation_strategy=motivation_strategy,
     )
-    if motivation_model.active:
-        motivation_model.print_details()
-    logging.info("No motivation!")
+
+    motivation_model.print_details()
     logging.info("Init simulation done")
     return simulation, motivation_model
 
@@ -161,7 +158,7 @@ def run_simulation(
         and simulation.elapsed_time() < _simulation_time
     ):
         simulation.iterate()
-        if motivation_model.active and simulation.iteration_count() % 100 == 0:
+        if simulation.iteration_count() % 100 == 0:
            agents = simulation.agents()
            number_agents_in_simulation = simulation.agent_count()
            for agent in agents:
@@ -185,7 +182,6 @@ def run_simulation(
                    logging.info(
                        f"Agents: {agent.id},{v_0 = :.2f}, {time_gap = :.2f}, {motivation_i = }, Pos: {position[0]:.2f} {position[1]:.2f}"
                    )
-
                    write_value_to_file(
                        file_handle,
                        f"{position[0]} {position[1]} {motivation_i} {v_0} {time_gap} {distance}",
@@ -241,17 +237,18 @@ def main(
         stage_id=stage_id,
         radius=radius,
         v0=normal_v_0,
-        time_gap=normal_time_gap
-
+        time_gap=normal_time_gap,
     )
-
     ped_ids = distribute_and_add_agents(simulation, agent_parameters, positions)
     logging.info(f"Running simulation for {len(ped_ids)} agents:")
     run_simulation(simulation, motivation_model, _simulation_time)
-    logging.info(f"Simulation completed after {simulation.iteration_count()} iterations")
+    logging.info(
+        f"Simulation completed after {simulation.iteration_count()} iterations"
+    )
     logging.info(f"simulation time: {simulation.iteration_count()*_time_step} [s]")
     # logging.info(f"Trajectory: {_trajectory_path}")
-    return simulation.iteration_count()*_time_step
+    return simulation.iteration_count() * _time_step
+
 
 if __name__ == "__main__":
     init_logger()
diff --git a/src/motivation_model.py b/src/motivation_model.py
index d398717..7093793 100644
--- a/src/motivation_model.py
+++ b/src/motivation_model.py
@@ -3,7 +3,7 @@
 import random
 from dataclasses import dataclass
 from typing import Any, Optional, Tuple, TypeAlias
-
+import matplotlib.pyplot as plt
 import numpy as np
 
 from .logger_config import log_debug
@@ -35,6 +35,22 @@ def motivation(self, params: dict[str, Any]) -> float:
         return float(np.exp(expr) * np.e * self.height)
 
+    def plot(self):
+        fig = plt.figure()
+        distances = np.linspace(0, 10, 100)
+        m = []
+        for dist in distances:
+            m.append(self.motivation({"distance": dist}))
+
+        plt.plot(distances, m)
+        plt.grid(alpha=0.3)
+        plt.ylim([-0.1, 3])
+        plt.xlim([-0.1, 4])
+        plt.ylabel("Motivation")
+        plt.xlabel("Distance / m")
+        plt.title(f"{self.name()} - M(width, height)")
+        return [fig]
+
 
 @dataclass
 class EVCStrategy:
@@ -82,6 +98,7 @@ def value(min_v: float, max_v: float, seed: Optional[float] = None):
         """Random value in interval.
seed is optional.""" if seed is not None: random.seed(seed) + return random.uniform(min_v, max_v) def motivation(self, params: dict[str, Any]) -> float: @@ -99,6 +116,76 @@ def motivation(self, params: dict[str, Any]) -> float: ) ) + def plot(self): + """Plot functions for inspection.""" + fig0, ax0 = plt.subplots(ncols=1, nrows=1) + fig1, ax1 = plt.subplots(ncols=1, nrows=1) + fig2, ax2 = plt.subplots(ncols=1, nrows=1) + fig3, ax3 = plt.subplots(ncols=1, nrows=1) + distances = np.linspace(0, 10, 100) + # E + E = [] + for dist in distances: + E.append(self.expectancy(dist, self.width, self.height)) + + ax0.plot(distances, E) + ax0.grid(alpha=0.3) + ax0.set_ylim([-0.1, 3]) + ax0.set_xlim([-0.1, 4]) + ax0.set_title(f"{self.name()} - E (width, height)") + ax0.set_xlabel("Distance / m") + ax0.set_ylabel("Expectancy") + # V + V = [] + agents = np.linspace(1, self.max_reward) + for s in agents: + V.append(self.value(self.min_value, self.max_value, self.seed)) + + ax1.plot(agents, V, "o") + ax1.plot( + [self.seed], + [self.value(self.min_value, self.max_value, self.seed)], + "or", + ms=10, + ) + ax1.grid(alpha=0.3) + ax1.set_ylim([-0.1, 5]) + ax1.set_xlim([-0.1, self.max_reward + 1]) + ax1.set_title(f"{self.name()} - V (seed = {self.seed})") + ax1.set_xlabel("# Agents") + ax1.set_ylabel("Value") + # C + C = [] + N = np.arange(0, self.max_reward) + for n in N: + C.append(self.competition(n, self.max_reward)) + + ax2.plot(N, C) + ax2.grid(alpha=0.3) + ax2.set_xlim([0, self.max_reward + 1]) + ax2.set_ylim([0, 1.5]) + ax2.set_xlabel("reward") + ax2.set_ylabel("Competition") + ax2.set_title(f"{self.name()} - C (max reward {self.max_reward:.0f})") + # M + m = [] + for dist in distances: + params = { + "distance": dist, + "number_agents_in_simulation": self.max_reward, + } + m.append(self.motivation(params)) + + ax3.plot(distances, m) + ax3.grid(alpha=0.3) + ax3.set_ylim([-0.1, 3]) + ax3.set_xlim([-0.1, 4]) + ax3.set_title(f"{self.name()} - E.V.C (N={self.max_reward})") + ax3.set_xlabel("Distance / m") + ax3.set_ylabel("Motivation") + + return fig0, fig1, fig2, fig3 + @dataclass class MotivationModel: @@ -108,7 +195,6 @@ class MotivationModel: door_point2: Point = (60, 102) normal_v_0: float = 1.2 normal_time_gap: float = 1.0 - active: int = 1 motivation_strategy: Any = None def print_details(self) -> None: @@ -118,7 +204,6 @@ def print_details(self) -> None: log_debug(f">> Door Point 2: {self.door_point2}") log_debug(f"> Normal Velocity 0: {self.normal_v_0}") log_debug(f">> Normal Time Gap: {self.normal_time_gap}") - log_debug(f">> Active: {self.active}") def __post_init__(self) -> None: """Init v0 and time gap.""" @@ -132,7 +217,7 @@ def calculate_motivation_state(self, motivation_i: float) -> Tuple[float, float] """Return v0, T tuples depending on Motivation. 
(v0,T)=(1.2,1).""" v_0 = self.normal_v_0 time_gap = self.normal_time_gap - v_0_new = (1 + 0 * motivation_i) * v_0 # TODO + v_0_new = (1 + motivation_i) * v_0 time_gap_new = time_gap / (1 + motivation_i) return v_0_new, time_gap_new diff --git a/src/ui.py b/src/ui.py index 1a83679..4c3eed5 100644 --- a/src/ui.py +++ b/src/ui.py @@ -104,9 +104,10 @@ def ui_simulation_parameters(data: Dict[str, Any]) -> None: data["simulation_parameters"]["simulation_time"] = st.number_input( "Simulation Time:", value=data["simulation_parameters"]["simulation_time"] ) - data["motivation_parameters"]["seed"] = st.text_input( + data["motivation_parameters"]["seed"] = st.number_input( "Seed", key="seed", + step=1.0, value=float(data["motivation_parameters"]["seed"]), help="Seed for random generator for value", ) @@ -115,29 +116,24 @@ def ui_simulation_parameters(data: Dict[str, Any]) -> None: def ui_motivation_parameters(data: Dict[str, Any]) -> None: """Motivation Parameters Section.""" with st.sidebar.expander("Motivation Parameters", expanded=True): - act = st.empty() model = st.empty() c1, c2 = st.columns(2) - motivation_activated = act.checkbox("Activate motivation", value=True) - if motivation_activated: - data["motivation_parameters"]["active"] = 1 - else: - data["motivation_parameters"]["active"] = 0 - motivation_strategy = model.selectbox( "Select model", ["default", "EVC"], help="Model 2: M = M(dist). Model 3: M = V.E, Model4: M=V.E.C", ) - data["motivation_parameters"]["width"] = c1.text_input( + data["motivation_parameters"]["width"] = c1.number_input( "Width", key="width", + step=0.5, value=float(data["motivation_parameters"]["width"]), help="width of function defining distance dependency", ) - data["motivation_parameters"]["height"] = c2.text_input( + data["motivation_parameters"]["height"] = c2.number_input( "Height", key="hight", + step=0.5, value=float(data["motivation_parameters"]["height"]), help="Height of function defining distance dependency", ) @@ -145,6 +141,7 @@ def ui_motivation_parameters(data: Dict[str, Any]) -> None: data["motivation_parameters"]["max_value"] = c1.number_input( "Max_value", key="max_value", + step=0.5, value=float(data["motivation_parameters"]["max_value"]), help="Max Value", ) @@ -152,6 +149,7 @@ def ui_motivation_parameters(data: Dict[str, Any]) -> None: data["motivation_parameters"]["min_value"] = c2.number_input( "Min_value", key="min_value", + step=0.5, value=float(data["motivation_parameters"]["min_value"]), help="Min Value", )