diff --git a/activitysim/abm/models/atwork_subtour_frequency.py b/activitysim/abm/models/atwork_subtour_frequency.py index f9cae3821..348355443 100644 --- a/activitysim/abm/models/atwork_subtour_frequency.py +++ b/activitysim/abm/models/atwork_subtour_frequency.py @@ -117,6 +117,7 @@ def atwork_subtour_frequency( trace_label=trace_label, trace_choice_name="atwork_subtour_frequency", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # convert indexes to alternative names diff --git a/activitysim/abm/models/atwork_subtour_scheduling.py b/activitysim/abm/models/atwork_subtour_scheduling.py index 1eec282f2..fff94ef30 100644 --- a/activitysim/abm/models/atwork_subtour_scheduling.py +++ b/activitysim/abm/models/atwork_subtour_scheduling.py @@ -56,7 +56,6 @@ def atwork_subtour_scheduling( estimator = estimation.manager.begin_estimation(state, "atwork_subtour_scheduling") model_spec = state.filesystem.read_model_spec(file_name=model_settings.SPEC) - sharrow_skip = model_settings.sharrow_skip coefficients_df = state.filesystem.read_model_coefficients(model_settings) model_spec = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator @@ -96,7 +95,7 @@ def atwork_subtour_scheduling( estimator=estimator, chunk_size=state.settings.chunk_size, trace_label=trace_label, - sharrow_skip=sharrow_skip, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/auto_ownership.py b/activitysim/abm/models/auto_ownership.py index c99344566..eefaa40a0 100644 --- a/activitysim/abm/models/auto_ownership.py +++ b/activitysim/abm/models/auto_ownership.py @@ -9,20 +9,21 @@ from activitysim.core import ( config, - expressions, estimation, + expressions, simulate, tracing, workflow, ) from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable from activitysim.core.configuration.logit import LogitComponentSettings + from .util import annotate logger = logging.getLogger(__name__) 
-class AutoOwnershipSettings(LogitComponentSettings): +class AutoOwnershipSettings(LogitComponentSettings, extra="forbid"): """ Settings for the `auto_ownership` component. """ @@ -99,6 +100,7 @@ def auto_ownership_simulate( trace_choice_name="auto_ownership", log_alt_losers=log_alt_losers, estimator=estimator, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/cdap.py b/activitysim/abm/models/cdap.py index 7eb082e4d..c99817fde 100644 --- a/activitysim/abm/models/cdap.py +++ b/activitysim/abm/models/cdap.py @@ -17,7 +17,11 @@ tracing, workflow, ) -from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable +from activitysim.core.configuration.base import ( + ComputeSettings, + PreprocessorSettings, + PydanticReadable, +) from activitysim.core.util import reindex logger = logging.getLogger(__name__) @@ -34,6 +38,7 @@ class CdapSettings(PydanticReadable, extra="forbid"): annotate_households: PreprocessorSettings | None = None COEFFICIENTS: Path CONSTANTS: dict[str, Any] = {} + compute_settings: ComputeSettings | None = None @workflow.step @@ -202,6 +207,7 @@ def cdap_simulate( trace_hh_id=trace_hh_id, trace_label=trace_label, add_joint_tour_utility=add_joint_tour_utility, + compute_settings=model_settings.compute_settings, ) else: choices = cdap.run_cdap( @@ -215,6 +221,7 @@ def cdap_simulate( chunk_size=state.settings.chunk_size, trace_hh_id=trace_hh_id, trace_label=trace_label, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/free_parking.py b/activitysim/abm/models/free_parking.py index 97b70ade3..9aa2800a6 100644 --- a/activitysim/abm/models/free_parking.py +++ b/activitysim/abm/models/free_parking.py @@ -118,6 +118,7 @@ def free_parking( trace_label=trace_label, trace_choice_name="free_parking_at_work", estimator=estimator, + compute_settings=model_settings.compute_settings, ) free_parking_alt = model_settings.FREE_PARKING_ALT 
diff --git a/activitysim/abm/models/joint_tour_composition.py b/activitysim/abm/models/joint_tour_composition.py index ab57298e0..ee4ac3a69 100644 --- a/activitysim/abm/models/joint_tour_composition.py +++ b/activitysim/abm/models/joint_tour_composition.py @@ -123,6 +123,7 @@ def joint_tour_composition( trace_label=trace_label, trace_choice_name="composition", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # convert indexes to alternative names diff --git a/activitysim/abm/models/joint_tour_frequency.py b/activitysim/abm/models/joint_tour_frequency.py index 93a8aa475..1700c143b 100644 --- a/activitysim/abm/models/joint_tour_frequency.py +++ b/activitysim/abm/models/joint_tour_frequency.py @@ -112,6 +112,7 @@ def joint_tour_frequency( trace_label=trace_label, trace_choice_name="joint_tour_frequency", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # convert indexes to alternative names diff --git a/activitysim/abm/models/joint_tour_frequency_composition.py b/activitysim/abm/models/joint_tour_frequency_composition.py index be52c900a..27b8347ec 100644 --- a/activitysim/abm/models/joint_tour_frequency_composition.py +++ b/activitysim/abm/models/joint_tour_frequency_composition.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) -class JointTourFrequencyCompositionSettings(LogitComponentSettings): +class JointTourFrequencyCompositionSettings(LogitComponentSettings, extra="forbid"): """ Settings for the `joint_tour_frequency_composition` component. 
""" @@ -140,6 +140,7 @@ def joint_tour_frequency_composition( trace_choice_name=trace_label, estimator=estimator, explicit_chunk_size=0, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/joint_tour_participation.py b/activitysim/abm/models/joint_tour_participation.py index ac8afdce0..98a8c70c7 100644 --- a/activitysim/abm/models/joint_tour_participation.py +++ b/activitysim/abm/models/joint_tour_participation.py @@ -418,6 +418,7 @@ def joint_tour_participation( trace_choice_name="participation", custom_chooser=participants_chooser, estimator=estimator, + compute_settings=model_settings.compute_settings, ) # choice is boolean (participate or not) diff --git a/activitysim/abm/models/joint_tour_scheduling.py b/activitysim/abm/models/joint_tour_scheduling.py index 260abedc7..9bdcbe146 100644 --- a/activitysim/abm/models/joint_tour_scheduling.py +++ b/activitysim/abm/models/joint_tour_scheduling.py @@ -104,7 +104,6 @@ def joint_tour_scheduling( estimator = estimation.manager.begin_estimation(state, "joint_tour_scheduling") model_spec = state.filesystem.read_model_spec(file_name=model_settings.SPEC) - sharrow_skip = model_settings.sharrow_skip coefficients_df = state.filesystem.read_model_coefficients(model_settings) model_spec = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator @@ -128,7 +127,7 @@ def joint_tour_scheduling( estimator=estimator, chunk_size=state.settings.chunk_size, trace_label=trace_label, - sharrow_skip=sharrow_skip, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/location_choice.py b/activitysim/abm/models/location_choice.py index 3017235f6..cb4de93b0 100644 --- a/activitysim/abm/models/location_choice.py +++ b/activitysim/abm/models/location_choice.py @@ -192,6 +192,9 @@ def _location_sample( chunk_tag=chunk_tag, trace_label=trace_label, zone_layer=zone_layer, + 
compute_settings=model_settings.compute_settings.subcomponent_settings( + "sample" + ), ) return choices @@ -696,6 +699,9 @@ def run_location_simulate( trace_choice_name=model_settings.DEST_CHOICE_COLUMN_NAME, estimator=estimator, skip_choice=skip_choice, + compute_settings=model_settings.compute_settings.subcomponent_settings( + "simulate" + ), ) if not want_logsums: diff --git a/activitysim/abm/models/mandatory_tour_frequency.py b/activitysim/abm/models/mandatory_tour_frequency.py index a80b82904..8ab69710f 100644 --- a/activitysim/abm/models/mandatory_tour_frequency.py +++ b/activitysim/abm/models/mandatory_tour_frequency.py @@ -53,7 +53,7 @@ def add_null_results(state, trace_label, mandatory_tour_frequency_settings): state.add_table("persons", persons) -class MandatoryTourFrequencySettings(LogitComponentSettings): +class MandatoryTourFrequencySettings(LogitComponentSettings, extra="forbid"): """ Settings for the `mandatory_tour_frequency` component. """ @@ -134,6 +134,7 @@ def mandatory_tour_frequency( trace_label=trace_label, trace_choice_name="mandatory_tour_frequency", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # convert indexes to alternative names diff --git a/activitysim/abm/models/non_mandatory_tour_frequency.py b/activitysim/abm/models/non_mandatory_tour_frequency.py index 972c4b3dc..f43a523f0 100644 --- a/activitysim/abm/models/non_mandatory_tour_frequency.py +++ b/activitysim/abm/models/non_mandatory_tour_frequency.py @@ -12,8 +12,8 @@ from activitysim.abm.models.util import annotate from activitysim.abm.models.util.overlap import ( - person_max_window, person_available_periods, + person_max_window, ) from activitysim.abm.models.util.school_escort_tours_trips import ( recompute_tour_count_statistics, @@ -161,7 +161,7 @@ class NonMandatoryTourSpecSegment(PydanticReadable): COEFFICIENTS: Path -class NonMandatoryTourFrequencySettings(LogitComponentSettings): +class NonMandatoryTourFrequencySettings(LogitComponentSettings, 
extra="forbid"): """ Settings for the `non_mandatory_tour_frequency` component. """ @@ -321,6 +321,7 @@ def non_mandatory_tour_frequency( trace_choice_name="non_mandatory_tour_frequency", estimator=estimator, explicit_chunk_size=model_settings.explicit_chunk, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/school_escorting.py b/activitysim/abm/models/school_escorting.py index 908113e98..0ade57dd5 100644 --- a/activitysim/abm/models/school_escorting.py +++ b/activitysim/abm/models/school_escorting.py @@ -3,7 +3,7 @@ from __future__ import annotations import logging -from typing import Any +from typing import Any, Literal import numpy as np import pandas as pd @@ -367,7 +367,7 @@ def create_school_escorting_bundles_table(choosers, tours, stage): return bundles -class SchoolEscortSettings(BaseLogitComponentSettings): +class SchoolEscortSettings(BaseLogitComponentSettings, extra="forbid"): """ Settings for the `telecommute_frequency` component. """ @@ -391,21 +391,6 @@ class SchoolEscortSettings(BaseLogitComponentSettings): GENDER_WEIGHT: float = 10.0 AGE_WEIGHT: float = 1.0 - sharrow_skip: bool | dict[str, bool] = False - """Setting to skip sharrow. - - Sharrow can be skipped (or not) for all school escorting stages by giving - simply true or false. Alternatively, it can be skipped only for particular - stages by giving a mapping of stage name to skipping. For example: - - ```yaml - sharrow_skip: - OUTBOUND: true - INBOUND: false - OUTBOUND_COND: true - ``` - """ - SIMULATE_CHOOSER_COLUMNS: list[str] | None = None SPEC: None = None @@ -428,6 +413,13 @@ class SchoolEscortSettings(BaseLogitComponentSettings): explicit_chunk: int = 0 """If > 0, use this chunk size instead of adaptive chunking.""" + LOGIT_TYPE: Literal["MNL"] = "MNL" + """Logit model mathematical form. + + * "MNL" + Multinomial logit model. 
+ """ + @workflow.step def school_escorting( @@ -508,19 +500,15 @@ def school_escorting( state, model_spec_raw, coefficients_df, estimator ) - # allow for skipping sharrow entirely in this model with `sharrow_skip: true` + # allow for skipping sharrow entirely in this model with `compute_settings.sharrow_skip: true` # or skipping stages selectively with a mapping of the stages to skip - sharrow_skip = model_settings.sharrow_skip - stage_sharrow_skip = False # default is false unless set below - if sharrow_skip: - if isinstance(sharrow_skip, dict): - stage_sharrow_skip = sharrow_skip.get(stage.upper(), False) - else: - stage_sharrow_skip = True - if stage_sharrow_skip: - locals_dict["_sharrow_skip"] = True - else: - locals_dict.pop("_sharrow_skip", None) + stage_compute_settings = model_settings.compute_settings.subcomponent_settings( + stage.upper() + ) + # if stage_sharrow_skip: + # locals_dict["_sharrow_skip"] = True + # else: + # locals_dict.pop("_sharrow_skip", None) # reduce memory by limiting columns if selected columns are supplied chooser_columns = model_settings.SIMULATE_CHOOSER_COLUMNS @@ -584,6 +572,7 @@ def school_escorting( trace_choice_name="school_escorting_" + stage, estimator=estimator, explicit_chunk_size=model_settings.explicit_chunk, + compute_settings=stage_compute_settings, ) if estimator: diff --git a/activitysim/abm/models/stop_frequency.py b/activitysim/abm/models/stop_frequency.py index 4513c2477..2f0253f21 100644 --- a/activitysim/abm/models/stop_frequency.py +++ b/activitysim/abm/models/stop_frequency.py @@ -214,6 +214,7 @@ def stop_frequency( trace_label=tracing.extend_trace_label(trace_label, segment_name), trace_choice_name="stops", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # convert indexes to alternative names diff --git a/activitysim/abm/models/telecommute_frequency.py b/activitysim/abm/models/telecommute_frequency.py index 19bd850f8..f98791a2a 100755 --- 
a/activitysim/abm/models/telecommute_frequency.py +++ b/activitysim/abm/models/telecommute_frequency.py @@ -20,7 +20,7 @@ logger = logging.getLogger("activitysim") -class TelecommuteFrequencySettings(LogitComponentSettings): +class TelecommuteFrequencySettings(LogitComponentSettings, extra="forbid"): """ Settings for the `telecommute_frequency` component. """ @@ -99,6 +99,7 @@ def telecommute_frequency( trace_label=trace_label, trace_choice_name="telecommute_frequency", estimator=estimator, + compute_settings=model_settings.compute_settings, ) choices = pd.Series(model_spec.columns[choices.values], index=choices.index) diff --git a/activitysim/abm/models/transit_pass_ownership.py b/activitysim/abm/models/transit_pass_ownership.py index 48e01c47d..9a34b7b0b 100644 --- a/activitysim/abm/models/transit_pass_ownership.py +++ b/activitysim/abm/models/transit_pass_ownership.py @@ -20,7 +20,7 @@ logger = logging.getLogger("activitysim") -class TransitPassOwnershipSettings(LogitComponentSettings): +class TransitPassOwnershipSettings(LogitComponentSettings, extra="forbid"): """ Settings for the `transit_pass_ownership` component. 
""" @@ -93,6 +93,7 @@ def transit_pass_ownership( trace_label=trace_label, trace_choice_name="transit_pass_ownership", estimator=estimator, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/transit_pass_subsidy.py b/activitysim/abm/models/transit_pass_subsidy.py index 7d1f320e2..e21be89f9 100644 --- a/activitysim/abm/models/transit_pass_subsidy.py +++ b/activitysim/abm/models/transit_pass_subsidy.py @@ -92,6 +92,7 @@ def transit_pass_subsidy( trace_label=trace_label, trace_choice_name="transit_pass_subsidy", estimator=estimator, + compute_settings=model_settings.compute_settings, ) if estimator: diff --git a/activitysim/abm/models/trip_departure_choice.py b/activitysim/abm/models/trip_departure_choice.py index 43f02df34..a0ddb363d 100644 --- a/activitysim/abm/models/trip_departure_choice.py +++ b/activitysim/abm/models/trip_departure_choice.py @@ -3,7 +3,6 @@ from __future__ import annotations import logging -from pathlib import Path from typing import Any import numpy as np @@ -20,7 +19,11 @@ tracing, workflow, ) -from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable +from activitysim.core.configuration.base import ( + ComputeSettings, + PreprocessorSettings, + PydanticCompute, +) from activitysim.core.skim_dataset import SkimDataset from activitysim.core.skim_dictionary import SkimDict from activitysim.core.util import reindex @@ -188,6 +191,7 @@ def choose_tour_leg_pattern( trace_label="trace_label", *, chunk_sizer: chunk.ChunkSizer, + compute_settings: ComputeSettings | None = None, ): alternatives = generate_alternatives(trip_segment, STOP_TIME_DURATION).sort_index() have_trace_targets = state.tracing.has_trace_targets(trip_segment) @@ -234,7 +238,14 @@ def choose_tour_leg_pattern( interaction_utilities, trace_eval_results, ) = interaction_simulate.eval_interaction_utilities( - state, spec, interaction_df, None, trace_label, trace_rows, estimator=None + state, + spec, + 
interaction_df, + None, + trace_label, + trace_rows, + estimator=None, + compute_settings=compute_settings, ) interaction_utilities = pd.concat( @@ -385,7 +396,14 @@ def choose_tour_leg_pattern( return choices -def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): +def apply_stage_two_model( + state: workflow.State, + omnibus_spec, + trips, + chunk_size, + trace_label: str, + compute_settings: ComputeSettings | None = None, +): if not trips.index.is_monotonic: trips = trips.sort_index() @@ -436,7 +454,7 @@ def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): trip_list = [] for ( - i, + _i, chooser_chunk, chunk_trace_label, chunk_sizer, @@ -444,7 +462,7 @@ def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): for is_outbound, trip_segment in chooser_chunk.groupby(OUTBOUND): direction = OUTBOUND if is_outbound else "inbound" spec = get_spec_for_segment(omnibus_spec, direction) - segment_trace_label = "{}_{}".format(direction, chunk_trace_label) + segment_trace_label = f"{direction}_{chunk_trace_label}" patterns = build_patterns(trip_segment, time_windows) @@ -455,6 +473,7 @@ def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): spec, trace_label=segment_trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) choices = pd.merge( @@ -482,7 +501,7 @@ def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): return trips["depart"].astype(int) -class TripDepartureChoiceSettings(PydanticReadable, extra="forbid"): +class TripDepartureChoiceSettings(PydanticCompute, extra="forbid"): """ Settings for the `trip_departure_choice` component. 
""" @@ -506,7 +525,6 @@ def trip_departure_choice( model_settings_file_name: str = "trip_departure_choice.yaml", trace_label: str = "trip_departure_choice", ) -> None: - if model_settings is None: model_settings = TripDepartureChoiceSettings.read_settings_file( state.filesystem, @@ -557,7 +575,12 @@ def trip_departure_choice( ) choices = apply_stage_two_model( - state, spec, trips_merged_df, state.settings.chunk_size, trace_label + state, + spec, + trips_merged_df, + state.settings.chunk_size, + trace_label, + compute_settings=model_settings.compute_settings, ) trips_df = trips diff --git a/activitysim/abm/models/trip_destination.py b/activitysim/abm/models/trip_destination.py index 6dea50980..2e07220c6 100644 --- a/activitysim/abm/models/trip_destination.py +++ b/activitysim/abm/models/trip_destination.py @@ -215,6 +215,9 @@ def _destination_sample( chunk_tag=chunk_tag, trace_label=trace_label, zone_layer=zone_layer, + compute_settings=model_settings.compute_settings.subcomponent_settings( + "sample" + ), ) return choices diff --git a/activitysim/abm/models/trip_mode_choice.py b/activitysim/abm/models/trip_mode_choice.py index b9091522c..e3e530fb7 100644 --- a/activitysim/abm/models/trip_mode_choice.py +++ b/activitysim/abm/models/trip_mode_choice.py @@ -277,6 +277,7 @@ def trip_mode_choice( trace_label=segment_trace_label, trace_choice_name="trip_mode_choice", estimator=estimator, + compute_settings=model_settings.compute_settings, ) if state.settings.trace_hh_id: diff --git a/activitysim/abm/models/util/cdap.py b/activitysim/abm/models/util/cdap.py index eab26bc7b..635d37a82 100644 --- a/activitysim/abm/models/util/cdap.py +++ b/activitysim/abm/models/util/cdap.py @@ -9,6 +9,7 @@ import pandas as pd from activitysim.core import chunk, logit, simulate, tracing, workflow +from activitysim.core.configuration.base import ComputeSettings logger = logging.getLogger(__name__) @@ -184,6 +185,7 @@ def individual_utilities( trace_label=None, *, chunk_sizer, + 
compute_settings: ComputeSettings | None = None, ): """ Calculate CDAP utilities for all individuals. @@ -211,6 +213,7 @@ def individual_utilities( locals_d, trace_label=trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) # add columns from persons to facilitate building household interactions @@ -909,6 +912,7 @@ def household_activity_choices( add_joint_tour_utility=False, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ Calculate household utilities for each activity pattern alternative for households of hhsize @@ -957,7 +961,12 @@ def household_activity_choices( ) utils = simulate.eval_utilities( - state, spec, choosers, trace_label=trace_label, chunk_sizer=chunk_sizer + state, + spec, + choosers, + trace_label=trace_label, + chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) if len(utils.index) == 0: @@ -981,6 +990,7 @@ def household_activity_choices( choosers, trace_label=trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) # add joint util to util @@ -1183,6 +1193,7 @@ def _run_cdap( add_joint_tour_utility, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ) -> pd.DataFrame | tuple: """ Implements core run_cdap functionality on persons df (or chunked subset thereof) @@ -1213,6 +1224,7 @@ def _run_cdap( trace_hh_id, trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "indiv_utils", indiv_utils) @@ -1229,6 +1241,7 @@ def _run_cdap( trace_label=trace_label, add_joint_tour_utility=add_joint_tour_utility, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) hh_choices_list.append(choices) @@ -1315,6 +1328,7 @@ def run_cdap( trace_hh_id=None, trace_label=None, add_joint_tour_utility=False, + compute_settings: ComputeSettings | None = None, ): """ Choose individual activity patterns for persons. 
@@ -1378,6 +1392,7 @@ def run_cdap( chunk_trace_label, add_joint_tour_utility, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) else: cdap_results = _run_cdap( @@ -1392,6 +1407,7 @@ def run_cdap( chunk_trace_label, add_joint_tour_utility, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(cdap_results) diff --git a/activitysim/abm/models/util/mode.py b/activitysim/abm/models/util/mode.py index b6e6bbb23..49776684a 100644 --- a/activitysim/abm/models/util/mode.py +++ b/activitysim/abm/models/util/mode.py @@ -9,6 +9,7 @@ import pandas as pd from activitysim.core import config, expressions, simulate, workflow +from activitysim.core.configuration.base import ComputeSettings from activitysim.core.configuration.logit import TourModeComponentSettings from activitysim.core.estimation import Estimator @@ -34,6 +35,7 @@ def mode_choice_simulate( trace_choice_name, trace_column_names=None, estimator: Optional[Estimator] = None, + compute_settings: ComputeSettings | None = None, ): """ common method for both tour_mode_choice and trip_mode_choice @@ -51,6 +53,7 @@ def mode_choice_simulate( trace_label trace_choice_name estimator + compute_settings : ComputeSettings Returns ------- @@ -70,6 +73,7 @@ def mode_choice_simulate( trace_choice_name=trace_choice_name, estimator=estimator, trace_column_names=trace_column_names, + compute_settings=compute_settings, ) # for consistency, always return dataframe, whether or not logsums were requested @@ -170,6 +174,7 @@ def run_tour_mode_choice_simulate( trace_choice_name=trace_choice_name, trace_column_names=trace_column_names, estimator=estimator, + compute_settings=model_settings.compute_settings, ) return choices diff --git a/activitysim/abm/models/util/tour_destination.py b/activitysim/abm/models/util/tour_destination.py index fbc8113e2..22b0744da 100644 --- a/activitysim/abm/models/util/tour_destination.py +++ b/activitysim/abm/models/util/tour_destination.py @@ -123,6 +123,9 @@ def 
_destination_sample( chunk_tag=chunk_tag, trace_label=trace_label, zone_layer=zone_layer, + compute_settings=model_settings.compute_settings.subcomponent_settings( + "sample" + ), ) # if special person id is passed diff --git a/activitysim/abm/models/util/tour_frequency.py b/activitysim/abm/models/util/tour_frequency.py index eff354965..2709fa4b8 100644 --- a/activitysim/abm/models/util/tour_frequency.py +++ b/activitysim/abm/models/util/tour_frequency.py @@ -637,7 +637,7 @@ class JointTourFreqCompAlts(PydanticReadable): COMPOSITION: JointTourFreqCompContent -class JointTourFreqCompSettings(LogitComponentSettings): +class JointTourFreqCompSettings(LogitComponentSettings, extra="forbid"): """ Settings for joint tour frequency and composition. """ diff --git a/activitysim/abm/models/util/tour_od.py b/activitysim/abm/models/util/tour_od.py index 22ea4a310..89dc7fdc3 100644 --- a/activitysim/abm/models/util/tour_od.py +++ b/activitysim/abm/models/util/tour_od.py @@ -216,6 +216,9 @@ def _od_sample( chunk_tag=chunk_tag, trace_label=trace_label, zone_layer="taz", + compute_settings=model_settings.compute_settings.subcomponent_settings( + "sample" + ), ) return choices diff --git a/activitysim/abm/models/util/tour_scheduling.py b/activitysim/abm/models/util/tour_scheduling.py index f52d0db44..db003786f 100644 --- a/activitysim/abm/models/util/tour_scheduling.py +++ b/activitysim/abm/models/util/tour_scheduling.py @@ -16,11 +16,11 @@ def run_tour_scheduling( state: workflow.State, - model_name, - chooser_tours, - persons_merged, - tdd_alts, - tour_segment_col, + model_name: str, + chooser_tours: pd.DataFrame, + persons_merged: pd.DataFrame, + tdd_alts: pd.DataFrame, + tour_segment_col: str, ): trace_label = model_name model_settings_file_name = f"{model_name}.yaml" @@ -70,7 +70,7 @@ def run_tour_scheduling( # load segmented specs spec_segment_settings = model_settings.SPEC_SEGMENTS specs = {} - sharrow_skips = {} + compute_settings = {} for spec_segment_name, spec_settings 
in spec_segment_settings.items(): bundle_name = f"{model_name}_{spec_segment_name}" @@ -85,7 +85,9 @@ def run_tour_scheduling( specs[spec_segment_name] = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator ) - sharrow_skips[spec_segment_name] = spec_settings.sharrow_skip + compute_settings[ + spec_segment_name + ] = spec_settings.compute_settings.subcomponent_settings(spec_segment_name) if estimator: estimators[spec_segment_name] = estimator # add to local list @@ -100,7 +102,7 @@ def run_tour_scheduling( tour_segments[tour_segment_name] = {} tour_segments[tour_segment_name]["spec_segment_name"] = spec_segment_name tour_segments[tour_segment_name]["spec"] = specs[spec_segment_name] - tour_segments[tour_segment_name]["sharrow_skip"] = sharrow_skips[ + tour_segments[tour_segment_name]["compute_settings"] = compute_settings[ spec_segment_name ] tour_segments[tour_segment_name]["estimator"] = estimators.get( @@ -123,7 +125,6 @@ def run_tour_scheduling( spec_file_name = model_settings.SPEC model_spec = state.filesystem.read_model_spec(file_name=spec_file_name) - sharrow_skip = model_settings.sharrow_skip coefficients_df = state.filesystem.read_model_coefficients(model_settings) model_spec = simulate.eval_coefficients( state, model_spec, coefficients_df, estimator @@ -139,7 +140,7 @@ def run_tour_scheduling( tour_segments = { "spec": model_spec, "estimator": estimator, - "sharrow_skip": sharrow_skip, + "compute_settings": model_settings.compute_settings, } if estimators: diff --git a/activitysim/abm/models/util/vectorize_tour_scheduling.py b/activitysim/abm/models/util/vectorize_tour_scheduling.py index 17425d75b..6bdc907bd 100644 --- a/activitysim/abm/models/util/vectorize_tour_scheduling.py +++ b/activitysim/abm/models/util/vectorize_tour_scheduling.py @@ -14,7 +14,7 @@ from activitysim.core import chunk, config, expressions, los, simulate from activitysim.core import timetable as tt from activitysim.core import tracing, workflow -from 
activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable +from activitysim.core.configuration.base import ComputeSettings, PreprocessorSettings from activitysim.core.configuration.logit import LogitComponentSettings from activitysim.core.interaction_sample_simulate import interaction_sample_simulate from activitysim.core.util import reindex @@ -224,6 +224,7 @@ def _compute_logsums( locals_d=locals_dict, chunk_size=0, trace_label=trace_label, + compute_settings=model_settings.compute_settings, ) return logsums @@ -705,8 +706,8 @@ def _schedule_tours( tour_owner_id_col, estimator, tour_trace_label, - sharrow_skip=False, *, + compute_settings: ComputeSettings | None = None, chunk_sizer: chunk.ChunkSizer, ): """ @@ -821,11 +822,6 @@ def _schedule_tours( if constants is not None: locals_d.update(constants) - if sharrow_skip: - locals_d["_sharrow_skip"] = True - else: - locals_d["_sharrow_skip"] = False - if not RUN_ALTS_PREPROCESSOR_BEFORE_MERGE: # Note: Clint was running alts_preprocessor here on tdd_interaction_dataset instead of on raw (unmerged) alts # and he was using logsum_tour_purpose as selector, although logically it should be the spec_segment @@ -861,6 +857,7 @@ def _schedule_tours( chunk_size=0, trace_label=tour_trace_label, estimator=estimator, + compute_settings=compute_settings, ) chunk_sizer.log_df(tour_trace_label, "choices", choices) @@ -891,7 +888,7 @@ def schedule_tours( chunk_size, tour_trace_label, tour_chunk_tag, - sharrow_skip=False, + compute_settings: ComputeSettings | None = None, ): """ chunking wrapper for _schedule_tours @@ -949,7 +946,7 @@ def schedule_tours( tour_owner_id_col, estimator, tour_trace_label=chunk_trace_label, - sharrow_skip=sharrow_skip, + compute_settings=compute_settings, chunk_sizer=chunk_sizer, ) @@ -1102,7 +1099,7 @@ def vectorize_tour_scheduling( chunk_size=chunk_size, tour_trace_label=segment_trace_label, tour_chunk_tag=segment_chunk_tag, - sharrow_skip=tour_segment_info.get("sharrow_skip"), + 
compute_settings=tour_segment_info.get("compute_settings"), ) choice_list.append(choices) @@ -1132,7 +1129,7 @@ def vectorize_tour_scheduling( chunk_size=chunk_size, tour_trace_label=tour_trace_label, tour_chunk_tag=tour_chunk_tag, - sharrow_skip=tour_segments.get("sharrow_skip"), + compute_settings=tour_segments.get("compute_settings"), ) choice_list.append(choices) @@ -1152,7 +1149,7 @@ def vectorize_subtour_scheduling( estimator, chunk_size=0, trace_label=None, - sharrow_skip=False, + compute_settings: ComputeSettings | None = None, ): """ Like vectorize_tour_scheduling but specifically for atwork subtours @@ -1251,7 +1248,7 @@ def vectorize_subtour_scheduling( state.settings.chunk_size, tour_trace_label, tour_chunk_tag, - sharrow_skip=sharrow_skip, + compute_settings=compute_settings, ) choice_list.append(choices) @@ -1306,7 +1303,7 @@ def vectorize_joint_tour_scheduling( estimator, chunk_size=0, trace_label=None, - sharrow_skip=False, + compute_settings: ComputeSettings | None = None, ): """ Like vectorize_tour_scheduling but specifically for joint tours @@ -1399,7 +1396,7 @@ def vectorize_joint_tour_scheduling( chunk_size, tour_trace_label, tour_chunk_tag, - sharrow_skip=sharrow_skip, + compute_settings=compute_settings, ) # - update timetables of all joint tour participants diff --git a/activitysim/abm/models/vehicle_allocation.py b/activitysim/abm/models/vehicle_allocation.py index a341493ff..9dcaf8c71 100644 --- a/activitysim/abm/models/vehicle_allocation.py +++ b/activitysim/abm/models/vehicle_allocation.py @@ -247,6 +247,7 @@ def vehicle_allocation( trace_label=trace_label, trace_choice_name="vehicle_allocation", estimator=estimator, + compute_settings=model_settings.compute_settings, ) # matching alt names to choices diff --git a/activitysim/abm/models/vehicle_type_choice.py b/activitysim/abm/models/vehicle_type_choice.py index 813652459..cfe6fc398 100644 --- a/activitysim/abm/models/vehicle_type_choice.py +++ 
b/activitysim/abm/models/vehicle_type_choice.py @@ -485,6 +485,7 @@ def iterate_vehicle_type_choice( trace_choice_name="vehicle_type", estimator=estimator, explicit_chunk_size=model_settings.explicit_chunk, + compute_settings=model_settings.compute_settings, ) # otherwise, "simple simulation" should suffice, with a model spec that enumerates @@ -500,6 +501,7 @@ def iterate_vehicle_type_choice( trace_label=trace_label, trace_choice_name="vehicle_type", estimator=estimator, + compute_settings=model_settings.compute_settings, ) else: raise NotImplementedError(simulation_type) @@ -554,7 +556,7 @@ def iterate_vehicle_type_choice( return all_choices, all_choosers -class VehicleTypeChoiceSettings(LogitComponentSettings): +class VehicleTypeChoiceSettings(LogitComponentSettings, extra="forbid"): """ Settings for the `vehicle_type_choice` component. """ diff --git a/activitysim/abm/models/work_from_home.py b/activitysim/abm/models/work_from_home.py index 234302b70..8b96dafa1 100755 --- a/activitysim/abm/models/work_from_home.py +++ b/activitysim/abm/models/work_from_home.py @@ -50,9 +50,6 @@ class WorkFromHomeSettings(LogitComponentSettings, extra="forbid"): WORK_FROM_HOME_TARGET_PERCENT_TOLERANCE: float = None """Setting to set work from home target percent tolerance.""" - sharrow_skip: bool = False - """Setting to skip sharrow.""" - DEST_CHOICE_COLUMN_NAME: str = "workplace_zone_id" """Column name in persons dataframe to specify the workplace zone id. 
""" @@ -140,9 +137,6 @@ def work_from_home( state, model_spec, coefficients_df, estimator ) - if model_settings.sharrow_skip: - constants["disable_sharrow"] = True - choices = simulate.simple_simulate( state, choosers=choosers, @@ -152,6 +146,7 @@ def work_from_home( trace_label=trace_label, trace_choice_name="work_from_home", estimator=estimator, + compute_settings=model_settings.compute_settings, ) if iterations_target_percent is not None: diff --git a/activitysim/core/configuration/base.py b/activitysim/core/configuration/base.py index 754865dc1..556dd9916 100644 --- a/activitysim/core/configuration/base.py +++ b/activitysim/core/configuration/base.py @@ -1,8 +1,10 @@ from __future__ import annotations +from contextlib import contextmanager from pathlib import Path from typing import Any, Literal, TypeVar, Union # noqa: F401 +import pandas as pd from pydantic import BaseModel as PydanticBase from activitysim.core import configuration @@ -126,3 +128,115 @@ class PreprocessorSettings(PydanticBase): number of merged tables as the memory requirements for the preprocessor will increase with each table. """ + + +class ComputeSettings(PydanticBase): + """ + Sharrow settings for a component. + """ + + sharrow_skip: bool | dict[str, bool] = False + """Skip sharrow when evaluating this component. + + This overrides the global sharrow setting, and is useful if you want to skip + sharrow for particular components, either because their specifications are + not compatible with sharrow or if the sharrow performance is known to be + poor on this component. + + When a component has multiple subcomponents, the `sharrow_skip` setting can be + a dictionary that maps the names of the subcomponents to boolean values. 
+ For example, to skip sharrow for an OUTBOUND and OUTBOUND_COND subcomponent + but not the INBOUND subcomponent, use the following setting: + + ```yaml + sharrow_skip: + OUTBOUND: true + INBOUND: false + OUTBOUND_COND: true + ``` + + Alternatively, even for components with multiple subcomponents, the `sharrow_skip` + value can be a single boolean true or false, which will be used for all + subcomponents. + + """ + + fastmath: bool = True + """Use fastmath when evaluating this component with sharrow. + + The fastmath option can be used to speed up the evaluation of expressions in + this component's spec files, but it does so by making some simplifying + assumptions about the math, e.g. that neither inputs nor outputs of any + computations are NaN or Inf. This can lead to errors when the assumptions + are violated. If running in sharrow test mode generates errors, try turning + this setting off. + """ + + use_bottleneck: bool | None = None + """Use the bottleneck library with pandas.eval. + + Set to True or False to force the use of bottleneck or not. If set to None, + the current pandas option setting of `compute.use_bottleneck` will be used. + + See https://pandas.pydata.org/docs/reference/api/pandas.set_option.html + for more information.""" + + use_numexpr: bool | None = None + """Use the numexpr library with pandas.eval. + + Set to True or False to force the use of numexpr or not. If set to None, + the current pandas option setting of `compute.use_numexpr` will be used. + + See https://pandas.pydata.org/docs/reference/api/pandas.set_option.html + for more information. + """ + + use_numba: bool | None = None + """Use the numba library with pandas.eval. + + Set to True or False to force the use of numba or not. If set to None, + the current pandas option setting of `compute.use_numba` will be used. + + See https://pandas.pydata.org/docs/reference/api/pandas.set_option.html + for more information. 
+ """ + + def should_skip(self, subcomponent: str) -> bool: + """Check if sharrow should be skipped for a particular subcomponent.""" + if isinstance(self.sharrow_skip, dict): + return self.sharrow_skip.get(subcomponent, False) + else: + return bool(self.sharrow_skip) + + @contextmanager + def pandas_option_context(self): + """Context manager to set pandas options for compute settings.""" + args = () + if self.use_bottleneck is not None: + args += ("compute.use_bottleneck", self.use_bottleneck) + if self.use_numexpr is not None: + args += ("compute.use_numexpr", self.use_numexpr) + if self.use_numba is not None: + args += ("compute.use_numba", self.use_numba) + if args: + with pd.option_context(*args): + yield + else: + yield + + def subcomponent_settings(self, subcomponent: str) -> ComputeSettings: + """Get the sharrow settings for a particular subcomponent.""" + return ComputeSettings( + sharrow_skip=self.should_skip(subcomponent), + fastmath=self.fastmath, + use_bottleneck=self.use_bottleneck, + use_numexpr=self.use_numexpr, + use_numba=self.use_numba, + ) + + +class PydanticCompute(PydanticReadable): + """Base class for component settings that include optional sharrow controls.""" + + compute_settings: ComputeSettings = ComputeSettings() + """Sharrow settings for this component.""" diff --git a/activitysim/core/configuration/logit.py b/activitysim/core/configuration/logit.py index a7f507aeb..22e14e49f 100644 --- a/activitysim/core/configuration/logit.py +++ b/activitysim/core/configuration/logit.py @@ -1,12 +1,14 @@ from __future__ import annotations +import warnings from pathlib import Path from typing import Any, Literal +import pydantic from pydantic import BaseModel as PydanticBase -from pydantic import validator +from pydantic import model_validator, validator -from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable +from activitysim.core.configuration.base import PreprocessorSettings, PydanticCompute class 
LogitNestSpec(PydanticBase): @@ -43,7 +45,7 @@ def prefer_float_to_str(cls, coefficient_value): return coefficient_value -class BaseLogitComponentSettings(PydanticReadable): +class BaseLogitComponentSettings(PydanticCompute): """ Base configuration class for components that are logit models. @@ -75,8 +77,35 @@ class BaseLogitComponentSettings(PydanticReadable): CONSTANTS: dict[str, Any] = {} """Named constants usable in the utility expressions.""" - sharrow_skip: bool = False - """Skip sharrow when evaluating this component.""" + # sharrow_skip is deprecated in favor of compute_settings.sharrow_skip + @model_validator(mode="before") + @classmethod + def update_sharrow_skip(cls, data: Any) -> Any: + if isinstance(data, dict): + if "sharrow_skip" in data: + if "compute_settings" not in data: + # move to new format + data["compute_settings"] = {"sharrow_skip": data["sharrow_skip"]} + del data["sharrow_skip"] + warnings.warn( + "sharrow_skip is deprecated in favor of compute_settings.sharrow_skip", + DeprecationWarning, + ) + elif ( + isinstance(data["compute_settings"], dict) + and "sharrow_skip" not in data["compute_settings"] + ): + data["compute_settings"]["sharrow_skip"] = data["sharrow_skip"] + del data["sharrow_skip"] + warnings.warn( + "sharrow_skip is deprecated in favor of compute_settings.sharrow_skip", + DeprecationWarning, + ) + elif "sharrow_skip" in data["compute_settings"]: + raise ValueError( + "sharrow_skip and compute_settings.sharrow_skip cannot both be defined" + ) + return data class LogitComponentSettings(BaseLogitComponentSettings): @@ -134,7 +163,7 @@ def nests_are_for_nl(cls, nests, values): return nests -class TemplatedLogitComponentSettings(LogitComponentSettings): +class TemplatedLogitComponentSettings(LogitComponentSettings, extra="forbid"): """ Base configuration for segmented logit models with a coefficient template. 
""" @@ -203,7 +232,7 @@ class TourLocationComponentSettings(LocationComponentSettings, extra="forbid"): The number of alternatives to sample for estimation mode. If zero, then all alternatives are used. Truth alternative will be included in the sample. - Larch does not yet support sampling alternatives for estimation, + Larch does not yet support sampling alternatives for estimation, but this setting is still helpful for estimation mode runtime. """ diff --git a/activitysim/core/flow.py b/activitysim/core/flow.py index 92429e7d4..39f706a6c 100644 --- a/activitysim/core/flow.py +++ b/activitysim/core/flow.py @@ -16,6 +16,7 @@ import activitysim.core.skim_dataset # noqa: F401 from activitysim import __version__ from activitysim.core import tracing, workflow +from activitysim.core.configuration.base import ComputeSettings from activitysim.core.simulate_consts import SPEC_EXPRESSION_NAME, SPEC_LABEL_NAME from activitysim.core.timetable import ( sharrow_tt_adjacent_window_after, @@ -142,6 +143,7 @@ def get_flow( choosers=None, interacts=None, zone_layer=None, + compute_settings: ComputeSettings | None = None, ): extra_vars = only_simple(local_d) orig_col_name = local_d.get("orig_col_name", None) @@ -184,6 +186,7 @@ def get_flow( zone_layer=zone_layer, aux_vars=aux_vars, primary_origin_col_name=primary_origin_col_name, + compute_settings=compute_settings, ) flow.tree.aux_vars = aux_vars return flow @@ -465,6 +468,7 @@ def new_flow( zone_layer=None, aux_vars=None, primary_origin_col_name=None, + compute_settings: ComputeSettings | None = None, ): """ Setup a new sharrow flow. @@ -516,12 +520,15 @@ def new_flow( aux_vars : Mapping Extra values that are available to expressions and which are written only by reference into compiled code (and thus can be changed later). + compute_settings : ComputeSettings, optional + Settings for the sharrow flow. 
Returns ------- sharrow.Flow """ - + if compute_settings is None: + compute_settings = ComputeSettings() with logtime(f"setting up flow {trace_label}"): if choosers is None: chooser_cols = [] @@ -700,6 +707,7 @@ def _apply_filter(_dataset, renames: list): extra_hash_data=extra_hash_data, hashing_level=0, boundscheck=False, + fastmath=compute_settings.fastmath, ) @@ -750,6 +758,7 @@ def apply_flow( required=False, interacts=None, zone_layer=None, + compute_settings: ComputeSettings | None = None, ): """ Apply a sharrow flow. @@ -779,6 +788,8 @@ def apply_flow( Specify which zone layer of the skims is to be used. You cannot use the 'maz' zone layer in a one-zone model, but you can use the 'taz' layer in a two- or three-zone model (e.g. for destination pre-sampling). + compute_settings : ComputeSettings, optional + Settings for the sharrow flow, including for skipping and fastmath. Returns ------- @@ -807,6 +818,7 @@ def apply_flow( choosers=choosers, interacts=interacts, zone_layer=zone_layer, + compute_settings=compute_settings, ) except ValueError as err: if "unable to rewrite" in str(err): diff --git a/activitysim/core/interaction_sample.py b/activitysim/core/interaction_sample.py index 80dc6fb00..91ea04f84 100644 --- a/activitysim/core/interaction_sample.py +++ b/activitysim/core/interaction_sample.py @@ -15,6 +15,7 @@ tracing, workflow, ) +from activitysim.core.configuration.base import ComputeSettings from activitysim.core.skim_dataset import DatasetWrapper from activitysim.core.skim_dictionary import SkimWrapper @@ -132,6 +133,7 @@ def _interaction_sample( trace_label=None, zone_layer=None, chunk_sizer=None, + compute_settings: ComputeSettings | None = None, ): """ Run a MNL simulation in the situation in which alternatives must @@ -178,6 +180,9 @@ def _interaction_sample( 'maz' zone layer in a one-zone model, but you can use the 'taz' layer in a two- or three-zone model (e.g. for destination pre-sampling). 
+ compute_settings : ComputeSettings, optional + Settings to use if compiling with sharrow + Returns ------- choices_df : pandas.DataFrame @@ -223,6 +228,10 @@ def _interaction_sample( chooser_index_id = interaction_simulate.ALT_CHOOSER_ID if log_alt_losers else None sharrow_enabled = state.settings.sharrow + if compute_settings is None: + compute_settings = ComputeSettings() + if compute_settings.sharrow_skip: + sharrow_enabled = False # - cross join choosers and alternatives (cartesian product) # for every chooser, there will be a row for each alternative @@ -246,6 +255,7 @@ def _interaction_sample( log_alt_losers=log_alt_losers, extra_data=alternatives, zone_layer=zone_layer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "interaction_utilities", interaction_utilities) if sharrow_enabled == "test" or True: @@ -302,6 +312,7 @@ def _interaction_sample( estimator=None, log_alt_losers=log_alt_losers, zone_layer=zone_layer, + compute_settings=ComputeSettings(sharrow_skip=True), ) chunk_sizer.log_df(trace_label, "interaction_utilities", interaction_utilities) @@ -521,6 +532,7 @@ def interaction_sample( chunk_tag: str | None = None, trace_label: str | None = None, zone_layer: str | None = None, + compute_settings: ComputeSettings | None = None, ): """ Run a simulation in the situation in which alternatives must @@ -616,6 +628,7 @@ def interaction_sample( trace_label=chunk_trace_label, zone_layer=zone_layer, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) if choices.shape[0] > 0: diff --git a/activitysim/core/interaction_sample_simulate.py b/activitysim/core/interaction_sample_simulate.py index b42eb2683..5dcda88b5 100644 --- a/activitysim/core/interaction_sample_simulate.py +++ b/activitysim/core/interaction_sample_simulate.py @@ -8,6 +8,7 @@ import pandas as pd from activitysim.core import chunk, interaction_simulate, logit, tracing, workflow +from activitysim.core.configuration.base import ComputeSettings from 
activitysim.core.simulate import set_skim_wrapper_targets logger = logging.getLogger(__name__) @@ -31,6 +32,7 @@ def _interaction_sample_simulate( skip_choice=False, *, chunk_sizer: chunk.ChunkSizer, + compute_settings: ComputeSettings | None = None, ): """ Run a MNL simulation in the situation in which alternatives must @@ -181,6 +183,7 @@ def _interaction_sample_simulate( trace_rows, estimator=estimator, log_alt_losers=log_alt_losers, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "interaction_utilities", interaction_utilities) @@ -375,6 +378,8 @@ def interaction_sample_simulate( trace_choice_name=None, estimator=None, skip_choice=False, + *, + compute_settings: ComputeSettings | None = None, ): """ Run a simulation in the situation in which alternatives must @@ -464,6 +469,7 @@ def interaction_sample_simulate( estimator, skip_choice, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(choices) diff --git a/activitysim/core/interaction_simulate.py b/activitysim/core/interaction_simulate.py index 3b9ba5d5f..12433056d 100644 --- a/activitysim/core/interaction_simulate.py +++ b/activitysim/core/interaction_simulate.py @@ -13,6 +13,7 @@ import pandas as pd from . 
import chunk, config, logit, simulate, tracing, workflow +from .configuration.base import ComputeSettings logger = logging.getLogger(__name__) @@ -32,6 +33,7 @@ def eval_interaction_utilities( log_alt_losers=False, extra_data=None, zone_layer=None, + compute_settings: ComputeSettings | None = None, ): """ Compute the utilities for a single-alternative spec evaluated in the context of df @@ -82,13 +84,11 @@ def eval_interaction_utilities( logger.info("Running eval_interaction_utilities on %s rows" % df.shape[0]) sharrow_enabled = state.settings.sharrow - - if locals_d is not None and locals_d.get("_sharrow_skip", False): + if compute_settings is None: + compute_settings = ComputeSettings() + if compute_settings.sharrow_skip: sharrow_enabled = False - # if trace_label.startswith("trip_destination"): - # sharrow_enabled = False - logger.info(f"{trace_label} sharrow_enabled is {sharrow_enabled}") trace_eval_results = None @@ -184,6 +184,7 @@ def replace_in_index_level(mi, level, *repls): trace_label, interacts=extra_data, zone_layer=zone_layer, + compute_settings=compute_settings, ) if sh_util is not None: chunk_sizer.log_df(trace_label, "sh_util", sh_util) @@ -261,107 +262,112 @@ def to_series(x): exprs = spec.index labels = spec.index - for expr, label, coefficient in zip(exprs, labels, spec.iloc[:, 0]): - try: - # - allow temps of form _od_DIST@od_skim['DIST'] - if expr.startswith("_"): - target = expr[: expr.index("@")] - rhs = expr[expr.index("@") + 1 :] - v = to_series(eval(rhs, globals(), locals_d)) - - # update locals to allows us to ref previously assigned targets - locals_d[target] = v - chunk_sizer.log_df( - trace_label, target, v - ) # track temps stored in locals - - if trace_eval_results is not None: - trace_eval_results[expr] = v[trace_rows] + with compute_settings.pandas_option_context(): + for expr, label, coefficient in zip(exprs, labels, spec.iloc[:, 0]): + try: + # - allow temps of form _od_DIST@od_skim['DIST'] + if expr.startswith("_"): + target = 
expr[: expr.index("@")] + rhs = expr[expr.index("@") + 1 :] + v = to_series(eval(rhs, globals(), locals_d)) - # don't add temps to utility sums - # they have a non-zero dummy coefficient to avoid being removed from spec as NOPs - continue + # update locals to allows us to ref previously assigned targets + locals_d[target] = v + chunk_sizer.log_df( + trace_label, target, v + ) # track temps stored in locals - if expr.startswith("@"): - v = to_series(eval(expr[1:], globals(), locals_d)) - else: - v = df.eval(expr, resolvers=[locals_d]) + if trace_eval_results is not None: + trace_eval_results[expr] = v[trace_rows] - if check_for_variability and v.std() == 0: - logger.info( - "%s: no variability (%s) in: %s" - % (trace_label, v.iloc[0], expr) - ) - no_variability += 1 - - # FIXME - how likely is this to happen? Not sure it is really a problem? - if ( - check_for_variability - and np.count_nonzero(v.isnull().values) > 0 - ): - logger.info("%s: missing values in: %s" % (trace_label, expr)) - has_missing_vals += 1 - - if estimator: - # in case we modified expression_values_df index - expression_values_df.insert( - loc=len(expression_values_df.columns), - column=label, - value=v.values if isinstance(v, pd.Series) else v, - ) + # don't add temps to utility sums + # they have a non-zero dummy coefficient to avoid being removed from spec as NOPs + continue - utility = (v * coefficient).astype("float") + if expr.startswith("@"): + v = to_series(eval(expr[1:], globals(), locals_d)) + else: + v = df.eval(expr, resolvers=[locals_d]) - if log_alt_losers: - assert ALT_CHOOSER_ID in df - max_utils_by_chooser = utility.groupby(df[ALT_CHOOSER_ID]).max() + if check_for_variability and v.std() == 0: + logger.info( + "%s: no variability (%s) in: %s" + % (trace_label, v.iloc[0], expr) + ) + no_variability += 1 - if (max_utils_by_chooser < simulate.ALT_LOSER_UTIL).any(): - losers = max_utils_by_chooser[ - max_utils_by_chooser < simulate.ALT_LOSER_UTIL - ] - logger.warning( - 
f"{trace_label} - {len(losers)} choosers of {len(max_utils_by_chooser)} " - f"with prohibitive utilities for all alternatives for expression: {expr}" + # FIXME - how likely is this to happen? Not sure it is really a problem? + if ( + check_for_variability + and np.count_nonzero(v.isnull().values) > 0 + ): + logger.info( + "%s: missing values in: %s" % (trace_label, expr) + ) + has_missing_vals += 1 + + if estimator: + # in case we modified expression_values_df index + expression_values_df.insert( + loc=len(expression_values_df.columns), + column=label, + value=v.values if isinstance(v, pd.Series) else v, ) - # loser_df = df[df[ALT_CHOOSER_ID].isin(losers.index)] - # print(f"\nloser_df\n{loser_df}\n") - # print(f"\nloser_max_utils_by_chooser\n{losers}\n") - # bug + utility = (v * coefficient).astype("float") - del max_utils_by_chooser + if log_alt_losers: + assert ALT_CHOOSER_ID in df + max_utils_by_chooser = utility.groupby( + df[ALT_CHOOSER_ID] + ).max() - utilities.utility.values[:] += utility + if (max_utils_by_chooser < simulate.ALT_LOSER_UTIL).any(): + losers = max_utils_by_chooser[ + max_utils_by_chooser < simulate.ALT_LOSER_UTIL + ] + logger.warning( + f"{trace_label} - {len(losers)} choosers of {len(max_utils_by_chooser)} " + f"with prohibitive utilities for all alternatives for expression: {expr}" + ) - if trace_eval_results is not None: - # expressions should have been uniquified when spec was read - # (though we could do it here if need be...) 
- # expr = assign.uniquify_key(trace_eval_results, expr, template="{} # ({})") - assert expr not in trace_eval_results + # loser_df = df[df[ALT_CHOOSER_ID].isin(losers.index)] + # print(f"\nloser_df\n{loser_df}\n") + # print(f"\nloser_max_utils_by_chooser\n{losers}\n") + # bug - trace_eval_results[expr] = v[trace_rows] - k = "partial utility (coefficient = %s) for %s" % ( - coefficient, - expr, - ) - trace_eval_results[k] = v[trace_rows] * coefficient + del max_utils_by_chooser - del v - # chunk_sizer.log_df(trace_label, 'v', None) + utilities.utility.values[:] += utility - except Exception as err: - logger.exception( - f"{trace_label} - {type(err).__name__} ({str(err)}) evaluating: {str(expr)}" - ) - if isinstance( - err, AssertionError - ) and "od pairs not in skim" in str(err): - logger.warning( - f"recode_pipeline_columns is set to {state.settings.recode_pipeline_columns}, " - f"you may want to check this" + if trace_eval_results is not None: + # expressions should have been uniquified when spec was read + # (though we could do it here if need be...) 
+ # expr = assign.uniquify_key(trace_eval_results, expr, template="{} # ({})") + assert expr not in trace_eval_results + + trace_eval_results[expr] = v[trace_rows] + k = "partial utility (coefficient = %s) for %s" % ( + coefficient, + expr, + ) + trace_eval_results[k] = v[trace_rows] * coefficient + + del v + # chunk_sizer.log_df(trace_label, 'v', None) + + except Exception as err: + logger.exception( + f"{trace_label} - {type(err).__name__} ({str(err)}) evaluating: {str(expr)}" ) - raise err + if isinstance( + err, AssertionError + ) and "od pairs not in skim" in str(err): + logger.warning( + f"recode_pipeline_columns is set to {state.settings.recode_pipeline_columns}, " + f"you may want to check this" + ) + raise err if estimator: estimator.log( @@ -568,6 +574,10 @@ def to_series(x): re_sh_flow_load = sh_flow.load(sh_tree, dtype=np.float32) re_sh_flow_load_ = re_sh_flow_load[re_trace] + use_bottleneck = pd.get_option("compute.use_bottleneck") + use_numexpr = pd.get_option("compute.use_numexpr") + use_numba = pd.get_option("compute.use_numba") + look_for_problems_here = np.where( ~np.isclose( re_sh_flow_load_[ @@ -604,6 +614,7 @@ def _interaction_simulate( log_alt_losers=False, estimator=None, chunk_sizer=None, + compute_settings: ComputeSettings | None = None, ): """ Run a MNL simulation in the situation in which alternatives must @@ -691,11 +702,13 @@ def _interaction_simulate( alt_index_id = estimator.get_alt_id() if estimator else None chooser_index_id = ALT_CHOOSER_ID if log_alt_losers else None - sharrow_enabled = state.settings.sharrow - interaction_utilities = None - - if locals_d is not None and locals_d.get("_sharrow_skip", False): + if compute_settings is None: + compute_settings = ComputeSettings() + if compute_settings.sharrow_skip: sharrow_enabled = False + else: + sharrow_enabled = state.settings.sharrow + interaction_utilities = None if ( sharrow_enabled @@ -719,6 +732,7 @@ def _interaction_simulate( estimator=estimator, 
log_alt_losers=log_alt_losers, extra_data=alternatives, + compute_settings=compute_settings, ) # set this index here as this is how later code extracts the chosen alt id's @@ -783,6 +797,7 @@ def _interaction_simulate( trace_rows, estimator=estimator, log_alt_losers=log_alt_losers, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "interaction_utilities", interaction_utilities) # mem.trace_memory_info(f"{trace_label}.init interaction_utilities", force_garbage_collect=True) @@ -890,6 +905,7 @@ def interaction_simulate( trace_choice_name=None, estimator=None, explicit_chunk_size=0, + compute_settings: ComputeSettings | None = None, ): """ Run a simulation in the situation in which alternatives must @@ -967,6 +983,7 @@ def interaction_simulate( log_alt_losers=log_alt_losers, estimator=estimator, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(choices) diff --git a/activitysim/core/pathbuilder.py b/activitysim/core/pathbuilder.py index 266a99832..31393ceea 100644 --- a/activitysim/core/pathbuilder.py +++ b/activitysim/core/pathbuilder.py @@ -20,6 +20,7 @@ tracing, workflow, ) +from activitysim.core.configuration.base import ComputeSettings from activitysim.core.pathbuilder_cache import memo from activitysim.core.util import reindex @@ -56,7 +57,7 @@ def compute_utilities( f"{trace_label} Running compute_utilities with {choosers.shape[0]} choosers" ) - locals_dict = {"np": np, "los": network_los, "disable_sharrow": True} + locals_dict = {"np": np, "los": network_los} locals_dict.update(model_constants) # we don't grok coefficients, but allow them to use constants in spec alt columns @@ -88,6 +89,7 @@ def compute_utilities( trace_label=trace_label, trace_column_names=trace_column_names, chunk_sizer=chunk_sizer, + compute_settings=ComputeSettings(sharrow_skip=True), ) return utilities diff --git a/activitysim/core/simulate.py b/activitysim/core/simulate.py index 133a8b1dc..203c25668 100644 --- 
a/activitysim/core/simulate.py +++ b/activitysim/core/simulate.py @@ -25,7 +25,7 @@ util, workflow, ) -from activitysim.core.configuration.base import PydanticBase +from activitysim.core.configuration.base import ComputeSettings, PydanticBase from activitysim.core.configuration.logit import ( BaseLogitComponentSettings, LogitNestSpec, @@ -57,7 +57,7 @@ def random_rows(state: workflow.State, df, n): return df -def uniquify_spec_index(spec): +def uniquify_spec_index(spec: pd.DataFrame): # uniquify spec index inplace # ensure uniqueness of spec index by appending comment with dupe count # this allows us to use pandas dot to compute_utilities @@ -532,6 +532,7 @@ def eval_utilities( spec_sh=None, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ Evaluate a utility function as defined in a spec file. @@ -571,6 +572,8 @@ def eval_utilities( This is meant to give the same result, but allows for some optimizations or preprocessing outside the sharrow framework (e.g. to run the Python based transit virtual path builder and cache relevant values). + compute_settings : ComputeSettings, optional + Settings for sharrow. If not given, the default settings are used. Returns ------- @@ -592,7 +595,9 @@ def eval_utilities( if spec_sh is None: spec_sh = spec - if locals_d is not None and "disable_sharrow" in locals_d: + if compute_settings is None: + compute_settings = ComputeSettings() + if compute_settings.sharrow_skip: sharrow_enabled = False if sharrow_enabled: @@ -610,6 +615,7 @@ def eval_utilities( trace_label, sharrow_enabled == "require", zone_layer=zone_layer, + compute_settings=compute_settings, ) utilities = sh_util timelogger.mark("sharrow flow", True, logger, trace_label) @@ -641,42 +647,43 @@ def eval_utilities( chunk_sizer.log_df(trace_label, "expression_values", expression_values) i = 0 - for expr, coefficients in zip(exprs, spec.values): - try: - with warnings.catch_warnings(record=True) as w: - # Cause all warnings to always be triggered. 
- warnings.simplefilter("always") - if expr.startswith("@"): - expression_value = eval(expr[1:], globals_dict, locals_dict) - else: - expression_value = choosers.eval(expr) - - if len(w) > 0: - for wrn in w: - logger.warning( - f"{trace_label} - {type(wrn).__name__} ({wrn.message}) evaluating: {str(expr)}" - ) - - except Exception as err: - logger.exception( - f"{trace_label} - {type(err).__name__} ({str(err)}) evaluating: {str(expr)}" - ) - raise err - - if log_alt_losers: - # utils for each alt for this expression - # FIXME if we always did tis, we cold uem these and skip np.dot below - utils = np.outer(expression_value, coefficients) - losers = np.amax(utils, axis=1) < ALT_LOSER_UTIL - - if losers.any(): - logger.warning( - f"{trace_label} - {sum(losers)} choosers of {len(losers)} " - f"with prohibitive utilities for all alternatives for expression: {expr}" + with compute_settings.pandas_option_context(): + for expr, coefficients in zip(exprs, spec.values): + try: + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. 
+ warnings.simplefilter("always") + if expr.startswith("@"): + expression_value = eval(expr[1:], globals_dict, locals_dict) + else: + expression_value = choosers.eval(expr) + + if len(w) > 0: + for wrn in w: + logger.warning( + f"{trace_label} - {type(wrn).__name__} ({wrn.message}) evaluating: {str(expr)}" + ) + + except Exception as err: + logger.exception( + f"{trace_label} - {type(err).__name__} ({str(err)}) evaluating: {str(expr)}" ) + raise err + + if log_alt_losers: + # utils for each alt for this expression + # FIXME if we always did tis, we cold uem these and skip np.dot below + utils = np.outer(expression_value, coefficients) + losers = np.amax(utils, axis=1) < ALT_LOSER_UTIL + + if losers.any(): + logger.warning( + f"{trace_label} - {sum(losers)} choosers of {len(losers)} " + f"with prohibitive utilities for all alternatives for expression: {expr}" + ) - expression_values[i] = expression_value - i += 1 + expression_values[i] = expression_value + i += 1 chunk_sizer.log_df(trace_label, "expression_values", expression_values) @@ -1157,6 +1164,7 @@ def eval_mnl( trace_column_names=None, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ Run a simulation for when the model spec does not involve alternative @@ -1220,6 +1228,7 @@ def eval_mnl( estimator=estimator, trace_column_names=trace_column_names, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "utilities", utilities) @@ -1278,6 +1287,7 @@ def eval_nl( trace_column_names=None, *, chunk_sizer: chunk.ChunkSizer, + compute_settings: ComputeSettings | None = None, ): """ Run a nested-logit simulation for when the model spec does not involve alternative @@ -1308,6 +1318,8 @@ def eval_nl( This is the column label to be used in trace file csv dump of choices trace_column_names: str or list of str chooser columns to include when tracing expression_values + fastmath : bool, default True + Use fastmath for sharrow compiled code. 
Returns ------- @@ -1339,6 +1351,7 @@ def eval_nl( trace_column_names=trace_column_names, spec_sh=spec_sh, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "raw_utilities", raw_utilities) @@ -1465,6 +1478,7 @@ def _simple_simulate( trace_column_names=None, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ Run an MNL or NL simulation for when the model spec does not involve alternative @@ -1528,6 +1542,7 @@ def _simple_simulate( trace_choice_name=trace_choice_name, trace_column_names=trace_column_names, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) else: choices = eval_nl( @@ -1544,6 +1559,7 @@ def _simple_simulate( trace_choice_name=trace_choice_name, trace_column_names=trace_column_names, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) return choices @@ -1582,6 +1598,7 @@ def simple_simulate( trace_label=None, trace_choice_name=None, trace_column_names=None, + compute_settings: ComputeSettings | None = None, ): """ Run an MNL or NL simulation for when the model spec does not involve alternative @@ -1616,6 +1633,7 @@ def simple_simulate( trace_choice_name=trace_choice_name, trace_column_names=trace_column_names, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(choices) @@ -1643,6 +1661,7 @@ def simple_simulate_by_chunk_id( estimator=None, trace_label=None, trace_choice_name=None, + compute_settings: ComputeSettings | None = None, ): """ chunk_by_chunk_id wrapper for simple_simulate @@ -1669,6 +1688,7 @@ def simple_simulate_by_chunk_id( trace_label=chunk_trace_label, trace_choice_name=trace_choice_name, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(choices) @@ -1682,7 +1702,14 @@ def simple_simulate_by_chunk_id( def eval_mnl_logsums( - state: workflow.State, choosers, spec, locals_d, trace_label=None, *, chunk_sizer + state: workflow.State, + choosers, + spec, + locals_d, + trace_label=None, + 
*, + chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ like eval_nl except return logsums instead of making choices @@ -1712,6 +1739,7 @@ def eval_mnl_logsums( trace_label, have_trace_targets, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "utilities", utilities) @@ -1825,6 +1853,7 @@ def eval_nl_logsums( trace_label=None, *, chunk_sizer: chunk.ChunkSizer, + compute_settings: ComputeSettings | None = None, ): """ like eval_nl except return logsums instead of making choices @@ -1855,6 +1884,7 @@ def eval_nl_logsums( have_trace_targets=have_trace_targets, spec_sh=spec_sh, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) chunk_sizer.log_df(trace_label, "raw_utilities", raw_utilities) @@ -1905,6 +1935,7 @@ def _simple_simulate_logsums( trace_label=None, *, chunk_sizer, + compute_settings: ComputeSettings | None = None, ): """ like simple_simulate except return logsums instead of making choices @@ -1926,6 +1957,7 @@ def _simple_simulate_logsums( locals_d, trace_label=trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) else: logsums = eval_nl_logsums( @@ -1936,6 +1968,7 @@ def _simple_simulate_logsums( locals_d, trace_label=trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) return logsums @@ -1952,6 +1985,7 @@ def simple_simulate_logsums( chunk_size=0, trace_label=None, chunk_tag=None, + compute_settings: ComputeSettings | None = None, ): """ like simple_simulate except return logsums instead of making choices @@ -1984,6 +2018,7 @@ def simple_simulate_logsums( locals_d, chunk_trace_label, chunk_sizer=chunk_sizer, + compute_settings=compute_settings, ) result_list.append(logsums) diff --git a/activitysim/examples/prototype_arc/configs/auto_ownership.yaml b/activitysim/examples/prototype_arc/configs/auto_ownership.yaml index 9e85945f9..09540f154 100644 --- a/activitysim/examples/prototype_arc/configs/auto_ownership.yaml +++ 
b/activitysim/examples/prototype_arc/configs/auto_ownership.yaml @@ -17,8 +17,3 @@ NESTS: SPEC: auto_ownership.csv COEFFICIENTS: auto_ownership_coeffs.csv - -LOGSUM_CHOOSER_COLUMNS: - - num_drivers - - num_workers - \ No newline at end of file diff --git a/activitysim/examples/prototype_mtc_extended/configs/school_escorting.yaml b/activitysim/examples/prototype_mtc_extended/configs/school_escorting.yaml index 31527cb1d..ff04d214e 100644 --- a/activitysim/examples/prototype_mtc_extended/configs/school_escorting.yaml +++ b/activitysim/examples/prototype_mtc_extended/configs/school_escorting.yaml @@ -1,10 +1,8 @@ -# The school escort model as written in this prototype is not -# compatible with sharrow, so "sharrow_skip" must be activated here. -# Currently the spec file has a few lines that evaluate differently in -# the sharrow implementation, resulting in failure that are flagged by -# the `test` mode. Once these are fixed (and string comparisons are -# minimized for performance) this `sharrow_skip` setting can be removed. -sharrow_skip: true +# Some data values in the spec file will refer to missing values stored +# as NaN in the data. This requires the `fastmath` compute setting to +# be set to `false` to avoid errors in the sharrow implementation.
+compute_settings: + fastmath: false OUTBOUND_SPEC: school_escorting_outbound.csv OUTBOUND_COEFFICIENTS: school_escorting_coefficients_outbound.csv diff --git a/activitysim/examples/prototype_mtc_extended/configs/vehicle_type_choice.yaml b/activitysim/examples/prototype_mtc_extended/configs/vehicle_type_choice.yaml index d0b9a91f6..38ceca185 100644 --- a/activitysim/examples/prototype_mtc_extended/configs/vehicle_type_choice.yaml +++ b/activitysim/examples/prototype_mtc_extended/configs/vehicle_type_choice.yaml @@ -2,7 +2,7 @@ SPEC: vehicle_type_choice_op4.csv COEFFICIENTS: vehicle_type_choice_op4_coefficients.csv -ALTS: vehicle_type_choice_op4_alternatives.csv +#ALTS: vehicle_type_choice_op4_alternatives.csv # SPEC: vehicle_type_choice_op2.csv # COEFFICIENTS: vehicle_type_choice_op2_coefficients.csv diff --git a/docs/dev-guide/using-sharrow.md b/docs/dev-guide/using-sharrow.md index 1feb8178a..e2b0093d4 100644 --- a/docs/dev-guide/using-sharrow.md +++ b/docs/dev-guide/using-sharrow.md @@ -213,8 +213,14 @@ as needed. For models with utility expressions that include a lot of string comparisons, (e.g. because they are built for the legacy `pandas.eval` interpreter and have not -been updated) sharrow can be disabled by setting `sharrow_skip: true` in the -component's configuration yaml file. +been updated) sharrow can be disabled by setting + +```yaml +compute_settings: + sharrow_skip: true +``` + +in the component's configuration yaml file. ### Multiprocessing Performance