From dd0e5705e7e951dd2b2a800437289ffee89a71cf Mon Sep 17 00:00:00 2001
From: Abhishek Sriraman
Date: Mon, 16 Dec 2024 12:54:20 -0800
Subject: [PATCH] Update deprecated numpy attributes.

Changes np.Inf to np.inf and np.int to int. Users may get an
AttributeError when using np.Inf with newer numpy versions (np.int was
removed in numpy 1.24 and np.Inf in numpy 2.0). This should be a
non-breaking change, since np.inf is available in older numpy versions
(and is already used across this repo).
---
 src/overcooked_ai_py/agents/agent.py         |  2 +-
 src/overcooked_ai_py/mdp/layout_generator.py |  2 +-
 src/overcooked_ai_py/mdp/overcooked_env.py   |  4 ++--
 src/overcooked_ai_py/planning/planners.py    | 14 +++++++-------
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/overcooked_ai_py/agents/agent.py b/src/overcooked_ai_py/agents/agent.py
index d9471f6b..4ffb0f40 100644
--- a/src/overcooked_ai_py/agents/agent.py
+++ b/src/overcooked_ai_py/agents/agent.py
@@ -445,7 +445,7 @@ def get_lowest_cost_action_and_goal(self, start_pos_and_or, motion_goals):
         Chooses motion goal that has the lowest cost action plan.
         Returns the motion goal itself and the first action on the plan.
         """
-        min_cost = np.Inf
+        min_cost = np.inf
         best_action, best_goal = None, None
         for goal in motion_goals:
             action_plan, _, plan_cost = self.mlam.motion_planner.get_plan(
diff --git a/src/overcooked_ai_py/mdp/layout_generator.py b/src/overcooked_ai_py/mdp/layout_generator.py
index 7789921e..4daa0a08 100644
--- a/src/overcooked_ai_py/mdp/layout_generator.py
+++ b/src/overcooked_ai_py/mdp/layout_generator.py
@@ -399,7 +399,7 @@ def get_random_starting_positions(self, grid, divider_x=None):
 class Grid(object):
     def __init__(self, shape):
         assert len(shape) == 2, "Grid must be 2 dimensional"
-        grid = (np.ones(shape) * TYPE_TO_CODE[COUNTER]).astype(np.int)
+        grid = (np.ones(shape) * TYPE_TO_CODE[COUNTER]).astype(int)
         self.mtx = grid
         self.shape = np.array(shape)
         self.width = shape[0]
diff --git a/src/overcooked_ai_py/mdp/overcooked_env.py b/src/overcooked_ai_py/mdp/overcooked_env.py
index da856e8b..b61c0ca6 100644
--- a/src/overcooked_ai_py/mdp/overcooked_env.py
+++ b/src/overcooked_ai_py/mdp/overcooked_env.py
@@ -430,7 +430,7 @@ def run_agents(
         display=False,
         dir=None,
         display_phi=False,
-        display_until=np.Inf,
+        display_until=np.inf,
     ):
         """
         Trajectory returned will a list of state-action pairs (s_t, joint_a_t, r_t, done_t, info_t).
@@ -491,7 +491,7 @@ def get_rollouts(
         dir=None,
         final_state=False,
         display_phi=False,
-        display_until=np.Inf,
+        display_until=np.inf,
         metadata_fn=None,
         metadata_info_fn=None,
         info=True,
diff --git a/src/overcooked_ai_py/planning/planners.py b/src/overcooked_ai_py/planning/planners.py
index 15d15b2d..0ba4d17b 100644
--- a/src/overcooked_ai_py/planning/planners.py
+++ b/src/overcooked_ai_py/planning/planners.py
@@ -165,7 +165,7 @@ def get_gridworld_pos_distance(self, pos1, pos2):
         to go from starting position to goal position (not including
         interaction action)."""
         # NOTE: currently unused, pretty bad code. If used in future, clean up
-        min_cost = np.Inf
+        min_cost = np.inf
         for d1, d2 in itertools.product(Direction.ALL_DIRECTIONS, repeat=2):
             start = (pos1, d1)
             end = (pos2, d2)
@@ -364,8 +364,8 @@ def min_cost_between_features(
         Determines the minimum number of timesteps necessary for a player to go from any
         terrain feature in list1 to any feature in list2 and perform an interact action
         """
-        min_dist = np.Inf
-        min_manhattan = np.Inf
+        min_dist = np.inf
+        min_manhattan = np.inf
         for pos1, pos2 in itertools.product(pos_list1, pos_list2):
             for mg1, mg2 in itertools.product(
                 self.motion_goals_for_pos[pos1],
@@ -383,7 +383,7 @@ def min_cost_between_features(
                     min_dist = curr_dist

         # +1 to account for interaction action
-        if manhattan_if_fail and min_dist == np.Inf:
+        if manhattan_if_fail and min_dist == np.inf:
             min_dist = min_manhattan
         min_cost = min_dist + 1
         return min_cost
@@ -401,7 +401,7 @@ def min_cost_to_feature(
         """
         start_pos = start_pos_and_or[0]
         assert self.mdp.get_terrain_type_at_pos(start_pos) != "X"
-        min_dist = np.Inf
+        min_dist = np.inf
         best_feature = None
         for feature_pos in feature_pos_list:
             for feature_goal in self.motion_goals_for_pos[feature_pos]:
@@ -841,7 +841,7 @@ def _handle_conflict_with_same_goal_idx(
             if self._agents_are_in_same_position(
                 (curr_pos_or0, curr_pos_or1)
             ):
-                return None, None, [np.Inf, np.Inf]
+                return None, None, [np.inf, np.inf]
             else:
                 curr_pos_or0, curr_pos_or1 = next_pos_or0, next_pos_or1

@@ -860,7 +860,7 @@ def _handle_conflict_with_same_goal_idx(

         end_pos_and_or = (curr_pos_or0, curr_pos_or1)
         finishing_times = (
-            (np.Inf, idx1) if wait_agent_idx == 0 else (idx0, np.Inf)
+            (np.inf, idx1) if wait_agent_idx == 0 else (idx0, np.inf)
         )

         return joint_plan, end_pos_and_or, finishing_times
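
The compatibility claim in the commit message can be sanity-checked with
a short script. This is a minimal, hypothetical sketch and not part of
the patch; the file name is illustrative, and the version notes reflect
NumPy's published removals (np.int in 1.24, np.Inf in 2.0):

    # check_numpy_aliases.py -- illustrative only, not part of this patch.
    import numpy as np

    print("NumPy version:", np.__version__)

    # np.inf is the stable spelling on both old and new NumPy.
    assert np.inf > 1e308 and float("inf") == np.inf

    # The removed aliases raise AttributeError on newer NumPy.
    for alias, replacement in (("Inf", "np.inf"), ("int", "int")):
        try:
            getattr(np, alias)
            print(f"np.{alias} still present (older NumPy)")
        except AttributeError:
            print(f"np.{alias} removed -- use {replacement} instead")

Under NumPy >= 2.0 both aliases report as removed, which is the
AttributeError this patch guards against; under NumPy < 1.24 both still
resolve, which is why the change is non-breaking on older versions.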