Update all usages of np.Inf to np.inf.
Users may get an AttributeError when using np.Inf with newer NumPy versions. This should be a non-breaking change, since np.inf is available in older NumPy versions (and is already used across this repo).
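
For context, a minimal sketch of the failure this avoids (assuming NumPy >= 2.0, where the capitalized alias was removed; the exact error text may vary by release):

import numpy as np

cost = np.inf  # lowercase spelling: works on old and new NumPy alike
cost = np.Inf  # on NumPy >= 2.0 this raises AttributeError
               # ("`np.Inf` was removed in the NumPy 2.0 release. Use `np.inf` instead.")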
itwasabhi committed Dec 16, 2024
1 parent 9dfc1e8 commit 1a012b7
Showing 3 changed files with 10 additions and 10 deletions.
2 changes: 1 addition & 1 deletion src/overcooked_ai_py/agents/agent.py
@@ -445,7 +445,7 @@ def get_lowest_cost_action_and_goal(self, start_pos_and_or, motion_goals):
         Chooses motion goal that has the lowest cost action plan.
         Returns the motion goal itself and the first action on the plan.
         """
-        min_cost = np.Inf
+        min_cost = np.inf
         best_action, best_goal = None, None
         for goal in motion_goals:
             action_plan, _, plan_cost = self.mlam.motion_planner.get_plan(
4 changes: 2 additions & 2 deletions src/overcooked_ai_py/mdp/overcooked_env.py
@@ -424,7 +424,7 @@ def execute_plan(self, start_state, joint_action_plan, display=False):
         return successor_state, done
 
     def run_agents(
         self,
         agent_pair,
         include_final_state=False,
         display=False,
@@ -485,7 +485,7 @@ def run_agents(
 
     def get_rollouts(
         self,
         agent_pair,
         num_games,
         display=False,
         dir=None,
14 changes: 7 additions & 7 deletions src/overcooked_ai_py/planning/planners.py
@@ -165,7 +165,7 @@ def get_gridworld_pos_distance(self, pos1, pos2):
         to go from starting position to goal position (not including
         interaction action)."""
         # NOTE: currently unused, pretty bad code. If used in future, clean up
-        min_cost = np.Inf
+        min_cost = np.inf
         for d1, d2 in itertools.product(Direction.ALL_DIRECTIONS, repeat=2):
             start = (pos1, d1)
             end = (pos2, d2)
@@ -364,8 +364,8 @@ def min_cost_between_features(
         Determines the minimum number of timesteps necessary for a player to go from any
         terrain feature in list1 to any feature in list2 and perform an interact action
         """
-        min_dist = np.Inf
-        min_manhattan = np.Inf
+        min_dist = np.inf
+        min_manhattan = np.inf
         for pos1, pos2 in itertools.product(pos_list1, pos_list2):
             for mg1, mg2 in itertools.product(
                 self.motion_goals_for_pos[pos1],
@@ -383,7 +383,7 @@ def min_cost_between_features(
                     min_dist = curr_dist
 
         # +1 to account for interaction action
-        if manhattan_if_fail and min_dist == np.Inf:
+        if manhattan_if_fail and min_dist == np.inf:
             min_dist = min_manhattan
         min_cost = min_dist + 1
         return min_cost
@@ -401,7 +401,7 @@ def min_cost_to_feature(
         """
         start_pos = start_pos_and_or[0]
         assert self.mdp.get_terrain_type_at_pos(start_pos) != "X"
-        min_dist = np.Inf
+        min_dist = np.inf
         best_feature = None
         for feature_pos in feature_pos_list:
             for feature_goal in self.motion_goals_for_pos[feature_pos]:
@@ -841,7 +841,7 @@ def _handle_conflict_with_same_goal_idx(
                 if self._agents_are_in_same_position(
                     (curr_pos_or0, curr_pos_or1)
                 ):
-                    return None, None, [np.Inf, np.Inf]
+                    return None, None, [np.inf, np.inf]
 
             else:
                 curr_pos_or0, curr_pos_or1 = next_pos_or0, next_pos_or1

         end_pos_and_or = (curr_pos_or0, curr_pos_or1)
         finishing_times = (
-            (np.Inf, idx1) if wait_agent_idx == 0 else (idx0, np.Inf)
+            (np.inf, idx1) if wait_agent_idx == 0 else (idx0, np.inf)
         )
         return joint_plan, end_pos_and_or, finishing_times

