a_star.py
import heapq
from itertools import count
from typing import Optional, Tuple

from state import State


def manhattan_distance(state: State) -> int:
    """Sum of each tile's row and column distance from its goal cell."""
    distance = 0
    for index, value in enumerate(state.board):
        if value == 0:
            continue  # the blank does not contribute
        # Tile `value` belongs at flat index value - 1 in the goal layout.
        goal_row = (value - 1) // state.width
        goal_col = (value - 1) % state.width
        row = index // state.width
        col = index % state.width
        distance += abs(goal_row - row) + abs(goal_col - col)
    return distance


def hamming_distance(state: State) -> int:
    """Number of non-blank tiles that are not on their goal cell."""
    distance = 0
    goal = state.get_target_state()
    for index, value in enumerate(state.board):
        if value != 0 and value != goal.board[index]:
            distance += 1
    return distance
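

# Worked example for the two heuristics (an illustration, assuming the
# conventional goal layout 1..8 followed by the blank on a 3x3 board):
#
#     board = [1, 2, 3,
#              4, 0, 5,
#              7, 8, 6]          width = 3
#
# Tile 5 is at (row 1, col 2) but belongs at (1, 1) -> Manhattan cost 1.
# Tile 6 is at (row 2, col 2) but belongs at (1, 2) -> Manhattan cost 1.
# So manhattan_distance == 2, and since exactly those two tiles are the
# misplaced ones, hamming_distance == 2 as well.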


class AStarSolver:
    @staticmethod
    def solve(state: State, heuristic=manhattan_distance) -> Tuple[Optional[State], int, int, int]:
        """A* search; returns (goal node, nodes generated, nodes expanded, max depth reached)."""
        if not state.is_solvable():
            raise ValueError("Not solvable")
        goal = state.get_target_state()
        tie_breaker = count()  # so heapq never has to compare State objects on equal priorities
        frontier = [(heuristic(state), next(tie_breaker), state, 0)]
        explored = set()
        num_of_visited = 1
        max_depth = 0
        while frontier:
            _, _, node, depth = heapq.heappop(frontier)
            if node == goal:
                return node, num_of_visited, len(explored), max_depth
            if node in explored:
                continue  # stale frontier entry for an already expanded node
            explored.add(node)
            for neighbour in node.get_neighbours("URDL"):
                if neighbour not in explored:
                    # f(n) = g(n) + h(n): depth of the neighbour plus its heuristic estimate
                    priority = (depth + 1) + heuristic(neighbour)
                    heapq.heappush(frontier, (priority, next(tie_breaker), neighbour, depth + 1))
                    num_of_visited += 1
                    if depth + 1 > max_depth:
                        max_depth = depth + 1
        return None, num_of_visited, len(explored), max_depth
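

if __name__ == "__main__":
    # Minimal usage sketch. The State constructor call below (flat board list
    # plus width) is an assumption about state.py; only is_solvable(),
    # get_target_state() and get_neighbours() are taken from the code above.
    start = State([1, 2, 3, 4, 0, 5, 7, 8, 6], 3)  # hypothetical constructor signature
    solution, generated, expanded, depth = AStarSolver.solve(start, heuristic=manhattan_distance)
    if solution is not None:
        print(f"solved: generated={generated} expanded={expanded} max_depth={depth}")
    else:
        print("no solution found")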