-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsearch.py
341 lines (296 loc) · 12.5 KB
/
search.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from operator import itemgetter
import math
# Local copy of manhattanHeuristic: searchAgents.py depends on search.py, so
# importing it from there would create a circular import.
def manhattanHeuristic(position, problem, info=None):
    """The Manhattan distance heuristic for a PositionSearchProblem.

    position: an (x, y) tuple for the current state.
    problem:  a search problem exposing a `goal` (x, y) attribute.
    info:     unused; kept for signature compatibility.  Default changed from
              the mutable `{}` (shared across calls) to None.
    """
    x1, y1 = position
    x2, y2 = problem.goal
    return abs(x1 - x2) + abs(y1 - y2)
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Return the hard-coded move sequence that solves tinyMaze.

    For any other maze the sequence of moves will be incorrect, so only use
    this for tinyMaze.
    """
    from game import Directions
    south = Directions.SOUTH
    west = Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first (graph-search DFS).

    Returns the list of actions reaching a goal state, or [] if the frontier
    is exhausted without finding one.
    """
    frontier = util.Stack()
    # Each frontier entry: (state, action that reached it, path cost, path so far)
    frontier.push((problem.getStartState(), '', 0, []))
    visited = set()
    # Fixed: the original looped on `while mystack:` -- util.Stack defines no
    # __bool__/__len__, so that condition is always True and an unsolvable
    # problem crashed popping an empty stack instead of terminating.
    while not frontier.isEmpty():
        node, action, cost, path = frontier.pop()
        if node in visited:
            continue
        visited.add(node)
        if problem.isGoalState(node):
            # path holds (state, action) pairs; the slice drops the dummy ''
            # action attached to the start state.
            return [step[1] for step in path + [(node, action)]][1:]
        for succNode, succAction, succCost in problem.getSuccessors(node):
            frontier.push((succNode, succAction, cost + succCost,
                           path + [(node, action)]))
    return []  # no goal reachable
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first.

    Graph-search BFS with a FIFO frontier; each state is expanded at most
    once.  Returns the list of actions reaching a goal state, or [] if the
    frontier is exhausted.  (Replaces the unimplemented stub.)
    """
    frontier = util.Queue()
    # Each frontier entry: (state, action that reached it, path cost, path so far)
    frontier.push((problem.getStartState(), '', 0, []))
    visited = set()
    while not frontier.isEmpty():
        node, action, cost, path = frontier.pop()
        if node in visited:
            continue
        visited.add(node)
        if problem.isGoalState(node):
            # Drop the dummy '' action attached to the start state.
            return [step[1] for step in path + [(node, action)]][1:]
        for succNode, succAction, succCost in problem.getSuccessors(node):
            frontier.push((succNode, succAction, cost + succCost,
                           path + [(node, action)]))
    return []  # no goal reachable
def uniformCostSearch(problem):
    """Search the node of least total cost first (UCS / Dijkstra).

    Priority queue keyed on path cost g(n); a state is finalized the first
    time it is popped, which is correct for non-negative step costs.
    Returns the list of actions reaching a goal state, or [] if none is
    reachable.  (Replaces the unimplemented stub.)
    """
    frontier = util.PriorityQueue()
    # Each entry: (state, action that reached it, path cost, path so far)
    frontier.push((problem.getStartState(), '', 0, []), 0)
    visited = set()
    while not frontier.isEmpty():
        node, action, cost, path = frontier.pop()
        if node in visited:
            continue
        visited.add(node)
        if problem.isGoalState(node):
            # Drop the dummy '' action attached to the start state.
            return [step[1] for step in path + [(node, action)]][1:]
        for succNode, succAction, succCost in problem.getSuccessors(node):
            newCost = cost + succCost
            frontier.push((succNode, succAction, newCost,
                           path + [(node, action)]), newCost)
    return []  # no goal reachable
def nullHeuristic(state, problem=None):
    """Trivial heuristic: estimate the cost from `state` to the nearest goal
    in `problem` as zero, for every state."""
    return 0
# def aStarSearch(problem, startState, goalState, heuristic=manhattanHeuristic):
# startNode = (startState, '', 0, [])#state, aciotn, g_n, path
# # goalState = getGoalState
# # startNode = (startState, "", 0, []) #state, aciotn, g_n, path
# openPQ = util.PriorityQueue()
# openPQ.push(startNode, startNode[2] + util.manhattanDistance(startState, goalState))
# closeSet = set()
# best_gDic = {}
# while not openPQ.isEmpty():
# node = openPQ.pop()
# state, action, g_n, path = node
# if state in best_gDic.keys():
# best_g = best_gDic[state]
# else:
# best_gDic[state] = g_n
# best_g = g_n
# if state not in closeSet or g_n < best_g:
# closeSet.add(state)
# best_gDic[state] = g_n
# if state == goalState:
# #extract solution
# actions = [action[1] for action in path]
# pathStates = [state[0] for state in path]
# actions.append(action)
# pathStates.append(state)
# del actions[0]
# return (pathStates, actions, best_gDic)
# stateSuccessors = problem.getSuccessors(state)
# for successor in stateSuccessors:
# succState, succAction, succCost = successor
# newNode = (succState, succAction, g_n + succCost, path + [(state, action)])
# if util.manhattanDistance(succState, goalState) < math.inf:
# openPQ.push(newNode, newNode[2] + util.manhattanDistance(newNode[0], goalState))
def aStarSearch(problem, startState, goalState, realGoalState, fakeGoalState, pid3 = False, heuristic=manhattanHeuristic):
    # A* from an explicit startState to goalState.  NOTE(review): this
    # signature differs from the standard aStarSearch(problem, heuristic) --
    # callers supply both endpoints, and `problem` is only used for
    # getSuccessors.  When pid3 is True, successors whose Manhattan distance
    # to realGoalState is smaller than to fakeGoalState get their heuristic
    # doubled (alpha = 2), biasing expansion away from the real goal --
    # presumably a deception/path-obfuscation variant; confirm against the
    # assignment spec.
    # Returns (pathStates, actions, best_gDic) on success; falls off the end
    # (implicitly returning None) if the open list empties first.
    startNode = (startState, '', 0, 0, [])  # (state, action, g_n, f_n, path)
    openPQ = util.PriorityQueue()
    # Initial priority uses the raw Manhattan distance; the alpha weighting is
    # only applied when pushing successors below.
    openPQ.push(startNode, startNode[2] + util.manhattanDistance(startState, goalState))
    closeSet = set()
    best_gDic = {}  # best g-value seen per state; enables reopening on a cheaper path
    cnt = 0  # unused
    while not openPQ.isEmpty():
        node = openPQ.pop()
        state, action, g_n, f_n, path = node  # f_n stored in the node is not reused here
        if state in best_gDic.keys():
            best_g = best_gDic[state]
        else:
            best_gDic[state] = g_n
            best_g = g_n
        # Expand if never closed, or re-expand if this pop found a cheaper path.
        if state not in closeSet or g_n < best_g:
            closeSet.add(state)
            best_gDic[state] = g_n
            if state == goalState:
                # Extract solution: path holds (state, action) pairs.  Append
                # the goal's own entry, then drop the dummy '' action that was
                # attached to the start node.
                actions = [action[1] for action in path]
                pathStates = [state[0] for state in path]
                actions.append(action)
                pathStates.append(state)
                del actions[0]
                return (pathStates, actions, best_gDic)
            stateSuccessors = problem.getSuccessors(state)
            for successor in stateSuccessors:
                succState, succAction, succCost = successor
                # pid3: inflate the heuristic of successors nearer the real
                # goal than the fake one.
                if pid3 and util.manhattanDistance(succState, realGoalState) < util.manhattanDistance(succState, fakeGoalState):
                    alpha = 2
                else:
                    alpha = 1
                newNode = (succState, succAction, g_n + succCost, g_n + succCost + alpha * util.manhattanDistance(succState, goalState), path + [(state, action)])
                # Only enqueue states with a finite heuristic value.
                if util.manhattanDistance(succState, goalState) < math.inf:
                    openPQ.push(newNode, priority = newNode[3])
# def aStarSearch(problem, heuristic=manhattanHeuristic):
# """Search the node that has the lowest combined cost and heuristic first."""
# "*** YOUR CODE HERE ***"
# util.raiseNotDefined()
def enforcedHillClimbing(problem, heuristic=manhattanHeuristic):
    """COMP90054 your solution to part 1 here """
    # Enforced Hill Climbing: repeatedly breadth-first-search outward from the
    # current node until a state with a strictly smaller heuristic value is
    # found, then jump to it; stop when the current state is a goal.
    # NOTE(review): the `heuristic` parameter is ignored -- the code calls
    # manhattanHeuristic directly.
    # ###### function #######
    ## TODO: revised path
    ## TODO: revised Heuristics
    # improve function
    def improve(node_0):
        # BFS from node_0; return the first node whose heuristic value beats
        # node_0's.  NOTE(review): if the queue empties with no improvement
        # (dead end / heuristic plateau), this returns None implicitly, which
        # crashes the tuple-unpacking in the caller -- TODO handle.
        myqueue = util.Queue()
        myqueue.push(node_0)
        state_0, action_0, cost_0, path_0 = node_0
        closed = set()  # visited states set
        while not myqueue.isEmpty() :
            node = myqueue.pop()
            state, action, cost, path = node
            if state not in closed :
                closed.add(state)
                # Strict improvement over the heuristic of the starting state.
                if manhattanHeuristic(state, problem) < manhattanHeuristic(state_0, problem):
                    return (state, action, cost, path)
                curStateSuccessors = problem.getSuccessors(state)
                # get (nextState, action, cost) in current state
                for curStateSuccessor in curStateSuccessors:
                    succState, succAction, succCost = curStateSuccessor
                    newNode = (succState, succAction, cost + succCost, path + [(state, action)])
                    myqueue.push(newNode)
    # initialize
    curNode = (problem.getStartState(), '', 0, []) #(state, action, cost, path)
    curState, curAction, curCost, curPath = curNode
    # run improve until the current state is a goal
    while not problem.isGoalState(curState):
        curNode = improve(curNode)
        curState, curAction, curCost, curPath = curNode
    # extract solution: path holds (state, action) pairs; append the final
    # action, then drop the dummy '' action of the start node.
    actions = [action[1] for action in curPath]
    actions.append(curAction)
    del actions[0]
    return actions
def idaStarSearch(problem, heuristic=manhattanHeuristic):
    """COMP90054 your solution to part 2 here

    Iterative-deepening A*: repeated depth-first searches bounded by an
    f-cost threshold, raising the threshold each round to the smallest
    f-value that exceeded it.  Returns the list of actions reaching a goal,
    or [] if the search space is exhausted.

    ref: https://en.wikipedia.org/wiki/Iterative_deepening_A*
    Fixes over the original: the `min` local shadowed the builtin; the cycle
    check compared a 3-tuple successor against the 4-tuple nodes stored on
    the path, so it never pruned anything; the NOT_FOUND case printed a
    message but then looped forever with threshold = inf; debug prints
    removed; the `heuristic` parameter is now honored (default behavior is
    unchanged since it defaults to manhattanHeuristic).
    """
    FOUND = "FOUND"

    def search(mypath, g_n, threshold):
        # Bounded recursive DFS along mypath.  Returns FOUND on success
        # (leaving the solution nodes on mypath), otherwise the minimum
        # f-value that exceeded the threshold (inf if the subtree is empty).
        state, action, cost, path = mypath[-1]
        f_n = g_n + heuristic(state, problem)
        if f_n > threshold:
            return f_n
        if problem.isGoalState(state):
            return FOUND
        nextThreshold = float("inf")
        for succState, succAction, succCost in problem.getSuccessors(state):
            # Skip states already on the current path (proper cycle check:
            # compare states, not whole node tuples).
            if any(succState == n[0] for n in mypath):
                continue
            mypath.append((succState, succAction, succCost,
                           path + [(state, action)]))
            t = search(mypath, g_n + succCost, threshold)
            if t == FOUND:
                return FOUND  # keep the solution node on mypath
            if t < nextThreshold:
                nextThreshold = t
            mypath.pop()
        return nextThreshold

    # initialize: (state, action, cost, path) with a dummy '' start action
    startNode = (problem.getStartState(), '', 0, [])
    # Start the threshold at h(start) -- the first useful bound -- instead of 0.
    threshold = heuristic(startNode[0], problem)
    mypath = [startNode]
    while True:
        t = search(mypath, 0, threshold)
        if t == FOUND:
            break
        if math.isinf(t):
            return []  # whole space exhausted: no solution
        threshold = t
    # extract solution from the node left on top of the path stack: take the
    # recorded actions, append the final node's action, drop the dummy ''.
    finalNode = mypath[-1]
    actions = [step[1] for step in finalNode[3]]
    actions.append(finalNode[1])
    del actions[0]
    return actions
#### temp
# successorList.append(newNode)
# >>> data = [('abc', 121),('abc', 231),('abc', 148), ('abc',221)]
# >>> sorted(data,key=itemgetter(1))
# current iteration
# successorsList.append(curStateSuccessor)
# sort current iteration in descending by f_n (how to get f_n)
# sorted(successorsList, key=itemgetter(2), reverse=True)
# for i in range(len(successorList)):
# successorNode = successorsList[i]
# for i in range(len(successorList)):
# mystack.push(successorList[i])
# Abbreviations: short aliases for the search functions defined above.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
ehc = enforcedHillClimbing
ida = idaStarSearch