# ivar_def_reflex_agent.py

from captureAgents import CaptureAgent
from util import nearestPoint
import random, time, util
from game import Directions


class ReflexCaptureAgent(CaptureAgent):
    """
    A base class for reflex agents that choose score-maximizing actions.
    """

    def registerInitialState(self, gameState):
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)
    def chooseAction(self, gameState):
        """
        Picks among the actions with the highest Q(s,a).
        """
        actions = gameState.getLegalActions(self.index)

        # You can profile your evaluation time by uncommenting these lines
        # start = time.time()
        values = [self.evaluate(gameState, a) for a in actions]
        # print('eval time for agent %d: %.4f' % (self.index, time.time() - start))

        maxValue = max(values)
        bestActions = [a for a, v in zip(actions, values) if v == maxValue]

        foodLeft = len(self.getFood(gameState).asList())

        # Endgame: with two or fewer pellets left, head back toward the start
        # position by the shortest maze distance instead of chasing food.
        if foodLeft <= 2:
            bestDist = 9999
            bestAction = actions[0]  # guard against an unbound local below
            for action in actions:
                successor = self.getSuccessor(gameState, action)
                pos2 = successor.getAgentPosition(self.index)
                dist = self.getMazeDistance(self.start, pos2)
                if dist < bestDist:
                    bestAction = action
                    bestDist = dist
            return bestAction

        return random.choice(bestActions)
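    # An illustration of the tie-breaking above, with hypothetical numbers not
    # taken from a real game: if values were [3.0, 3.0, 1.0] for the actions
    # ['North', 'South', 'Stop'], then bestActions would be ['North', 'South']
    # and one of the two would be returned uniformly at random.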
    def getSuccessor(self, gameState, action):
        """
        Finds the next successor: a game state in which the agent's position
        lies exactly on a grid point (location tuple).
        """
        successor = gameState.generateSuccessor(self.index, action)
        pos = successor.getAgentState(self.index).getPosition()
        if pos != nearestPoint(pos):
            # Only half a grid position was covered, so apply the action again
            # to land on a full grid point.
            return successor.generateSuccessor(self.index, action)
        else:
            return successor
    def evaluate(self, gameState, action):
        """
        Computes a linear combination of features and feature weights.
        """
        features = self.getFeatures(gameState, action)
        weights = self.getWeights(gameState, action)
        return features * weights
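    # A worked illustration of the dot product above, with hypothetical values
    # (not taken from a real game): util.Counter({'successorScore': 3}) *
    # {'successorScore': 1.0} evaluates to 3.0, because util.Counter multiplies
    # matching entries and sums the products.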
    def getFeatures(self, gameState, action):
        """
        Returns a counter of features for the state.
        """
        features = util.Counter()
        successor = self.getSuccessor(gameState, action)
        features['successorScore'] = self.getScore(successor)
        return features

    def getWeights(self, gameState, action):
        """
        Normally, weights do not depend on the game state. They can be either
        a counter or a dictionary.
        """
        return {'successorScore': 1.0}
class DefensiveReflexAgent(ReflexCaptureAgent):
    """
    A reflex agent that keeps its side Pacman-free. Again, this is to give
    you an idea of what a defensive agent could be like. It is not the best
    or only way to make such an agent.
    """

    def getFeatures(self, gameState, action):
        features = util.Counter()
        successor = self.getSuccessor(gameState, action)

        myState = successor.getAgentState(self.index)
        myPos = myState.getPosition()

        # Computes whether we're on defense (1) or offense (0)
        features['onDefense'] = 1
        if myState.isPacman: features['onDefense'] = 0

        # Computes distance to invaders we can see
        enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]
        invaders = [a for a in enemies if a.isPacman and a.getPosition() is not None]
        features['numInvaders'] = len(invaders)
        if len(invaders) > 0:
            dists = [self.getMazeDistance(myPos, a.getPosition()) for a in invaders]
            features['invaderDistance'] = min(dists)

        # Penalize stopping and reversing direction while patrolling.
        if action == Directions.STOP: features['stop'] = 1
        rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]
        if action == rev: features['reverse'] = 1

        return features

    def getWeights(self, gameState, action):
        return {'numInvaders': -1000, 'onDefense': 100, 'invaderDistance': -10, 'stop': -100, 'reverse': -2}
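

# A minimal sketch of the createTeam hook that the capture-the-flag framework
# loads from a team file, following the standard contest interface. The default
# agent names below are assumptions for illustration, not part of the original
# file.
def createTeam(firstIndex, secondIndex, isRed,
               first='DefensiveReflexAgent', second='DefensiveReflexAgent'):
    """
    Returns a list of two agents that will form the team, initialized with the
    indices assigned by the framework. isRed is True for the red team; this
    sketch does not use it.
    """
    return [eval(first)(firstIndex), eval(second)(secondIndex)]

# Hypothetical usage, assuming the standard contest runner:
#   python capture.py -r ivar_def_reflex_agent -b baselineTeam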