'''
Simulations should all start from this file.

Instructions:
> Create a new game by calling othello(ai_1, ai_2).
> Specify ai_1 and ai_2 with a marker of -1 or +1 only.
> Use .startgame() to launch the game in the command window.
> decisionRule_ai, NN_ai, and minimax_ai are the available AIs; if you wish
  to play yourself, pass human_ai as an AI argument.
Enjoy
'''
from othelloGame import othello
from othelloAI import human_ai, decisionRule_ai, NN_ai, minimax_ai
from othelloEval import othello_eval
import numpy as np
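
# A minimal usage sketch of the API described above. demo_game is a
# hypothetical helper, not part of the original experiments; it only
# combines calls already used elsewhere in this file.
def demo_game():
    # Human (marker +1) against the greedy decision-rule AI (marker -1);
    # start_move=0 starts from the standard opening position.
    game = othello(human_ai(1), decisionRule_ai(-1))
    return game.startgame(start_move=0)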

def single_game():
    # Example single game: MiniMax search against the greedy algorithm
    # search_modes = ["MiniMax", "A-B Pruning", "Scout"]
    # game = othello(minimax_ai(1, depth=4, search_mode=search_modes[0]), decisionRule_ai(-1))
    # score = game.startgame(start_move=0)

    # Example single game: Neural Network against the greedy algorithm
    game = othello(NN_ai(1), decisionRule_ai(-1))
    score = game.startgame(start_move=0)

    # Example game: two humans, board starts from a random
    # legal board state after 5 moves
    # game = othello(human_ai(1), human_ai(-1))
    # score = game.startgame(start_move=5)

def adverserial_MC():
    # Make sure the search algorithms are always bot 1, because nodesVisited
    # must be reset to 0 after every game.
    # Every run saves the png and a 2D array of nodesVisited.

    # Simulation params
    search_modes = ["MiniMax", "A-B Pruning", "Scout"]
    heuristics = ['All', 'Coin_Party', 'Stability', 'Frontier_Discs',
                  'Weight_Matrix', 'Corner_Closeness', 'Corner', 'Mobility']
    depth_range = range(3, 4)
    op_cond_range = np.arange(0, 30, 5)
    mc_runs = 500

    # evaluation = othello_eval(
    #     minimax_ai(1, depth=2, search_mode="A-B Pruning"),
    #     decisionRule_ai(-1),
    #     runs=mc_runs,
    #     adverse=True,
    # )
    # evaluation.gameStartEval(values2test=op_cond_range)
    # evaluation.plotGameStartResults(draw=False)

    # Pit every heuristic against every other one (mirror matches included),
    # each unordered pair exactly once.
    for f in range(len(heuristics)):
        for s in range(f, len(heuristics)):
            evaluation = othello_eval(
                minimax_ai(1, depth=1, search_mode="A-B Pruning", heur=heuristics[f]),
                minimax_ai(-1, depth=1, search_mode="A-B Pruning", heur=heuristics[s]),
                runs=mc_runs,
                adverse=True,
            )
            evaluation.gameStartEval(values2test=op_cond_range)
            evaluation.plotGameStartResults(draw=False)

    # for mode in search_modes:
    #     for d in depth_range:
    #         print("Running", mode, "with a depth of", d)
    #         evaluation = othello_eval(
    #             minimax_ai(1, depth=d, search_mode=mode),
    #             decisionRule_ai(-1),
    #             runs=mc_runs,
    #             adverse=True,
    #         )
    #         evaluation.gameStartEval(values2test=op_cond_range)
    #         evaluation.plotGameStartResults(draw=False)

    # evaluation = othello_eval(
    #     minimax_ai(1, depth=2, search_mode="A-B Pruning"),
    #     decisionRule_ai(-1),
    #     runs=20,
    #     adverse=True,
    # )
    # evaluation.gameStartEval(values2test=np.arange(0, 20, 4))
    # evaluation.plotGameStartResults()
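
# The nested (f, s) loop in adverserial_MC is equivalent to iterating over
# unordered heuristic pairs; a sketch of the same schedule using the standard
# library (hypothetical rewrite, not used by the experiments above):
#
#   from itertools import combinations_with_replacement
#   for heur_1, heur_2 in combinations_with_replacement(heuristics, 2):
#       ...  # one othello_eval matchup per pairing, as in adverserial_MC()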

def NearalNetwork_MC():
    # Monte Carlo evaluation of the Neural Network AI against the greedy algorithm
    op_cond_range = np.arange(0, 20, 3)
    mc_runs = 4000

    evaluation = othello_eval(NN_ai(1), decisionRule_ai(-1), runs=mc_runs)
    evaluation.gameStartEval(values2test=op_cond_range)
    evaluation.plotGameStartResults()

def SingleModeEval_MC():
    # Temporary function for testing one simulation at a time in a different process

    # Simulation params
    search_modes = ["MiniMax", "A-B Pruning", "Scout"]
    depth_range = range(3, 4)
    op_cond_range = np.arange(0, 20, 3)
    mc_runs = 10

    evaluation = othello_eval(
        minimax_ai(1, depth=4, search_mode=search_modes[0]),
        decisionRule_ai(-1),
        runs=mc_runs,
        adverse=True,
    )
    evaluation.gameStartEval(values2test=op_cond_range)
    evaluation.plotGameStartResults(draw=False)

def main():
    single_game()
    # adverserial_MC()
    # NearalNetwork_MC()
    # SingleModeEval_MC()


if __name__ == "__main__":
    main()