evaluate.py
import argparse
from statistics import mean
import torch
from tqdm import tqdm
from agents.random_agent import RandomAgent
from agents.ai_agent import AIAgent
from agents.q_agent import QAgent
from graphic_visualizations import stats_plotter
import environment as brisc
from utils import BriscolaLogger, NetworkTypes


def evaluate(game, agents, num_evaluations):
    """Play num_evaluations games and report statistics."""
    total_wins = [0] * len(agents)
    points_history = [[] for _ in range(len(agents))]

    for _ in tqdm(range(num_evaluations)):
        game_winner_id, _, _ = brisc.play_episode(game, agents, train=False)
        for player in game.players:
            points_history[player.id].append(player.points)
            if player.id == game_winner_id:
                total_wins[player.id] += 1

    print(f"\nTotal wins: {total_wins}.")
    for i in range(len(agents)):
        print(
            f"{agents[i].name} {i} won {total_wins[i] / num_evaluations:.2%} "
            f"with an average of {mean(points_history[i]):.2f} points."
        )

    return total_wins, points_history
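
# Minimal standalone sketch of evaluate() (hypothetical usage; it mirrors the
# setup performed in main() below, assuming a two-player BriscolaGame and two
# RandomAgents):
#
#   logger = BriscolaLogger(BriscolaLogger.LoggerLevels.TEST)
#   game = brisc.BriscolaGame(2, logger)
#   wins, points = evaluate(game, [RandomAgent(), RandomAgent()], 100)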


def main(args=None):
    """Evaluate agent performance against RandomAgent and AIAgent."""
    logger = BriscolaLogger(BriscolaLogger.LoggerLevels.TEST)
    game = brisc.BriscolaGame(2, logger)

    # The agent under evaluation is a QAgent if a model is provided,
    # otherwise a RandomAgent.
    if args.model_dir:
        print(f"Loading the model '{args.model_dir}'...")
        checkpoint = torch.load(args.model_dir)
        config = checkpoint['config']
        agent = QAgent(
            n_actions=config['n_actions'],
            epsilon=config['epsilon'],
            minimum_epsilon=config['minimum_epsilon'],
            replay_memory_capacity=1000000,
            minimum_training_samples=config['minimum_training_samples'],
            batch_size=config['batch_size'],
            discount=config['discount'],
            loss_fn=config['loss_fn'],
            learning_rate=0.0001,
            replace_every=config['replace_every'],
            epsilon_decay_rate=config['epsilon_decay_rate'],
            layers=config['layers'],
            state_type=config['state_type'],
        )
        agent.policy_net.load_state_dict(checkpoint['policy_state_dict'])
        agent.make_greedy()
    else:
        agent = RandomAgent()

    # Test the agent against RandomAgent.
    print(f"Testing against RandomAgent on {args.num_evaluations} games")
    agents = [agent, RandomAgent()]
    total_wins, points_history = evaluate(game, agents, args.num_evaluations)
    # stats_plotter(agents, points_history, total_wins)

    # Test the agent against AIAgent.
    print(f"Testing against AIAgent on {args.num_evaluations} games")
    agents = [agent, AIAgent()]
    total_wins, points_history = evaluate(game, agents, args.num_evaluations)
    # stats_plotter(agents, points_history, total_wins)


if __name__ == '__main__':
    # Parameters
    # ==================================================
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", default="models/QLearningAgent_90k_ruled_state3.pt",
                        help="Path to a trained model; if provided, the deep agent is evaluated instead of a RandomAgent",
                        type=str)
    parser.add_argument("--network", default=NetworkTypes.DQN, choices=[NetworkTypes.DQN, NetworkTypes.DRQN],
                        help="Neural network used for approximating the value function")
    parser.add_argument("--num_evaluations", default=20000,
                        help="Number of evaluation games against each type of opponent",
                        type=int)

    FLAGS = parser.parse_args()
    main(FLAGS)
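
# Example invocations (a sketch; any checkpoint path other than the default is
# a placeholder and assumes a file you have trained yourself):
#
#   # Evaluate the default checkpoint over 20,000 games per opponent:
#   python evaluate.py
#
#   # Evaluate a specific checkpoint with a shorter run:
#   python evaluate.py --model_dir models/my_checkpoint.pt --num_evaluations 1000
#
#   # Evaluate the RandomAgent baseline by passing an empty model path:
#   python evaluate.py --model_dir ""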