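"""Training script for RLHF_Model: fits a binary classifier on trajectory
data with a BCE loss, logging per-sample loss and accuracy each epoch and
checkpointing both the latest weights and the best-validation-loss weights."""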
import os

import torch
from torch.utils.data import DataLoader

from loader import RLHF_Dataset
from rlhf import RLHF_Model


def train_epoch(model, optimizer, loss_fn, data_loader, device):
    """Run one training epoch; return (mean per-sample loss, accuracy)."""
    size = len(data_loader.dataset)
    loss_sum = 0
    correct = 0
    model.train()
    for x, y in data_loader:
        x, y = x.to(device).float(), y.to(device).float()
        # Forward pass: predictions are probabilities, thresholded at 0.5
        pred = model(x)
        correct += (pred > 0.5).type(torch.float).eq(y).sum().item()
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_sum += loss.item()
    # loss_fn uses reduction='sum', so dividing the running total by the
    # dataset size yields the mean per-sample loss
    return loss_sum / size, correct / size


def val_epoch(model, loss_fn, data_loader, device):
    """Evaluate without gradient updates; return (mean per-sample loss, accuracy)."""
    size = len(data_loader.dataset)
    loss_sum = 0
    correct = 0
    model.eval()
    with torch.no_grad():
        for x, y in data_loader:
            x, y = x.to(device).float(), y.to(device).float()
            pred = model(x)
            loss_sum += loss_fn(pred, y).item()
            correct += (pred > 0.5).type(torch.float).eq(y).sum().item()
    return loss_sum / size, correct / size


if __name__ == '__main__':
    # Hyperparameters
    learning_rate = 5e-4
    batch_size = 4
    epochs = 20

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Load data; the training set is shuffled each epoch
    train_dataset = RLHF_Dataset('data/trajectory_random_policy/train')
    val_dataset = RLHF_Dataset('data/trajectory_random_policy/test')
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)

    # Define model
    model = RLHF_Model(input_size=25*120, output_size=1).to(device)
    print('Number of parameters:', sum(p.numel() for p in model.parameters() if p.requires_grad))

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    loss_fn = torch.nn.BCELoss(reduction='sum')

    # Pick the next free checkpoint index: one more than the highest index
    # already present (model_0.pt -> 0, model_1_best.pt -> 1, model_2.pt -> 2,
    # model_2_best.pt -> 2); an empty folder yields index 0
    os.makedirs('checkpoints', exist_ok=True)
    index = max([int(f.split('_')[1].split('.')[0]) for f in os.listdir('checkpoints') if f.endswith('.pt')], default=-1) + 1

    min_val_loss = float('inf')
    max_val_acc = 0

    # Train model
    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        train_loss, train_acc = train_epoch(model, optimizer, loss_fn, train_loader, device)
        val_loss, val_acc = val_epoch(model, loss_fn, val_loader, device)
        print(f"Train loss: {train_loss:.4f} | Train Accuracy: {train_acc:.4f} | "
              f"Validation loss: {val_loss:.4f} | Validation Accuracy: {val_acc:.4f}")
        # Save the latest weights every epoch, plus a best-so-far copy
        torch.save(model.state_dict(), f'checkpoints/model_{index}.pt')
        if val_loss < min_val_loss:
            min_val_loss = val_loss
            torch.save(model.state_dict(), f'checkpoints/model_{index}_best.pt')
        if val_acc > max_val_acc:
            max_val_acc = val_acc
    print(f"Done! Best val loss: {min_val_loss:.4f} | Best val acc: {max_val_acc:.4f}")
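
    # A minimal sketch of reusing the best checkpoint later, assuming the same
    # RLHF_Model constructor arguments as above (not part of the training run):
    #
    #   model = RLHF_Model(input_size=25*120, output_size=1).to(device)
    #   model.load_state_dict(torch.load(f'checkpoints/model_{index}_best.pt',
    #                                    map_location=device))
    #   model.eval()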