trainer.py
import torch
from tqdm import tqdm


def train_epoch(model, optimizer, scheduler, loader, X, truth, criterion, device, kwargs):
    """Run one training epoch; return the mean loss and accuracy over batches."""
    model.train()
    losses = 0.0
    accs = 0.0
    L = kwargs['L']  # currently unused here; kwargs is forwarded to the model unchanged
    for idx in tqdm(loader):
        # `idx` is a batch of row indices; gather the corresponding rows.
        x = X[idx, :].to(device)
        truth_x = truth[idx, :].to(device)
        preds = model(x, truth_x, **kwargs)
        loss, acc = criterion(preds)
        optimizer.zero_grad()
        loss.backward()
        # torch.nn.utils.clip_grad_value_(model.parameters(), 0.01)
        optimizer.step()
        if scheduler is not None:
            scheduler.step()
        losses += loss.item()
        accs += acc.item()
    return losses / len(loader), accs / len(loader)


def val_epoch(model, loader, X, truth, criterion, device, kwargs):
    """Run one validation epoch; return the mean loss and accuracy over batches."""
    model.eval()
    losses = 0.0
    accs = 0.0
    with torch.no_grad():  # no gradients needed during evaluation
        for idx in tqdm(loader):
            x = X[idx, :].to(device)
            truth_x = truth[idx, :].to(device)
            preds = model(x, truth_x, **kwargs)
            loss, acc = criterion(preds)
            losses += loss.item()
            accs += acc.item()
    return losses / len(loader), accs / len(loader)
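

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal driver loop showing
# how train_epoch / val_epoch might be wired together. The ToyModel, the toy
# criterion, the tensor shapes, and the contents of `kwargs` are all
# assumptions for illustration; only the two functions above come from this
# file. Note that criterion takes whatever the model returns and yields a
# (loss, acc) pair, and the loader yields batches of row indices into X.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Dummy data: 256 samples of dimension 16 (shapes are assumptions).
    X = torch.randn(256, 16)
    truth = torch.randn(256, 16)

    class ToyModel(nn.Module):
        """Hypothetical model matching the model(x, truth_x, **kwargs) call."""

        def __init__(self):
            super().__init__()
            self.net = nn.Linear(16, 16)

        def forward(self, x, truth_x, **kwargs):
            # Return predictions together with targets so the criterion
            # can compute both loss and accuracy from `preds` alone.
            return self.net(x), truth_x

    def criterion(preds):
        """Toy criterion: MSE loss plus a sign-agreement 'accuracy'."""
        pred, target = preds
        loss = nn.functional.mse_loss(pred, target)
        acc = (pred.sign() == target.sign()).float().mean()
        return loss, acc

    model = ToyModel().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    # The loader iterates over row indices, matching X[idx, :] above.
    loader = DataLoader(range(len(X)), batch_size=32, shuffle=True)
    kwargs = {"L": 16}  # train_epoch reads kwargs['L'], so it must be present

    for epoch in range(3):
        tr_loss, tr_acc = train_epoch(
            model, optimizer, None, loader, X, truth, criterion, device, kwargs
        )
        va_loss, va_acc = val_epoch(model, loader, X, truth, criterion, device, kwargs)
        print(
            f"epoch {epoch}: train loss {tr_loss:.4f} acc {tr_acc:.4f} | "
            f"val loss {va_loss:.4f} acc {va_acc:.4f}"
        )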