valid argument fix for gradscaler
Bala-Vignesh-Reddy committed Jan 5, 2025
Commit 77180d7 (2 parents: 5da8d81 + dcb82e2)
Showing 5 changed files with 7 additions and 7 deletions.
classify/val.py (2 changes: 1 addition & 1 deletion)
@@ -108,7 +108,7 @@ def run(
action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
- with torch.amp.autocast('cuda', enabled=device.type != "cpu"):
+ with torch.amp.autocast("cuda", enabled=device.type != "cpu"):
for images, labels in bar:
with dt[0]:
images, labels = images.to(device, non_blocking=True), labels.to(device)
models/common.py (4 changes: 2 additions & 2 deletions)
@@ -861,7 +861,7 @@ def forward(self, ims, size=640, augment=False, profile=False):
p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
autocast = self.amp and (p.device.type != "cpu") # Automatic Mixed Precision (AMP) inference
if isinstance(ims, torch.Tensor): # torch
- with torch.amp.autocast('cuda', enabled=autocast):
+ with torch.amp.autocast("cuda", enabled=autocast):
return self.model(ims.to(p.device).type_as(p), augment=augment) # inference

# Pre-process
@@ -888,7 +888,7 @@ def forward(self, ims, size=640, augment=False, profile=False):
x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32

- with torch.amp.autocast('cuda', enabled=autocast):
+ with torch.amp.autocast("cuda", enabled=autocast):
# Inference
with dt[1]:
y = self.model(x, augment=augment) # forward
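For context, here is a minimal, self-contained sketch (not part of this commit) of the torch.amp.autocast pattern used throughout these files: the device string is passed as the first positional argument, and the enabled flag turns mixed precision off on CPU. The toy model and input tensor below are placeholders for illustration only.

import torch

# Toy model and input for illustration only (not YOLOv5 code).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(8, 2).to(device)
x = torch.randn(4, 8, device=device)

use_amp = device.type != "cpu"  # mirrors the enabled=device.type != "cpu" checks in the diff
with torch.amp.autocast("cuda", enabled=use_amp):
    y = model(x)  # runs under autocasting only when enabled
print(y.dtype)  # torch.float16 on CUDA with AMP enabled, torch.float32 otherwise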
segment/train.py (4 changes: 2 additions & 2 deletions)
@@ -320,7 +320,7 @@ def lf(x):
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
- scaler = torch.amp.GradScaler('cuda', enabled=amp)
+ scaler = torch.amp.GradScaler(enabled=amp)
stopper, stop = EarlyStopping(patience=opt.patience), False
compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
# callbacks.run('on_train_start')
@@ -380,7 +380,7 @@ def lf(x):
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

# Forward
- with torch.amp.autocast('cuda', enabled=amp):
+ with torch.amp.autocast("cuda", enabled=amp):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
if RANK != -1:
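For context, a minimal sketch (not part of this commit) of how a scaler built with torch.amp.GradScaler(enabled=amp) is typically driven together with autocast in a single training step, assuming a PyTorch version recent enough to expose torch.amp.GradScaler. The model, optimizer, and batch below are placeholders; with enabled=False the scaler calls become pass-throughs, so the same loop also runs on CPU.

import torch
import torch.nn.functional as F

# Placeholder model, optimizer, and batch for illustration only (not YOLOv5 code).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
amp = device.type != "cpu"
model = torch.nn.Linear(8, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = torch.amp.GradScaler(enabled=amp)  # the device argument defaults to "cuda"

x = torch.randn(16, 8, device=device)
target = torch.randint(0, 2, (16,), device=device)

optimizer.zero_grad()
with torch.amp.autocast("cuda", enabled=amp):
    loss = F.cross_entropy(model(x), target)  # forward pass in mixed precision
scaler.scale(loss).backward()  # scale the loss so fp16 gradients do not underflow
scaler.step(optimizer)  # unscales gradients and skips the step if they contain inf/nan
scaler.update()  # adjusts the scale factor for the next iteration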
train.py (2 changes: 1 addition & 1 deletion)
@@ -409,7 +409,7 @@ def lf(x):
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

# Forward
- with torch.amp.autocast('cuda', enabled=amp):
+ with torch.amp.autocast("cuda", enabled=amp):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if RANK != -1:
utils/autobatch.py (2 changes: 1 addition & 1 deletion)
@@ -12,7 +12,7 @@

def check_train_batch_size(model, imgsz=640, amp=True):
"""Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting."""
- with torch.amp.autocast('cuda', enabled=amp):
+ with torch.amp.autocast("cuda", enabled=amp):
return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size

