From a71041a2a2cd5d1e3761440d40a7a057c058cd42 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Sat, 24 Apr 2021 02:57:54 +0300
Subject: [PATCH] update to ultralytics/yolov5 24.04.21 (#19)
* update to v5.0.1
* update to ultralytics/yolov5 24.04.21
* update readme
* remove redundant file
* update onnx coremltools export workflow
---
.github/workflows/ci.yml | 1 +
.github/workflows/package_testing.yml | 4 +
README.md | 2 +-
yolov5/__init__.py | 3 +-
yolov5/data/argoverse_hd.yaml | 2 +-
yolov5/data/coco.yaml | 4 +-
yolov5/data/coco128.yaml | 2 +-
yolov5/data/scripts/get_argoverse_hd.sh | 2 +-
yolov5/data/scripts/get_coco.sh | 2 +-
yolov5/data/scripts/get_voc.sh | 113 +++++++++-------------
yolov5/data/visdrone.yaml | 65 +++++++++++++
yolov5/data/voc.yaml | 2 +-
yolov5/detect.py | 24 +++--
yolov5/hubconf.py | 10 +-
yolov5/models/common.py | 37 ++++---
yolov5/models/export.py | 71 +++++++++-----
yolov5/models/yolo.py | 15 +--
yolov5/test.py | 4 +-
yolov5/train.py | 17 ++--
yolov5/utils/activations.py | 58 +++++++----
yolov5/utils/autoanchor.py | 2 +-
yolov5/utils/aws/resume.py | 2 +-
yolov5/utils/datasets.py | 20 ++--
yolov5/utils/general.py | 67 ++++++++++---
yolov5/utils/google_utils.py | 10 +-
yolov5/utils/metrics.py | 2 +-
yolov5/utils/plots.py | 28 +++---
yolov5/utils/wandb_logging/log_dataset.py | 2 +-
yolov5/utils/wandb_logging/wandb_utils.py | 10 +-
29 files changed, 373 insertions(+), 208 deletions(-)
create mode 100644 yolov5/data/visdrone.yaml
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 031fa3f..08e6e98 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -74,5 +74,6 @@ jobs:
python yolov5/test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
yolo_test --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
# export
+ pip install onnx coremltools
python yolov5/models/export.py --weights yolov5/weights/yolov5s.pt --device $di
yolo_export --weights yolov5/weights/yolov5s.pt --device $di
diff --git a/.github/workflows/package_testing.yml b/.github/workflows/package_testing.yml
index 263f888..9d4c481 100644
--- a/.github/workflows/package_testing.yml
+++ b/.github/workflows/package_testing.yml
@@ -70,3 +70,7 @@ jobs:
yolo_test --img 128 --batch 16 --weights yolov5/weights/yolov5s.pt --device $di
python yolov5/test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
yolo_test --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
+ # export
+ pip install onnx coremltools
+ python yolov5/models/export.py --weights yolov5/weights/yolov5s.pt --device $di
+ yolo_export --weights yolov5/weights/yolov5s.pt --device $di
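
For reference, a minimal local sanity check of the ONNX file this export step produces, assuming onnx is installed and the default weights path used above:

    import onnx

    # check the file written by the export step above; the path mirrors --weights
    model_onnx = onnx.load('yolov5/weights/yolov5s.onnx')
    onnx.checker.check_model(model_onnx)  # raises if the graph is malformed
    print('inputs:', [i.name for i in model_onnx.graph.input])  # expect ['images']
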
diff --git a/README.md b/README.md
index 34d63f8..f08fb62 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ You can finally install [YOLOv5 object detector](https://github.com/ultralytics/
This package is up-to-date with the latest release of [ultralytics/yolov5](https://github.com/ultralytics/yolov5).
-
+
## Installation
diff --git a/yolov5/__init__.py b/yolov5/__init__.py
index 61a43b0..92af750 100644
--- a/yolov5/__init__.py
+++ b/yolov5/__init__.py
@@ -1,5 +1,4 @@
from yolov5.helpers import YOLOv5
from yolov5.helpers import load_model as load
-
-__version__ = "5.0.0"
+__version__ = "5.0.1"
diff --git a/yolov5/data/argoverse_hd.yaml b/yolov5/data/argoverse_hd.yaml
index df7a936..0ba314d 100644
--- a/yolov5/data/argoverse_hd.yaml
+++ b/yolov5/data/argoverse_hd.yaml
@@ -1,6 +1,6 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Train command: python train.py --data argoverse_hd.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /argoverse
# /yolov5
diff --git a/yolov5/data/coco.yaml b/yolov5/data/coco.yaml
index b9da2bf..f818a49 100644
--- a/yolov5/data/coco.yaml
+++ b/yolov5/data/coco.yaml
@@ -1,6 +1,6 @@
# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco
# /yolov5
@@ -30,6 +30,6 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', '
# Print classes
# with open('data/coco.yaml') as f:
-# d = yaml.load(f, Loader=yaml.FullLoader) # dict
+# d = yaml.safe_load(f) # dict
# for i, x in enumerate(d['names']):
# print(i, x)
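
The commented snippet above becomes runnable as follows, assuming PyYAML is installed and the repo root is the working directory:

    import yaml

    # safe_load parses plain YAML without constructing arbitrary Python objects
    with open('yolov5/data/coco.yaml') as f:
        d = yaml.safe_load(f)  # dict
    for i, x in enumerate(d['names'][:5]):
        print(i, x)  # 0 person, 1 bicycle, ...
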
diff --git a/yolov5/data/coco128.yaml b/yolov5/data/coco128.yaml
index c41bccf..83fbc29 100644
--- a/yolov5/data/coco128.yaml
+++ b/yolov5/data/coco128.yaml
@@ -1,6 +1,6 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco128
# /yolov5
diff --git a/yolov5/data/scripts/get_argoverse_hd.sh b/yolov5/data/scripts/get_argoverse_hd.sh
index caec61e..18131a6 100644
--- a/yolov5/data/scripts/get_argoverse_hd.sh
+++ b/yolov5/data/scripts/get_argoverse_hd.sh
@@ -2,7 +2,7 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Download command: bash data/scripts/get_argoverse_hd.sh
# Train command: python train.py --data argoverse_hd.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /argoverse
# /yolov5
diff --git a/yolov5/data/scripts/get_coco.sh b/yolov5/data/scripts/get_coco.sh
index bbb1e92..caae375 100755
--- a/yolov5/data/scripts/get_coco.sh
+++ b/yolov5/data/scripts/get_coco.sh
@@ -2,7 +2,7 @@
# COCO 2017 dataset http://cocodataset.org
# Download command: bash data/scripts/get_coco.sh
# Train command: python train.py --data coco.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco
# /yolov5
diff --git a/yolov5/data/scripts/get_voc.sh b/yolov5/data/scripts/get_voc.sh
index 13b83c2..4c04aaa 100644
--- a/yolov5/data/scripts/get_voc.sh
+++ b/yolov5/data/scripts/get_voc.sh
@@ -2,7 +2,7 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash data/scripts/get_voc.sh
# Train command: python train.py --data voc.yaml
-# Default dataset location is next to /yolov5:
+# Default dataset location is next to YOLOv5:
# /parent_folder
# /VOC
# /yolov5
@@ -29,34 +29,27 @@ echo "Completed in" $runtime "seconds"
echo "Splitting dataset..."
python3 - "$@" <train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
-python3 - "$@" <= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
- p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+ p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
@@ -112,9 +112,13 @@ def detect(save_img=False, opt=None):
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
- if save_img or view_img: # Add bbox to image
- label = f'{names[int(cls)]} {conf:.2f}'
- plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+ if save_img or opt.save_crop or view_img: # Add bbox to image
+ c = int(cls) # integer class
+ label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
+
+ plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=opt.line_thickness)
+ if opt.save_crop:
+ save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
@@ -161,6 +165,7 @@ def main():
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
@@ -169,6 +174,9 @@ def main():
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+ parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+ parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
opt = parser.parse_args()
print(opt)
#check_requirements(exclude=('pycocotools', 'thop'))
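
A short sketch of how the new --hide-labels and --hide-conf flags combine in the label expression added above; names, conf and c are placeholder values:

    # placeholder values standing in for a real detection
    names, conf, c = ['person'], 0.87, 0
    for hide_labels, hide_conf in [(False, False), (False, True), (True, True)]:
        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
        print(hide_labels, hide_conf, '->', label)
    # (False, False) -> 'person 0.87', (False, True) -> 'person', (True, *) -> None
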
diff --git a/yolov5/hubconf.py b/yolov5/hubconf.py
index 9532f59..ae6bc47 100644
--- a/yolov5/hubconf.py
+++ b/yolov5/hubconf.py
@@ -124,13 +124,15 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True):
# model = custom(path_or_model='path/to/model.pt') # custom example
# Verify inference
+ import cv2
import numpy as np
from PIL import Image
- imgs = [Image.open('data/images/bus.jpg'), # PIL
- 'data/images/zidane.jpg', # filename
- 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI
- np.zeros((640, 480, 3))] # numpy
+ imgs = ['data/images/zidane.jpg', # filename
+ 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI
+ cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
+ Image.open('data/images/bus.jpg'), # PIL
+ np.zeros((320, 640, 3))] # numpy
results = model(imgs) # batched inference
results.print()
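
The verification snippet above assumes model is already loaded; one way to obtain it, assuming network access to the hub repo:

    import torch

    # downloads yolov5s weights on first call
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
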
diff --git a/yolov5/models/common.py b/yolov5/models/common.py
index 0725370..c991533 100644
--- a/yolov5/models/common.py
+++ b/yolov5/models/common.py
@@ -13,7 +13,8 @@
from torch.cuda import amp
from yolov5.utils.datasets import letterbox
from yolov5.utils.general import (increment_path, make_divisible,
- non_max_suppression, scale_coords, xyxy2xywh)
+ non_max_suppression, save_one_box,
+ scale_coords, xyxy2xywh)
from yolov5.utils.plots import color_list, plot_one_box
from yolov5.utils.torch_utils import time_synchronized
@@ -240,7 +241,7 @@ def autoshape(self):
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
- # filename: imgs = 'data/samples/zidane.jpg'
+ # filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
@@ -271,7 +272,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
- imgs[i] = im # update
+ imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
@@ -311,29 +312,33 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)
self.s = shape # inference BCHW shape
- def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
+ def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
colors = color_list()
- for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
- str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
+ for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
+ str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
- if show or save or render:
+ if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
- plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
- img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np
+ if crop:
+ save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
+ else: # all others
+ plot_one_box(box, im, label=label, color=colors[int(cls) % 10])
+
+ im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
print(str.rstrip(', '))
if show:
- img.show(self.files[i]) # show
+ im.show(self.files[i]) # show
if save:
f = self.files[i]
- img.save(Path(save_dir) / f) # save
+ im.save(save_dir / f) # save
print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
- self.imgs[i] = np.asarray(img)
+ self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
@@ -343,10 +348,14 @@ def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/hub/exp'):
- save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir
- Path(save_dir).mkdir(parents=True, exist_ok=True)
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
+ def crop(self, save_dir='runs/hub/exp'):
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
+ self.display(crop=True, save_dir=save_dir) # crop results
+ print(f'Saved results to {save_dir}\n')
+
def render(self):
self.display(render=True) # render results
return self.imgs
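
A usage sketch for the new crop() method, assuming a hub-loaded model and a local image; per the save_one_box() call above, crops land under save_dir/crops/<class>/:

    import torch

    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
    results = model('data/images/zidane.jpg')  # path assumed from the hubconf example
    results.crop(save_dir='runs/hub/exp')      # writes crops/<class>/<file> per detection
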
diff --git a/yolov5/models/export.py b/yolov5/models/export.py
index 6a42595..68802f6 100644
--- a/yolov5/models/export.py
+++ b/yolov5/models/export.py
@@ -1,7 +1,7 @@
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
- $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+ $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""
import argparse
@@ -12,20 +12,27 @@
import torch
import torch.nn as nn
import yolov5.models as models
+from torch.utils.mobile_optimizer import optimize_for_mobile
from yolov5.models.experimental import attempt_load
from yolov5.utils.activations import Hardswish, SiLU
-from yolov5.utils.general import check_img_size, set_logging
+from yolov5.utils.general import (check_img_size, check_requirements, colorstr,
+ set_logging)
from yolov5.utils.torch_utils import select_device
+#sys.path.append('./') # to run '$ python *.py' files in subdirectories
+
+
+
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/
+ parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
- parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only
+ parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only
opt = parser.parse_args()
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
@@ -61,55 +68,75 @@ def main():
# elif isinstance(m, models.yolo.Detect):
# m.forward = m.forward_export # assign forward (optional)
model.model[-1].export = not opt.grid # set Detect() layer grid export
- y = model(img) # dry run
+ for _ in range(2):
+ y = model(img) # dry runs
# remove yolov5 folder from system path
sys.path.remove(yolov5_folder_dir)
- # TorchScript export
+
+ # TorchScript export -----------------------------------------------------------------------------------------------
+ prefix = colorstr('TorchScript:')
try:
- print('\nStarting TorchScript export with torch %s...' % torch.__version__)
+ print(f'\n{prefix} starting export with torch {torch.__version__}...')
f = opt.weights.replace('.pt', '.torchscript.pt') # filename
ts = torch.jit.trace(model, img, strict=False)
+ ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html
ts.save(f)
- print('TorchScript export success, saved as %s' % f)
+ print(f'{prefix} export success, saved as {f}')
except Exception as e:
- print('TorchScript export failure: %s' % e)
+ print(f'{prefix} export failure: {e}')
- # ONNX export
+ # ONNX export ------------------------------------------------------------------------------------------------------
+ prefix = colorstr('ONNX:')
try:
import onnx
- print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
+ print(f'{prefix} starting export with onnx {onnx.__version__}...')
f = opt.weights.replace('.pt', '.onnx') # filename
torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
- output_names=['classes', 'boxes'] if y is None else ['output'],
dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
# Checks
- onnx_model = onnx.load(f) # load onnx model
- onnx.checker.check_model(onnx_model) # check onnx model
- # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
- print('ONNX export success, saved as %s' % f)
+ model_onnx = onnx.load(f) # load onnx model
+ onnx.checker.check_model(model_onnx) # check onnx model
+ # print(onnx.helper.printable_graph(model_onnx.graph)) # print
+
+ # Simplify
+ if opt.simplify:
+ try:
+ check_requirements(['onnx-simplifier'])
+ import onnxsim
+
+ print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+ model_onnx, check = onnxsim.simplify(model_onnx,
+ dynamic_input_shape=opt.dynamic,
+ input_shapes={'images': list(img.shape)} if opt.dynamic else None)
+ assert check, 'assert check failed'
+ onnx.save(model_onnx, f)
+ except Exception as e:
+ print(f'{prefix} simplifier failure: {e}')
+ print(f'{prefix} export success, saved as {f}')
except Exception as e:
- print('ONNX export failure: %s' % e)
+ print(f'{prefix} export failure: {e}')
- # CoreML export
+ # CoreML export ----------------------------------------------------------------------------------------------------
+ prefix = colorstr('CoreML:')
try:
import coremltools as ct
- print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
+ print(f'{prefix} starting export with coremltools {ct.__version__}...')
# convert model from torchscript and apply pixel scaling as per detect.py
model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
f = opt.weights.replace('.pt', '.mlmodel') # filename
model.save(f)
- print('CoreML export success, saved as %s' % f)
+ print(f'{prefix} export success, saved as {f}')
except Exception as e:
- print('CoreML export failure: %s' % e)
+ print(f'{prefix} export failure: {e}')
# Finish
- print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
+ print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
if __name__ == '__main__':
main()
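
The exported graph names its input 'images' (see input_names above), so it can be exercised with onnxruntime, which this patch does not install and is assumed here:

    import numpy as np
    import onnxruntime as ort  # assumed dependency, not installed by this patch

    sess = ort.InferenceSession('yolov5s.onnx')
    img = np.zeros((1, 3, 640, 640), dtype=np.float32)  # BCHW, the default export shape
    out = sess.run(None, {'images': img})  # 'images' matches input_names above
    print([o.shape for o in out])
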
diff --git a/yolov5/models/yolo.py b/yolov5/models/yolo.py
index 767fc94..12862e5 100644
--- a/yolov5/models/yolo.py
+++ b/yolov5/models/yolo.py
@@ -5,6 +5,7 @@
import sys
from copy import deepcopy
+#sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from yolov5.models.common import *
@@ -73,7 +74,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None, verbose=1):
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
- self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict
+ self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
@@ -268,14 +269,14 @@ def parse_model(d, ch, verbose=1): # model_dict, input_channels(3)
# Create model
model = Model(opt.cfg).to(device)
model.train()
-
+
# Profile
- # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+ # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device)
# y = model(img, profile=True)
- # Tensorboard
+ # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
# from torch.utils.tensorboard import SummaryWriter
- # tb_writer = SummaryWriter()
- # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
- # tb_writer.add_graph(model.model, img) # add model to tensorboard
+ # tb_writer = SummaryWriter('.')
+ # print("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
+ # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
diff --git a/yolov5/test.py b/yolov5/test.py
index 2568acb..b6fbbb6 100644
--- a/yolov5/test.py
+++ b/yolov5/test.py
@@ -54,7 +54,7 @@ def test(data,
device = select_device(opt.device, batch_size=batch_size)
# Directories
- save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
@@ -76,7 +76,7 @@ def test(data,
if isinstance(data, str):
is_coco = data.endswith('coco.yaml')
with open(data) as f:
- data = yaml.load(f, Loader=yaml.SafeLoader)
+ data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
diff --git a/yolov5/train.py b/yolov5/train.py
index adeb6e2..fb26493 100644
--- a/yolov5/train.py
+++ b/yolov5/train.py
@@ -60,16 +60,16 @@ def train(hyp, opt, device, tb_writer=None):
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.dump(vars(opt), f, sort_keys=False)
+ yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
+ data_dict = yaml.safe_load(f) # data dict
is_coco = opt.data.endswith('coco.yaml')
# Logging- Doing this before checking the dataset. Might update data_dict
@@ -77,7 +77,7 @@ def train(hyp, opt, device, tb_writer=None):
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
- wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
+ wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
@@ -524,8 +524,9 @@ def main():
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
- opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
- opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
+ opt = argparse.Namespace(**yaml.safe_load(f)) # replace
+ opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
+ '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
@@ -533,7 +534,7 @@ def main():
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
- opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# DDP mode
opt.total_batch_size = opt.batch_size
@@ -548,7 +549,7 @@ def main():
# Hyperparameters
with open(opt.hyp) as f:
- hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
+ hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
diff --git a/yolov5/utils/activations.py b/yolov5/utils/activations.py
index aa3ddf0..1d095c1 100644
--- a/yolov5/utils/activations.py
+++ b/yolov5/utils/activations.py
@@ -19,23 +19,6 @@ def forward(x):
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
-class MemoryEfficientSwish(nn.Module):
- class F(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return x * torch.sigmoid(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- sx = torch.sigmoid(x)
- return grad_output * (sx * (1 + x * (1 - sx)))
-
- def forward(self, x):
- return self.F.apply(x)
-
-
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
@staticmethod
@@ -70,3 +53,44 @@ def __init__(self, c1, k=3): # ch_in, kernel
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
+
+
+# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
+class AconC(nn.Module):
+ r""" ACON activation (activate or not).
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
+ according to "Activate or Not: Learning Customized Activation" .
+ """
+
+ def __init__(self, c1):
+ super().__init__()
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
+
+ def forward(self, x):
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
+
+
+class MetaAconC(nn.Module):
+ r""" ACON activation (activate or not).
+ MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
+ according to "Activate or Not: Learning Customized Activation" .
+ """
+
+ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
+ super().__init__()
+ c2 = max(r, c1 // r)
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False)
+ self.bn1 = nn.BatchNorm2d(c2)
+ self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False)
+ self.bn2 = nn.BatchNorm2d(c1)
+
+ def forward(self, x):
+ y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
+ beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
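
A quick shape check for the new ACON modules; both are per-channel elementwise activations, so output shape equals input shape:

    import torch
    from yolov5.utils.activations import AconC, MetaAconC

    x = torch.randn(2, 64, 32, 32)  # arbitrary batch and channel count
    for act in (AconC(64), MetaAconC(64)):
        assert act(x).shape == x.shape
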
diff --git a/yolov5/utils/autoanchor.py b/yolov5/utils/autoanchor.py
index 2609c3e..1ff9816 100644
--- a/yolov5/utils/autoanchor.py
+++ b/yolov5/utils/autoanchor.py
@@ -101,7 +101,7 @@ def print_results(k):
if isinstance(path, str): # *.yaml file
with open(path) as f:
- data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict
+ data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
else:
diff --git a/yolov5/utils/aws/resume.py b/yolov5/utils/aws/resume.py
index faad8d2..4b0d424 100644
--- a/yolov5/utils/aws/resume.py
+++ b/yolov5/utils/aws/resume.py
@@ -19,7 +19,7 @@
# Load opt.yaml
with open(last.parent.parent / 'opt.yaml') as f:
- opt = yaml.load(f, Loader=yaml.SafeLoader)
+ opt = yaml.safe_load(f)
# Get device count
d = opt['device'].split(',') # devices
diff --git a/yolov5/utils/datasets.py b/yolov5/utils/datasets.py
index ecf59d7..03f3ce5 100755
--- a/yolov5/utils/datasets.py
+++ b/yolov5/utils/datasets.py
@@ -273,15 +273,15 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
- for i, s in enumerate(sources):
- # Start the thread to read frames from the video stream
+ for i, s in enumerate(sources): # index, source
+ # Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
- url = eval(s) if s.isnumeric() else s
- if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video
+ if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
#check_requirements(('pafy', 'youtube_dl'))
import pafy
- url = pafy.new(url).getbest(preftype="mp4").url
- cap = cv2.VideoCapture(url)
+ s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
+ s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
+ cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@@ -635,10 +635,10 @@ def load_image(self, index):
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
- r = self.img_size / max(h0, w0) # resize image to img_size
- if r != 1: # always resize down, only resize up if training with augmentation
- interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
- img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
+ r = self.img_size / max(h0, w0) # ratio
+ if r != 1: # if sizes are not equal
+ img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
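
A sketch of the interpolation rule above: INTER_AREA when shrinking outside augmentation, INTER_LINEAR otherwise; sizes are placeholders:

    import cv2

    img_size, (h0, w0), augment = 640, (1080, 1920), False  # placeholder sizes
    r = img_size / max(h0, w0)  # ratio, here < 1 (downscale)
    interp = cv2.INTER_AREA if r < 1 and not augment else cv2.INTER_LINEAR
    assert interp == cv2.INTER_AREA
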
diff --git a/yolov5/utils/general.py b/yolov5/utils/general.py
index 00c7a35..0bc4f9a 100755
--- a/yolov5/utils/general.py
+++ b/yolov5/utils/general.py
@@ -9,6 +9,8 @@
import re
import subprocess
import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
@@ -110,7 +112,7 @@ def check_requirements(requirements='requirements.txt', exclude=()):
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
n += 1
print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...")
- print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
+ print(subprocess.check_output(f"pip install {e.req}", shell=True).decode())
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
@@ -160,18 +162,40 @@ def check_dataset(dict):
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
- print('Downloading %s ...' % s)
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
+ print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
- r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
- else: # bash script
+ r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip
+ elif s.startswith('bash '): # bash script
+ print(f'Running {s} ...')
r = os.system(s)
- print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
+ else: # python script
+ r = exec(s) # return None
+ print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
+def download(url, dir='.', multi_thread=False):
+ # Multi-threaded file download function
+ def download_one(url, dir):
+ # Download 1 file
+ f = dir / Path(url).name # filename
+ print(f'Downloading {url} to {f}...')
+ torch.hub.download_url_to_file(url, f, progress=True) # download
+ if f.suffix == '.zip':
+ os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite
+
+ dir = Path(dir)
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ if multi_thread:
+ ThreadPool(8).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # 8 threads
+ else:
+ for u in tuple(url) if isinstance(url, str) else url:
+ download_one(u, dir)
+
+
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
@@ -549,14 +573,14 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
- # applies a second stage classifier to yolo outputs
+ # Apply a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
@@ -590,14 +614,31 @@ def apply_classifier(x, model, img, im0):
return x
-def increment_path(path, exist_ok=True, sep=''):
- # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
+def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False):
+ # Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels
+ xyxy = torch.tensor(xyxy).view(-1, 4)
+ b = xyxy2xywh(xyxy) # boxes
+ if square:
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
+ b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
+ xyxy = xywh2xyxy(b).long()
+ clip_coords(xyxy, im.shape)
+ crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])]
+ cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1])
+
+
+def increment_path(path, exist_ok=False, sep='', mkdir=False):
+ # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
- if (path.exists() and exist_ok) or (not path.exists()):
- return str(path)
- else:
+ if path.exists() and not exist_ok:
+ suffix = path.suffix
+ path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
- return f"{path}{sep}{n}" # update path
+ path = Path(f"{path}{sep}{n}{suffix}") # update path
+ dir = path if path.suffix == '' else path.parent # directory
+ if not dir.exists() and mkdir:
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ return path
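
increment_path() now defaults to exist_ok=False, returns a pathlib.Path, numbers runs from 2, and can create the directory itself; a small sketch, assuming an empty runs/demo:

    from yolov5.utils.general import increment_path

    p1 = increment_path('runs/demo/exp', mkdir=True)  # -> runs/demo/exp (created)
    p2 = increment_path('runs/demo/exp', mkdir=True)  # -> runs/demo/exp2
    print(type(p1).__name__)  # Path, not str as before
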
diff --git a/yolov5/utils/google_utils.py b/yolov5/utils/google_utils.py
index 0a7ca3b..6a4660b 100644
--- a/yolov5/utils/google_utils.py
+++ b/yolov5/utils/google_utils.py
@@ -18,7 +18,7 @@ def gsutil_getsize(url=''):
def attempt_download(file, repo='ultralytics/yolov5'):
# Attempt file download if does not exist
- file = Path(str(file).strip().replace("'", '').lower())
+ file = Path(str(file).strip().replace("'", ''))
if not file.exists():
try:
@@ -26,8 +26,12 @@ def attempt_download(file, repo='ultralytics/yolov5'):
assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
tag = response['tag_name'] # i.e. 'v1.0'
except: # fallback plan
- assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
- tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]
+ assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
+ 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
+ try:
+ tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
+ except:
+ tag = 'v5.0' # current release
name = file.name
if name in assets:
diff --git a/yolov5/utils/metrics.py b/yolov5/utils/metrics.py
index 666b8c7..323c84b 100644
--- a/yolov5/utils/metrics.py
+++ b/yolov5/utils/metrics.py
@@ -145,7 +145,7 @@ def process_batch(self, detections, labels):
for i, gc in enumerate(gt_classes):
j = m0 == i
if n and sum(j) == 1:
- self.matrix[gc, detection_classes[m1[j]]] += 1 # correct
+ self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
self.matrix[self.nc, gc] += 1 # background FP
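
The fix swaps the update to matrix[predicted_class, true_class], consistent with the background row self.matrix[self.nc, gc]; a minimal sketch with placeholder classes:

    import numpy as np

    nc = 3  # placeholder class count
    matrix = np.zeros((nc + 1, nc + 1))  # extra row/col for background
    pred_cls, true_cls = 2, 0  # one matched detection
    matrix[pred_cls, true_cls] += 1  # rows: predictions, columns: ground truth
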
diff --git a/yolov5/utils/plots.py b/yolov5/utils/plots.py
index ab9c76f..6ff97fa 100644
--- a/yolov5/utils/plots.py
+++ b/yolov5/utils/plots.py
@@ -53,32 +53,34 @@ def butter_lowpass(cutoff, fs, order):
return filtfilt(b, a, data) # forward-backward filter
-def plot_one_box(x, img, color=None, label=None, line_thickness=3):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
+def plot_one_box(x, im, color=None, label=None, line_thickness=3):
+ # Plots one bounding box on image 'im' using OpenCV
+ assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_one_box() input image.'
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
-def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
- img = Image.fromarray(img)
- draw = ImageDraw.Draw(img)
- line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
+def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None):
+ # Plots one bounding box on image 'im' using PIL
+ im = Image.fromarray(im)
+ draw = ImageDraw.Draw(im)
+ line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
if label:
- fontsize = max(round(max(img.size) / 40), 12)
+ fontsize = max(round(max(im.size) / 40), 12)
font = ImageFont.truetype("Arial.ttf", fontsize)
txt_width, txt_height = font.getsize(label)
draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
- return np.asarray(img)
+ return np.asarray(im)
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
@@ -320,7 +322,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.SafeLoader)
+ hyp = yaml.safe_load(f)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
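
Why the new contiguity assert matters: BGR-to-RGB slicing produces a non-contiguous view that OpenCV drawing functions cannot write into; a sketch, assuming a local image path:

    import cv2
    import numpy as np

    im = cv2.imread('data/images/bus.jpg')  # path assumed from examples above
    rgb = im[:, :, ::-1]                    # BGR -> RGB view, non-contiguous
    print(rgb.data.contiguous)              # False: would trip the assert
    rgb = np.ascontiguousarray(rgb)         # fix before calling plot_one_box()
    print(rgb.data.contiguous)              # True
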
diff --git a/yolov5/utils/wandb_logging/log_dataset.py b/yolov5/utils/wandb_logging/log_dataset.py
index d7a521f..f45a230 100644
--- a/yolov5/utils/wandb_logging/log_dataset.py
+++ b/yolov5/utils/wandb_logging/log_dataset.py
@@ -9,7 +9,7 @@
def create_dataset_artifact(opt):
with open(opt.data) as f:
- data = yaml.load(f, Loader=yaml.SafeLoader) # data dict
+ data = yaml.safe_load(f) # data dict
logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
diff --git a/yolov5/utils/wandb_logging/wandb_utils.py b/yolov5/utils/wandb_logging/wandb_utils.py
index d8f50ae..d8fbd1e 100644
--- a/yolov5/utils/wandb_logging/wandb_utils.py
+++ b/yolov5/utils/wandb_logging/wandb_utils.py
@@ -55,7 +55,7 @@ def check_wandb_resume(opt):
def process_wandb_config_ddp_mode(opt):
with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
+ data_dict = yaml.safe_load(f) # data dict
train_dir, val_dir = None, None
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
api = wandb.Api()
@@ -73,7 +73,7 @@ def process_wandb_config_ddp_mode(opt):
if train_dir or val_dir:
ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
with open(ddp_data_path, 'w') as f:
- yaml.dump(data_dict, f)
+ yaml.safe_dump(data_dict, f)
opt.data = ddp_data_path
@@ -120,7 +120,7 @@ def check_and_upload_dataset(self, opt):
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
print("Created dataset config file ", config_path)
with open(config_path) as f:
- wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ wandb_data_dict = yaml.safe_load(f)
return wandb_data_dict
def setup_training(self, opt, data_dict):
@@ -192,7 +192,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
with open(data_file) as f:
- data = yaml.load(f, Loader=yaml.SafeLoader) # data dict
+ data = yaml.safe_load(f) # data dict
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
names = {k: v for k, v in enumerate(names)} # to index dictionary
self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
@@ -206,7 +206,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
data.pop('download', None)
with open(path, 'w') as f:
- yaml.dump(data, f)
+ yaml.safe_dump(data, f)
if self.job_type == 'Training': # builds correct artifact pipeline graph
self.wandb_run.use_artifact(self.val_artifact)