diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4da9d55..219dcdf 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,10 +19,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
+
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
+
- name: Restore Ubuntu cache
uses: actions/cache@v1
if: matrix.operating-system == 'ubuntu-latest'
@@ -30,6 +32,7 @@ jobs:
path: ~/.cache/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Restore MacOS cache
uses: actions/cache@v1
if: matrix.operating-system == 'macos-latest'
@@ -37,6 +40,7 @@ jobs:
path: ~/Library/Caches/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Restore Windows cache
uses: actions/cache@v1
if: matrix.operating-system == 'windows-latest'
@@ -44,37 +48,46 @@ jobs:
path: ~\AppData\Local\pip\Cache
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Update pip
run: python -m pip install --upgrade pip
- - name: Install numpy and matplotlib for Python 3.6
- if: matrix.python-version == 3.6
- run: >
- pip install "numpy>=1.18.5,<1.20" "matplotlib>=3.2.2,<4"
+
- name: Install dependencies
run: >
pip install -r requirements.txt
+
- name: Test with unittest
run: |
python -m unittest
+
- name: Test scripts
run: |
pip install -e .
- di=cpu # inference devices # define device
# train
- python yolov5/train.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di
- yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di
- yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di --evolve 2
+ python yolov5/train.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu
+ yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu --freeze 10
+ yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu --evolve 2
# detect
- python yolov5/detect.py --weights yolov5/weights/yolov5n.pt --device $di
- yolov5 detect --weights yolov5/weights/yolov5n.pt --device $di
- python yolov5/detect.py --weights runs/train/exp/weights/last.pt --device $di
- yolov5 detect --weights runs/train/exp/weights/last.pt --device $di
+ python yolov5/detect.py --weights yolov5/weights/yolov5n.pt --device cpu
+ yolov5 detect --weights yolov5/weights/yolov5n.pt --device cpu
+ python yolov5/detect.py --weights runs/train/exp/weights/last.pt --device cpu
+ yolov5 detect --weights runs/train/exp/weights/last.pt --device cpu
# val
- python yolov5/val.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device $di
- yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device $di
- python yolov5/val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
- yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
+ python yolov5/val.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device cpu
+ yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device cpu
+ python yolov5/val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device cpu
+ yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device cpu
# export
pip install onnx onnx-simplifier tensorflowjs
- python yolov5/export.py --weights yolov5/weights/yolov5n.pt --device $di --include 'torchscript,onnx,tflite'
- yolov5 export --weights yolov5/weights/yolov5n.pt --device $di --simplify --include torchscript,onnx,saved_model,pb,tfjs
+ python yolov5/export.py --weights yolov5/weights/yolov5n.pt --device cpu --include torchscript,onnx,tflite
+ yolov5 export --weights yolov5/weights/yolov5n.pt --device cpu --simplify --include torchscript,onnx,saved_model,pb,tfjs
+ # benchmarks
+ yolov5 benchmarks --weights yolov5s.pt --img 128 --pt-only --device cpu
+ # classify
+ yolov5 classify train --img 32 --data mnist2560 --weights yolov5s-cls.pt --epochs 1 --device cpu
+ yolov5 classify val --img 32 --data datasets/mnist2560 --weights yolov5s-cls.pt --device cpu
+ yolov5 classify predict --img 32 --weights yolov5s-cls.pt --device cpu
+ # segment
+ yolov5 segment train --img 128 --weights yolov5s-seg.pt --epochs 1 --device cpu
+ # yolov5 segment val --img 128 --weights yolov5s-seg.pt --device cpu
+ yolov5 segment predict --img 128 --weights yolov5s-seg.pt --device cpu
\ No newline at end of file
diff --git a/.github/workflows/package_testing.yml b/.github/workflows/package_testing.yml
index f846c9a..033d56d 100644
--- a/.github/workflows/package_testing.yml
+++ b/.github/workflows/package_testing.yml
@@ -17,10 +17,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
+
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
+
- name: Restore Ubuntu cache
uses: actions/cache@v1
if: matrix.operating-system == 'ubuntu-latest'
@@ -28,6 +30,7 @@ jobs:
path: ~/.cache/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Restore MacOS cache
uses: actions/cache@v1
if: matrix.operating-system == 'macos-latest'
@@ -35,6 +38,7 @@ jobs:
path: ~/Library/Caches/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Restore Windows cache
uses: actions/cache@v1
if: matrix.operating-system == 'windows-latest'
@@ -42,37 +46,47 @@ jobs:
path: ~\AppData\Local\pip\Cache
key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}}
restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}-
+
- name: Update pip
run: python -m pip install --upgrade pip
- - name: Install numpy and matplotlib for Python 3.6
- if: matrix.python-version == 3.6
- run: >
- pip install "numpy>=1.18.5,<1.20" "matplotlib>=3.2.2,<4"
+
+
- name: Install latest YOLOv5 package
run: >
pip install --upgrade --force-reinstall yolov5
+
- name: Test with unittest
run: |
python -m unittest
+
- name: Test scripts
run: |
pip install -e .
- di=cpu # inference devices # define device
# train
- python yolov5/train.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di
- yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di
- yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device $di --evolve 2
+ python yolov5/train.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu
+ yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu --freeze 10
+ yolov5 train --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --epochs 1 --device cpu --evolve 2
# detect
- python yolov5/detect.py --weights yolov5/weights/yolov5n.pt --device $di
- yolov5 detect --weights yolov5/weights/yolov5n.pt --device $di
- python yolov5/detect.py --weights runs/train/exp/weights/last.pt --device $di
- yolov5 detect --weights runs/train/exp/weights/last.pt --device $di
+ python yolov5/detect.py --weights yolov5/weights/yolov5n.pt --device cpu
+ yolov5 detect --weights yolov5/weights/yolov5n.pt --device cpu
+ python yolov5/detect.py --weights runs/train/exp/weights/last.pt --device cpu
+ yolov5 detect --weights runs/train/exp/weights/last.pt --device cpu
# val
- python yolov5/val.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device $di
- yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device $di
- python yolov5/val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
- yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
+ python yolov5/val.py --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device cpu
+ yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights yolov5/weights/yolov5n.pt --device cpu
+ python yolov5/val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device cpu
+ yolov5 val --data yolov5/data/coco128.yaml --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device cpu
# export
pip install onnx onnx-simplifier tensorflowjs
- python yolov5/export.py --weights yolov5/weights/yolov5n.pt --device $di --include 'torchscript,onnx,tflite'
- yolov5 export --weights yolov5/weights/yolov5n.pt --device $di --simplify --include torchscript,onnx,saved_model,pb,tfjs
+ python yolov5/export.py --weights yolov5/weights/yolov5n.pt --device cpu --include torchscript,onnx,tflite
+ yolov5 export --weights yolov5/weights/yolov5n.pt --device cpu --simplify --include torchscript,onnx,saved_model,pb,tfjs
+ # benchmarks
+ yolov5 benchmarks --weights yolov5s.pt --img 128 --pt-only --device cpu
+ # classify
+ yolov5 classify train --img 224 --weights yolov5s-cls.pt --epochs 1 --device cpu
+ yolov5 classify val --img 224 --weights yolov5s-cls.pt --device cpu
+ yolov5 classify predict --img 224 --weights yolov5s-cls.pt --device cpu
+ # segment
+ yolov5 segment train --img 128 --weights yolov5s-seg.pt --epochs 1 --device cpu
+ # yolov5 segment val --img 128 --weights yolov5s-seg.pt --device cpu
+ yolov5 segment predict --img 128 --weights yolov5s-seg.pt --device cpu
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 0c21dc0..5aa3d15 100755
--- a/.gitignore
+++ b/.gitignore
@@ -44,6 +44,11 @@ pycocotools/*
results*.txt
gcp_test*.sh
.neptune
+*.bin
+*.pb
+*saved_model
+*.zip
+datasets/
# Datasets -------------------------------------------------------------------------------------------------------------
coco/
diff --git a/README.md b/README.md
index f2b93d9..8931cd5 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ You can finally install YOLOv5 o
-This yolov5 package contains everything from ultralytics/yolov5 at this commit plus:
+This yolov5 package contains everything from ultralytics/yolov5 at this commit plus:
1. Easy installation via pip: `pip install yolov5`
diff --git a/data_coco.yml b/data_coco.yml
new file mode 100644
index 0000000..18a32bf
--- /dev/null
+++ b/data_coco.yml
@@ -0,0 +1,4 @@
+train_json_path: "/home/fatihakyon/data/witwiser/banned_objects_with_negative_samples/train.json"
+train_image_dir: "/home/fatihakyon/data/witwiser/banned_objects_with_negative_samples"
+val_json_path: "/home/fatihakyon/data/witwiser/banned_objects_with_negative_samples/val.json"
+val_image_dir: "/home/fatihakyon/data/witwiser/banned_objects_with_negative_samples"
\ No newline at end of file
diff --git a/data_wandb.yaml b/data_wandb.yaml
new file mode 100644
index 0000000..360d7d4
--- /dev/null
+++ b/data_wandb.yaml
@@ -0,0 +1,13 @@
+names:
+- earphone
+- cable
+- cellphone
+- Headphone
+- calculator
+nc: 5
+train: wandb-artifact://YOLOv5/train
+train_image_dir: /home/fatihakyon/data/witwiser/banned_objects_with_negative_samples
+train_json_path: /home/fatihakyon/data/witwiser/banned_objects_with_negative_samples/train.json
+val: wandb-artifact://YOLOv5/val
+val_image_dir: /home/fatihakyon/data/witwiser/banned_objects_with_negative_samples
+val_json_path: /home/fatihakyon/data/witwiser/banned_objects_with_negative_samples/val.json
diff --git a/data_yolo.yml b/data_yolo.yml
new file mode 100644
index 0000000..c502a65
--- /dev/null
+++ b/data_yolo.yml
@@ -0,0 +1,5 @@
+names: ['earphone', 'cable', 'cellphone', 'Headphone', 'calculator']
+nc: 5
+train: /home/fatihakyon/dev/fcakyon/yolov5-pip/runs/train/yolo_upload_dataset_test/data/train
+val: /home/fatihakyon/dev/fcakyon/yolov5-pip/runs/train/yolo_upload_dataset_test/data/val
+yolo_s3_data_dir: s3://obss-ml/computer-vision/model/yolov5/yolo_upload_dataset_test2/data/
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 72ffe0a..d1d74d4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,11 +12,12 @@ scipy>=1.4.1
torch>=1.7.0
torchvision>=0.8.1
tqdm>=4.64.0
-protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
+# protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
# Logging -------------------------------------
tensorboard>=2.4.1
# wandb
+# clearml
# Plotting ------------------------------------
pandas>=1.1.4
@@ -46,4 +47,4 @@ fire
# AWS
boto3>=1.19.1
# coco to yolov5 conversion
-sahi>=0.9.1
+sahi>=0.10.5
diff --git a/yolov5/__init__.py b/yolov5/__init__.py
index c51a34d..50f3112 100644
--- a/yolov5/__init__.py
+++ b/yolov5/__init__.py
@@ -1,4 +1,4 @@
from yolov5.helpers import YOLOv5
from yolov5.helpers import load_model as load
-__version__ = "6.1.9"
+__version__ = "6.2.0"
diff --git a/yolov5/utils/benchmarks.py b/yolov5/benchmarks.py
similarity index 66%
rename from yolov5/utils/benchmarks.py
rename to yolov5/benchmarks.py
index bcbf8f9..c1e2e8b 100644
--- a/yolov5/utils/benchmarks.py
+++ b/yolov5/benchmarks.py
@@ -22,31 +22,37 @@
$ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
Usage:
- $ python utils/benchmarks.py --weights yolov5s.pt --img 640
+ $ yolov5 benchmarks --weights yolov5s.pt --img 640
"""
import argparse
import platform
+import sys
import time
from pathlib import Path
import pandas as pd
FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = FILE.parents[0] # YOLOv5 root directory
# ROOT = ROOT.relative_to(Path.cwd()) # relative
-import yolov5.export as export
-import yolov5.val as val
+import yolov5.export
+from yolov5.models.experimental import attempt_load
+from yolov5.models.yolo import SegmentationModel
+from yolov5.segment.val import run as val_seg
from yolov5.utils import notebook_init
from yolov5.utils.general import LOGGER, check_yaml, file_size, print_args
from yolov5.utils.torch_utils import select_device
+from yolov5.val import run as val_det
def run(
- weights=ROOT / 'yolov5s.pt', # weights path
- imgsz=640, # inference size (pixels)
- batch_size=1, # batch size
+ weights='yolov5s.pt', # weights path
+ imgsz=None, # inference size (pixels)
+ img=None, # inference size (pixels)
+ batch_size=None, # batch size
+ batch=None, # batch size
data=ROOT / 'data/coco128.yaml', # dataset.yaml path
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
@@ -54,11 +60,21 @@ def run(
pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
+ if imgsz is None and img is None:
+ imgsz = 640
+ elif img is not None:
+ imgsz = img
+ if batch_size is None and batch is None:
+ batch_size = 1
+ elif batch is not None:
+ batch_size = batch
+
y, t = [], time.time()
device = select_device(device)
- for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)
+ model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
+ for i, (name, f, suffix, cpu, gpu) in yolov5.export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)
try:
- assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
+ assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported
assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
if 'cpu' in device.type:
assert cpu, 'inference not supported on CPU'
@@ -69,14 +85,18 @@ def run(
if f == '-':
w = weights # PyTorch format
else:
- w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others
+ w = yolov5.export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others
assert suffix in str(w), 'export failed'
# Validate
- result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)
- metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls))
- speeds = result[2] # times (preprocess, inference, postprocess)
- y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference
+ if model_type == SegmentationModel:
+ result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)
+ metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
+ else: # DetectionModel:
+ result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)
+ metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls))
+ speed = result[2][1] # times (preprocess, inference, postprocess)
+ y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference
except Exception as e:
if hard_fail:
assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
@@ -87,12 +107,16 @@ def run(
# Print results
LOGGER.info('\n')
- parse_opt()
+ #parse_opt()
notebook_init() # print system info
- c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
+ c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
py = pd.DataFrame(y, columns=c)
LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
LOGGER.info(str(py if map else py.iloc[:, :2]))
+ if hard_fail and isinstance(hard_fail, str):
+ metrics = py['mAP50-95'].array # values to compare to floor
+ floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+ assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
return py
@@ -109,10 +133,10 @@ def test(
):
y, t = [], time.time()
device = select_device(device)
- for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)
+ for i, (name, f, suffix, gpu) in yolov5.export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)
try:
w = weights if f == '-' else \
- export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights
+ yolov5.export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights
assert suffix in str(w), 'export failed'
y.append([name, True])
except Exception:
@@ -128,6 +152,13 @@ def test(
return py
+def run_cli(**kwargs):
+ '''
+ To be called from yolov5.cli
+ '''
+ _ = run(**kwargs)
+
+
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
@@ -138,17 +169,17 @@ def parse_opt():
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--test', action='store_true', help='test exports only')
parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
- parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure')
+ parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
print_args(vars(opt))
return opt
-def main(opt):
+def main():
+ opt = parse_opt()
test(**vars(opt)) if opt.test else run(**vars(opt))
if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
+ main()
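The benchmarks hunk above adds `img`/`batch` as aliases for `imgsz`/`batch_size` and lets `--hard-fail` carry an optional metric floor. A minimal sketch of driving it from Python after installing the package (weights filename is an assumption; the package downloads it on demand):

    # Sketch only: exercises the new keyword aliases added to yolov5/benchmarks.py
    from yolov5 import benchmarks

    # `img` and `batch` resolve to `imgsz` and `batch_size` inside run()
    results = benchmarks.run(weights='yolov5s.pt', img=128, batch=1, device='cpu', pt_only=True)
    print(results)  # pandas DataFrame: Format, Size (MB), mAP50-95, Inference time (ms)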
diff --git a/yolov5/classify/__init__.py b/yolov5/classify/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/yolov5/classify/predict.py b/yolov5/classify/predict.py
new file mode 100644
index 0000000..c2afe51
--- /dev/null
+++ b/yolov5/classify/predict.py
@@ -0,0 +1,225 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+ $ yolov5 classify predict --weights yolov5s-cls.pt --source 0 # webcam
+ img.jpg # image
+ vid.mp4 # video
+ path/ # directory
+ 'path/*.jpg' # glob
+ 'https://youtu.be/Zgi9g1ksQHc' # YouTube
+ 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+ $ yolov5 classify predict --weights yolov5s-cls.pt # PyTorch
+ yolov5s-cls.torchscript # TorchScript
+ yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-cls.xml # OpenVINO
+ yolov5s-cls.engine # TensorRT
+ yolov5s-cls.mlmodel # CoreML (macOS-only)
+ yolov5s-cls_saved_model # TensorFlow SavedModel
+ yolov5s-cls.pb # TensorFlow GraphDef
+ yolov5s-cls.tflite # TensorFlow Lite
+ yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-cls_paddle_model # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+import torch.nn.functional as F
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+from yolov5.models.common import DetectMultiBackend
+from yolov5.utils.augmentations import classify_transforms
+from yolov5.utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages,
+ LoadStreams)
+from yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size,
+ check_imshow, check_requirements, colorstr,
+ cv2, increment_path, print_args,
+ strip_optimizer)
+from yolov5.utils.plots import Annotator
+from yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+ weights='yolov5s-cls.pt', # model.pt path(s)
+ source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
+ data=ROOT / 'data/coco128.yaml', # dataset.yaml path
+ imgsz=None, # inference size (height, width)
+ img=None, # inference size (pixels)
+ device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
+ view_img=False, # show results
+ save_txt=False, # save results to *.txt
+ nosave=False, # do not save images/videos
+ augment=False, # augmented inference
+ visualize=False, # visualize features
+ update=False, # update all models
+ project=ROOT / 'runs/predict-cls', # save results to project/name
+ name='exp', # save results to project/name
+ exist_ok=False, # existing project/name ok, do not increment
+ half=False, # use FP16 half-precision inference
+ dnn=False, # use OpenCV DNN for ONNX inference
+ vid_stride=1, # video frame-rate stride
+):
+ source = str(source)
+ save_img = not nosave and not source.endswith('.txt') # save inference images
+ is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+ is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+ webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
+ if is_url and is_file:
+ source = check_file(source) # download
+
+ if imgsz is None and img is None:
+ imgsz = 224
+ elif img is not None:
+ imgsz = img
+
+ if isinstance(imgsz, int):
+ imgsz = [imgsz, imgsz]
+
+ # Directories
+ save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+ # Load model
+ device = select_device(device)
+ model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+ stride, names, pt = model.stride, model.names, model.pt
+ imgsz = check_img_size(imgsz, s=stride) # check image size
+
+ # Dataloader
+ if webcam:
+ view_img = check_imshow()
+ dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+ bs = len(dataset) # batch_size
+ else:
+ dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+ bs = 1 # batch_size
+ vid_path, vid_writer = [None] * bs, [None] * bs
+
+ # Run inference
+ model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
+ seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+ for path, im, im0s, vid_cap, s in dataset:
+ with dt[0]:
+ im = torch.Tensor(im).to(device)
+ im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
+ if len(im.shape) == 3:
+ im = im[None] # expand for batch dim
+
+ # Inference
+ with dt[1]:
+ results = model(im)
+
+ # Post-process
+ with dt[2]:
+ pred = F.softmax(results, dim=1) # probabilities
+
+ # Process predictions
+ for i, prob in enumerate(pred): # per image
+ seen += 1
+ if webcam: # batch_size >= 1
+ p, im0 = path[i], im0s[i].copy()
+ s += f'{i}: '
+ else:
+ p, im0 = path, im0s.copy()
+
+ p = Path(p) # to Path
+ save_path = str(save_dir / p.name) # im.jpg
+ s += '%gx%g ' % im.shape[2:] # print string
+ annotator = Annotator(im0, example=str(names), pil=True)
+
+ # Print results
+ top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices
+ s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
+
+ # Write results
+ if save_img or view_img: # Add bbox to image
+ text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
+ annotator.text((32, 32), text, txt_color=(255, 255, 255))
+
+ # Stream results
+ im0 = annotator.result()
+ if view_img:
+ if platform.system() == 'Linux' and p not in windows:
+ windows.append(p)
+ cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
+ cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+ cv2.imshow(str(p), im0)
+ cv2.waitKey(1) # 1 millisecond
+
+ # Save results (image with detections)
+ if save_img:
+ if dataset.mode == 'image':
+ cv2.imwrite(save_path, im0)
+ else: # 'video' or 'stream'
+ if vid_path[i] != save_path: # new video
+ vid_path[i] = save_path
+ if isinstance(vid_writer[i], cv2.VideoWriter):
+ vid_writer[i].release() # release previous video writer
+ if vid_cap: # video
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ else: # stream
+ fps, w, h = 30, im0.shape[1], im0.shape[0]
+ save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
+ vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+ vid_writer[i].write(im0)
+
+ # Print time (inference-only)
+ LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms")
+
+ # Print results
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+ if save_txt or save_img:
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+ if update:
+ strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s-cls.pt', help='model path(s)')
+ parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam')
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+ parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--view-img', action='store_true', help='show results')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--visualize', action='store_true', help='visualize features')
+ parser.add_argument('--update', action='store_true', help='update all models')
+ parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
+ parser.add_argument('--name', default='exp', help='save results to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+ parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+ parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+ opt = parser.parse_args()
+ opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
+ print_args(vars(opt))
+ return opt
+
+
+def main():
+ opt = parse_opt()
+ #check_requirements(exclude=('tensorboard', 'thop'))
+ run(**vars(opt))
+
+
+if __name__ == "__main__":
+ main()
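classify/predict.py can also be called from Python rather than the CLI; a minimal sketch (the source path is a placeholder):

    # Sketch only: classification inference via the run() defined above
    from yolov5.classify import predict

    # `img` is accepted as an alias for `imgsz` and defaults to 224 when neither is given
    predict.run(weights='yolov5s-cls.pt', source='data/images', img=224, device='cpu')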
diff --git a/yolov5/classify/train.py b/yolov5/classify/train.py
new file mode 100644
index 0000000..f5adb66
--- /dev/null
+++ b/yolov5/classify/train.py
@@ -0,0 +1,348 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Train a YOLOv5 classifier model on a classification dataset
+
+Usage - Single-GPU training:
+ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128
+
+Usage - Multi-GPU DDP training:
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+
+Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
+YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
+Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+import torch.hub as hub
+import torch.optim.lr_scheduler as lr_scheduler
+import torchvision
+from torch.cuda import amp
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+from yolov5.classify import val as validate
+from yolov5.models.experimental import attempt_load
+from yolov5.models.yolo import ClassificationModel, DetectionModel
+from yolov5.utils.dataloaders import create_classification_dataloader
+from yolov5.utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory,
+ check_git_status, check_requirements,
+ colorstr, download, increment_path,
+ init_seeds, print_args, yaml_save)
+from yolov5.utils.loggers import GenericLogger
+from yolov5.utils.plots import imshow_cls
+from yolov5.utils.torch_utils import (ModelEMA, model_info,
+ reshape_classifier_output, select_device,
+ smart_DDP, smart_optimizer,
+ smartCrossEntropyLoss,
+ torch_distributed_zero_first)
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+
+
+def train(opt, device):
+ init_seeds(opt.seed + 1 + RANK, deterministic=True)
+ save_dir, data, bs, epochs, nw, imgsz, pretrained = \
+ opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
+ opt.imgsz, str(opt.pretrained).lower() == 'true'
+ cuda = device.type != 'cpu'
+
+ # Directories
+ wdir = save_dir / 'weights'
+ wdir.mkdir(parents=True, exist_ok=True) # make dir
+ last, best = wdir / 'last.pt', wdir / 'best.pt'
+
+ # Save run settings
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
+
+ # Logger
+ logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
+
+ # Download Dataset
+ with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
+ data_dir = data if data.is_dir() else (DATASETS_DIR / data)
+ if not data_dir.is_dir():
+ LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
+ t = time.time()
+ if str(data) == 'imagenet':
+ subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True)
+ else:
+ url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
+ download(url, dir=data_dir.parent)
+ s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
+ LOGGER.info(s)
+
+ # Dataloaders
+ nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes
+ trainloader = create_classification_dataloader(path=data_dir / 'train',
+ imgsz=imgsz,
+ batch_size=bs // WORLD_SIZE,
+ augment=True,
+ cache=opt.cache,
+ rank=LOCAL_RANK,
+ workers=nw)
+
+ test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val
+ if RANK in {-1, 0}:
+ testloader = create_classification_dataloader(path=test_dir,
+ imgsz=imgsz,
+ batch_size=bs // WORLD_SIZE * 2,
+ augment=False,
+ cache=opt.cache,
+ rank=-1,
+ workers=nw)
+
+ # Model
+ with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
+ if Path(opt.model).is_file() or opt.model.endswith('.pt'):
+ model = attempt_load(opt.model, device='cpu', fuse=False)
+ elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
+ model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
+ else:
+ m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models
+ raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
+ if isinstance(model, DetectionModel):
+ LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
+ model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model
+ reshape_classifier_output(model, nc) # update class count
+ for m in model.modules():
+ if not pretrained and hasattr(m, 'reset_parameters'):
+ m.reset_parameters()
+ if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
+ m.p = opt.dropout # set dropout
+ for p in model.parameters():
+ p.requires_grad = True # for training
+ model = model.to(device)
+
+ # Info
+ if RANK in {-1, 0}:
+ model.names = trainloader.dataset.classes # attach class names
+ model.transforms = testloader.dataset.torch_transforms # attach inference transforms
+ model_info(model)
+ if opt.verbose:
+ LOGGER.info(model)
+ images, labels = next(iter(trainloader))
+ file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
+ logger.log_images(file, name='Train Examples')
+ logger.log_graph(model, imgsz) # log model
+
+ # Optimizer
+ optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
+
+ # Scheduler
+ lrf = 0.01 # final lr (fraction of lr0)
+ # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
+ lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
+ # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
+ # final_div_factor=1 / 25 / lrf)
+
+ # EMA
+ ema = ModelEMA(model) if RANK in {-1, 0} else None
+
+ # DDP mode
+ if cuda and RANK != -1:
+ model = smart_DDP(model)
+
+ # Train
+ t0 = time.time()
+ criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function
+ best_fitness = 0.0
+ scaler = amp.GradScaler(enabled=cuda)
+ val = test_dir.stem # 'val' or 'test'
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
+ f'Using {nw * WORLD_SIZE} dataloader workers\n'
+ f"Logging results to {colorstr('bold', save_dir)}\n"
+ f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
+ f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
+ for epoch in range(epochs): # loop over the dataset multiple times
+ tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness
+ model.train()
+ if RANK != -1:
+ trainloader.sampler.set_epoch(epoch)
+ pbar = enumerate(trainloader)
+ if RANK in {-1, 0}:
+ pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
+ for i, (images, labels) in pbar: # progress bar
+ images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+ # Forward
+ with amp.autocast(enabled=cuda): # stability issues when enabled
+ loss = criterion(model(images), labels)
+
+ # Backward
+ scaler.scale(loss).backward()
+
+ # Optimize
+ scaler.unscale_(optimizer) # unscale gradients
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
+ scaler.step(optimizer)
+ scaler.update()
+ optimizer.zero_grad()
+ if ema:
+ ema.update(model)
+
+ if RANK in {-1, 0}:
+ # Print
+ tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses
+ mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
+ pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36
+
+ # Test
+ if i == len(pbar) - 1: # last batch
+ top1, top5, vloss = validate.run(model=ema.ema,
+ dataloader=testloader,
+ criterion=criterion,
+ pbar=pbar) # test accuracy, loss
+ fitness = top1 # define fitness as top1 accuracy
+
+ # Scheduler
+ scheduler.step()
+
+ # Log metrics
+ if RANK in {-1, 0}:
+ # Best fitness
+ if fitness > best_fitness:
+ best_fitness = fitness
+
+ # Log
+ metrics = {
+ "train/loss": tloss,
+ f"{val}/loss": vloss,
+ "metrics/accuracy_top1": top1,
+ "metrics/accuracy_top5": top5,
+ "lr/0": optimizer.param_groups[0]['lr']} # learning rate
+ logger.log_metrics(metrics, epoch)
+
+ # Save model
+ final_epoch = epoch + 1 == epochs
+ if (not opt.nosave) or final_epoch:
+ ckpt = {
+ 'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(),
+ 'ema': None, # deepcopy(ema.ema).half(),
+ 'updates': ema.updates,
+ 'optimizer': None, # optimizer.state_dict(),
+ 'opt': vars(opt),
+ 'date': datetime.now().isoformat()}
+
+ # Save last, best and delete
+ torch.save(ckpt, last)
+ if best_fitness == fitness:
+ torch.save(ckpt, best)
+ del ckpt
+
+ # Train complete
+ if RANK in {-1, 0} and final_epoch:
+ LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
+ f"\nResults saved to {colorstr('bold', save_dir)}"
+ f"\nPredict: yolov5 classify predict --weights {best} --source im.jpg"
+ f"\nValidate: yolov5 classify val --weights {best} --data {data_dir}"
+ f"\nExport: yolov5 export --weights {best} --include onnx"
+ f"\nPyPi: model = yolov5.load('custom.pt')"
+ f"\nVisualize: https://netron.app\n")
+
+ # Plot examples
+ images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels
+ pred = torch.max(ema.ema(images.to(device)), 1)[1]
+ file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg')
+
+ # Log results
+ meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()}
+ logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
+ logger.log_model(best, epochs, metadata=meta)
+
+
+def parse_opt(known=False):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
+ parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
+ parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
+ parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
+ parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
+ parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
+ parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
+ parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
+ parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
+ parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
+ parser.add_argument('--verbose', action='store_true', help='Verbose mode')
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+ # Neptune AI arguments
+ parser.add_argument('--neptune_token', type=str, default=None, help='neptune.ai api token')
+ parser.add_argument('--neptune_project', type=str, default=None, help='https://docs.neptune.ai/api-reference/neptune')
+
+ return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt):
+ # Checks
+ if RANK in {-1, 0}:
+ print_args(vars(opt))
+ check_git_status()
+ check_requirements()
+
+ # DDP mode
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ if LOCAL_RANK != -1:
+ assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
+ assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+ assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+ torch.cuda.set_device(LOCAL_RANK)
+ device = torch.device('cuda', LOCAL_RANK)
+ dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
+
+ # Parameters
+ opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
+
+ # Train
+ train(opt, device)
+
+
+def run(**kwargs):
+ # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
+ opt = parse_opt(True)
+ for k, v in kwargs.items():
+ setattr(opt, k, v)
+ main(opt)
+ return opt
+
+
+def run_cli(**kwargs):
+ '''
+ To be called from yolov5.cli
+ '''
+ opt = parse_opt(True)
+ for k, v in kwargs.items():
+ setattr(opt, k, v)
+ main(opt)
+
+if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
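The in-code usage hint ("from yolov5 import classify; classify.train.run(...)") expands to roughly the following sketch; the dataset name and sizes mirror the CI smoke test and are otherwise placeholders:

    # Sketch only: programmatic classification training via run()
    from yolov5.classify import train as classify_train

    # keyword arguments override the argparse defaults from parse_opt()
    opt = classify_train.run(model='yolov5s-cls.pt', data='mnist2560', imgsz=32, epochs=1, device='cpu')
    print(opt.save_dir)  # e.g. runs/train-cls/exp*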
diff --git a/yolov5/classify/val.py b/yolov5/classify/val.py
new file mode 100644
index 0000000..7af54b8
--- /dev/null
+++ b/yolov5/classify/val.py
@@ -0,0 +1,181 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Validate a trained YOLOv5 classification model on a classification dataset
+
+Usage:
+ $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)
+ $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet
+
+Usage - formats:
+ $ python classify/val.py --weights yolov5s-cls.pt # PyTorch
+ yolov5s-cls.torchscript # TorchScript
+ yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-cls.xml # OpenVINO
+ yolov5s-cls.engine # TensorRT
+ yolov5s-cls.mlmodel # CoreML (macOS-only)
+ yolov5s-cls_saved_model # TensorFlow SavedModel
+ yolov5s-cls.pb # TensorFlow GraphDef
+ yolov5s-cls.tflite # TensorFlow Lite
+ yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-cls_paddle_model # PaddlePaddle
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+from yolov5.models.common import DetectMultiBackend
+from yolov5.utils.dataloaders import create_classification_dataloader
+from yolov5.utils.general import (LOGGER, Profile, check_img_size,
+ check_requirements, colorstr, increment_path,
+ print_args)
+from yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+ data=ROOT / '../datasets/mnist', # dataset dir
+ weights='yolov5s-cls.pt', # model.pt path(s)
+ batch_size=None, # batch size
+ batch=None, # batch size
+ imgsz=None, # inference size (pixels)
+ img=None, # inference size (pixels)
+ device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
+ workers=8, # max dataloader workers (per RANK in DDP mode)
+ verbose=False, # verbose output
+ project='runs/val-cls', # save to project/name
+ name='exp', # save to project/name
+ exist_ok=False, # existing project/name ok, do not increment
+ half=False, # use FP16 half-precision inference
+ dnn=False, # use OpenCV DNN for ONNX inference
+ model=None,
+ dataloader=None,
+ criterion=None,
+ pbar=None,
+):
+
+ if imgsz is None and img is None:
+ imgsz = 224
+ elif img is not None:
+ imgsz = img
+ if batch_size is None and batch is None:
+ batch_size = 128
+ elif batch is not None:
+ batch_size = batch
+
+ # Initialize/load model and set device
+ training = model is not None
+ if training: # called by train.py
+ device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
+ half &= device.type != 'cpu' # half precision only supported on CUDA
+ model.half() if half else model.float()
+ else: # called directly
+ device = select_device(device, batch_size=batch_size)
+
+ # Directories
+ save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
+ save_dir.mkdir(parents=True, exist_ok=True) # make dir
+
+ # Load model
+ model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
+ stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+ imgsz = check_img_size(imgsz, s=stride) # check image size
+ half = model.fp16 # FP16 supported on limited backends with CUDA
+ if engine:
+ batch_size = model.batch_size
+ else:
+ device = model.device
+ if not (pt or jit):
+ batch_size = 1 # export.py models default to batch-size 1
+ LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+
+ # Dataloader
+ data = Path(data)
+ test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val
+ dataloader = create_classification_dataloader(path=test_dir,
+ imgsz=imgsz,
+ batch_size=batch_size,
+ augment=False,
+ rank=-1,
+ workers=workers)
+
+ model.eval()
+ pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
+ n = len(dataloader) # number of batches
+ action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
+ desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
+ bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0)
+ with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
+ for images, labels in bar:
+ with dt[0]:
+ images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+ with dt[1]:
+ y = model(images)
+
+ with dt[2]:
+ pred.append(y.argsort(1, descending=True)[:, :5])
+ targets.append(labels)
+ if criterion:
+ loss += criterion(y, labels)
+
+ loss /= n
+ pred, targets = torch.cat(pred), torch.cat(targets)
+ correct = (targets[:, None] == pred).float()
+ acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy
+ top1, top5 = acc.mean(0).tolist()
+
+ if pbar:
+ pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
+ if verbose: # all classes
+ LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
+ LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
+ for i, c in model.names.items():
+ aci = acc[targets == i]
+ top1i, top5i = aci.mean(0).tolist()
+ LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")
+
+ # Print results
+ t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image
+ shape = (1, 3, imgsz, imgsz)
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
+ LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
+
+ return top1, top5, loss
+
+
+def parse_opt():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
+ parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
+ parser.add_argument('--batch-size', type=int, default=128, help='batch size')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
+ parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+ parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+ opt = parser.parse_args()
+ print_args(vars(opt))
+ return opt
+
+
+def main():
+ opt = parse_opt()
+ check_requirements(exclude=('tensorboard', 'thop'))
+ run(**vars(opt))
+
+
+if __name__ == "__main__":
+ main()
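For reference, the top-1/top-5 accuracy block in classify/val.py reduces to this self-contained sketch (toy tensors, 3 classes):

    import torch

    logits = torch.tensor([[0.1, 2.0, 0.3],    # image 0, true class 1
                           [1.5, 0.2, 0.1]])   # image 1, true class 2
    targets = torch.tensor([1, 2])
    pred = logits.argsort(1, descending=True)[:, :5]                   # top-5 class indices per image
    correct = (targets[:, None] == pred).float()
    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)   # (top1, top5) per image
    top1, top5 = acc.mean(0).tolist()                                  # -> 0.5, 1.0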
diff --git a/yolov5/cli.py b/yolov5/cli.py
index 8a8f3bb..c062a28 100644
--- a/yolov5/cli.py
+++ b/yolov5/cli.py
@@ -1,7 +1,14 @@
import fire
+from yolov5.benchmarks import run_cli as benchmarks
+from yolov5.classify.predict import run as classify_predict
+from yolov5.classify.train import run_cli as classify_train
+from yolov5.classify.val import run as classify_val
from yolov5.detect import run as detect
from yolov5.export import run as export
+from yolov5.segment.predict import run as segment_predict
+from yolov5.segment.train import run_cli as segment_train
+from yolov5.segment.val import run as segment_val
from yolov5.train import run_cli as train
from yolov5.val import run as val
@@ -14,5 +21,8 @@ def app() -> None:
"val": val,
"detect": detect,
"export": export,
+ "benchmarks": benchmarks,
+ 'classify': {'train': classify_train, 'val': classify_val, 'predict': classify_predict},
+ 'segment': {'train': segment_train, 'val': segment_val, 'predict': segment_predict},
}
)
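python-fire turns nested dicts into nested subcommands, which is how `yolov5 classify train ...` and `yolov5 segment predict ...` are wired above; a standalone sketch of the pattern (function names are hypothetical):

    import fire

    def train(epochs: int = 1):
        print(f"training for {epochs} epoch(s)")

    def predict():
        print("predicting")

    if __name__ == "__main__":
        # e.g. `python tool.py classify train --epochs 2`
        fire.Fire({"classify": {"train": train, "predict": predict}})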
diff --git a/yolov5/data/Argoverse.yaml b/yolov5/data/Argoverse.yaml
index 9d21296..e3e9ba1 100644
--- a/yolov5/data/Argoverse.yaml
+++ b/yolov5/data/Argoverse.yaml
@@ -14,8 +14,15 @@ val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
# Classes
-nc: 8 # number of classes
-names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names
+names:
+ 0: person
+ 1: bicycle
+ 2: car
+ 3: motorcycle
+ 4: bus
+ 5: truck
+ 6: traffic_light
+ 7: stop_sign
# Download script/URL (optional) ---------------------------------------------------------------------------------------
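The dataset YAMLs switch from an `nc` + list-of-names layout to an index-keyed `names:` mapping; both resolve to the same class lookup, as this small sketch illustrates:

    import yaml

    old = yaml.safe_load("nc: 2\nnames: ['person', 'bicycle']")
    new = yaml.safe_load("names:\n  0: person\n  1: bicycle")
    assert dict(enumerate(old['names'])) == new['names'] == {0: 'person', 1: 'bicycle'}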
diff --git a/yolov5/data/GlobalWheat2020.yaml b/yolov5/data/GlobalWheat2020.yaml
index 4c43693..01812d0 100644
--- a/yolov5/data/GlobalWheat2020.yaml
+++ b/yolov5/data/GlobalWheat2020.yaml
@@ -26,8 +26,8 @@ test: # test images (optional) 1276 images
- images/uq_1
# Classes
-nc: 1 # number of classes
-names: ['wheat_head'] # class names
+names:
+ 0: wheat_head
# Download script/URL (optional) ---------------------------------------------------------------------------------------
diff --git a/yolov5/data/ImageNet.yaml b/yolov5/data/ImageNet.yaml
new file mode 100644
index 0000000..14f1295
--- /dev/null
+++ b/yolov5/data/ImageNet.yaml
@@ -0,0 +1,1022 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
+# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
+# Example usage: python classify/train.py --data imagenet
+# parent
+# ├── yolov5
+# └── datasets
+# └── imagenet ← downloads here (144 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/imagenet # dataset root dir
+train: train # train images (relative to 'path') 1281167 images
+val: val # val images (relative to 'path') 50000 images
+test: # test images (optional)
+
+# Classes
+names:
+ 0: tench
+ 1: goldfish
+ 2: great white shark
+ 3: tiger shark
+ 4: hammerhead shark
+ 5: electric ray
+ 6: stingray
+ 7: cock
+ 8: hen
+ 9: ostrich
+ 10: brambling
+ 11: goldfinch
+ 12: house finch
+ 13: junco
+ 14: indigo bunting
+ 15: American robin
+ 16: bulbul
+ 17: jay
+ 18: magpie
+ 19: chickadee
+ 20: American dipper
+ 21: kite
+ 22: bald eagle
+ 23: vulture
+ 24: great grey owl
+ 25: fire salamander
+ 26: smooth newt
+ 27: newt
+ 28: spotted salamander
+ 29: axolotl
+ 30: American bullfrog
+ 31: tree frog
+ 32: tailed frog
+ 33: loggerhead sea turtle
+ 34: leatherback sea turtle
+ 35: mud turtle
+ 36: terrapin
+ 37: box turtle
+ 38: banded gecko
+ 39: green iguana
+ 40: Carolina anole
+ 41: desert grassland whiptail lizard
+ 42: agama
+ 43: frilled-necked lizard
+ 44: alligator lizard
+ 45: Gila monster
+ 46: European green lizard
+ 47: chameleon
+ 48: Komodo dragon
+ 49: Nile crocodile
+ 50: American alligator
+ 51: triceratops
+ 52: worm snake
+ 53: ring-necked snake
+ 54: eastern hog-nosed snake
+ 55: smooth green snake
+ 56: kingsnake
+ 57: garter snake
+ 58: water snake
+ 59: vine snake
+ 60: night snake
+ 61: boa constrictor
+ 62: African rock python
+ 63: Indian cobra
+ 64: green mamba
+ 65: sea snake
+ 66: Saharan horned viper
+ 67: eastern diamondback rattlesnake
+ 68: sidewinder
+ 69: trilobite
+ 70: harvestman
+ 71: scorpion
+ 72: yellow garden spider
+ 73: barn spider
+ 74: European garden spider
+ 75: southern black widow
+ 76: tarantula
+ 77: wolf spider
+ 78: tick
+ 79: centipede
+ 80: black grouse
+ 81: ptarmigan
+ 82: ruffed grouse
+ 83: prairie grouse
+ 84: peacock
+ 85: quail
+ 86: partridge
+ 87: grey parrot
+ 88: macaw
+ 89: sulphur-crested cockatoo
+ 90: lorikeet
+ 91: coucal
+ 92: bee eater
+ 93: hornbill
+ 94: hummingbird
+ 95: jacamar
+ 96: toucan
+ 97: duck
+ 98: red-breasted merganser
+ 99: goose
+ 100: black swan
+ 101: tusker
+ 102: echidna
+ 103: platypus
+ 104: wallaby
+ 105: koala
+ 106: wombat
+ 107: jellyfish
+ 108: sea anemone
+ 109: brain coral
+ 110: flatworm
+ 111: nematode
+ 112: conch
+ 113: snail
+ 114: slug
+ 115: sea slug
+ 116: chiton
+ 117: chambered nautilus
+ 118: Dungeness crab
+ 119: rock crab
+ 120: fiddler crab
+ 121: red king crab
+ 122: American lobster
+ 123: spiny lobster
+ 124: crayfish
+ 125: hermit crab
+ 126: isopod
+ 127: white stork
+ 128: black stork
+ 129: spoonbill
+ 130: flamingo
+ 131: little blue heron
+ 132: great egret
+ 133: bittern
+ 134: crane (bird)
+ 135: limpkin
+ 136: common gallinule
+ 137: American coot
+ 138: bustard
+ 139: ruddy turnstone
+ 140: dunlin
+ 141: common redshank
+ 142: dowitcher
+ 143: oystercatcher
+ 144: pelican
+ 145: king penguin
+ 146: albatross
+ 147: grey whale
+ 148: killer whale
+ 149: dugong
+ 150: sea lion
+ 151: Chihuahua
+ 152: Japanese Chin
+ 153: Maltese
+ 154: Pekingese
+ 155: Shih Tzu
+ 156: King Charles Spaniel
+ 157: Papillon
+ 158: toy terrier
+ 159: Rhodesian Ridgeback
+ 160: Afghan Hound
+ 161: Basset Hound
+ 162: Beagle
+ 163: Bloodhound
+ 164: Bluetick Coonhound
+ 165: Black and Tan Coonhound
+ 166: Treeing Walker Coonhound
+ 167: English foxhound
+ 168: Redbone Coonhound
+ 169: borzoi
+ 170: Irish Wolfhound
+ 171: Italian Greyhound
+ 172: Whippet
+ 173: Ibizan Hound
+ 174: Norwegian Elkhound
+ 175: Otterhound
+ 176: Saluki
+ 177: Scottish Deerhound
+ 178: Weimaraner
+ 179: Staffordshire Bull Terrier
+ 180: American Staffordshire Terrier
+ 181: Bedlington Terrier
+ 182: Border Terrier
+ 183: Kerry Blue Terrier
+ 184: Irish Terrier
+ 185: Norfolk Terrier
+ 186: Norwich Terrier
+ 187: Yorkshire Terrier
+ 188: Wire Fox Terrier
+ 189: Lakeland Terrier
+ 190: Sealyham Terrier
+ 191: Airedale Terrier
+ 192: Cairn Terrier
+ 193: Australian Terrier
+ 194: Dandie Dinmont Terrier
+ 195: Boston Terrier
+ 196: Miniature Schnauzer
+ 197: Giant Schnauzer
+ 198: Standard Schnauzer
+ 199: Scottish Terrier
+ 200: Tibetan Terrier
+ 201: Australian Silky Terrier
+ 202: Soft-coated Wheaten Terrier
+ 203: West Highland White Terrier
+ 204: Lhasa Apso
+ 205: Flat-Coated Retriever
+ 206: Curly-coated Retriever
+ 207: Golden Retriever
+ 208: Labrador Retriever
+ 209: Chesapeake Bay Retriever
+ 210: German Shorthaired Pointer
+ 211: Vizsla
+ 212: English Setter
+ 213: Irish Setter
+ 214: Gordon Setter
+ 215: Brittany
+ 216: Clumber Spaniel
+ 217: English Springer Spaniel
+ 218: Welsh Springer Spaniel
+ 219: Cocker Spaniels
+ 220: Sussex Spaniel
+ 221: Irish Water Spaniel
+ 222: Kuvasz
+ 223: Schipperke
+ 224: Groenendael
+ 225: Malinois
+ 226: Briard
+ 227: Australian Kelpie
+ 228: Komondor
+ 229: Old English Sheepdog
+ 230: Shetland Sheepdog
+ 231: collie
+ 232: Border Collie
+ 233: Bouvier des Flandres
+ 234: Rottweiler
+ 235: German Shepherd Dog
+ 236: Dobermann
+ 237: Miniature Pinscher
+ 238: Greater Swiss Mountain Dog
+ 239: Bernese Mountain Dog
+ 240: Appenzeller Sennenhund
+ 241: Entlebucher Sennenhund
+ 242: Boxer
+ 243: Bullmastiff
+ 244: Tibetan Mastiff
+ 245: French Bulldog
+ 246: Great Dane
+ 247: St. Bernard
+ 248: husky
+ 249: Alaskan Malamute
+ 250: Siberian Husky
+ 251: Dalmatian
+ 252: Affenpinscher
+ 253: Basenji
+ 254: pug
+ 255: Leonberger
+ 256: Newfoundland
+ 257: Pyrenean Mountain Dog
+ 258: Samoyed
+ 259: Pomeranian
+ 260: Chow Chow
+ 261: Keeshond
+ 262: Griffon Bruxellois
+ 263: Pembroke Welsh Corgi
+ 264: Cardigan Welsh Corgi
+ 265: Toy Poodle
+ 266: Miniature Poodle
+ 267: Standard Poodle
+ 268: Mexican hairless dog
+ 269: grey wolf
+ 270: Alaskan tundra wolf
+ 271: red wolf
+ 272: coyote
+ 273: dingo
+ 274: dhole
+ 275: African wild dog
+ 276: hyena
+ 277: red fox
+ 278: kit fox
+ 279: Arctic fox
+ 280: grey fox
+ 281: tabby cat
+ 282: tiger cat
+ 283: Persian cat
+ 284: Siamese cat
+ 285: Egyptian Mau
+ 286: cougar
+ 287: lynx
+ 288: leopard
+ 289: snow leopard
+ 290: jaguar
+ 291: lion
+ 292: tiger
+ 293: cheetah
+ 294: brown bear
+ 295: American black bear
+ 296: polar bear
+ 297: sloth bear
+ 298: mongoose
+ 299: meerkat
+ 300: tiger beetle
+ 301: ladybug
+ 302: ground beetle
+ 303: longhorn beetle
+ 304: leaf beetle
+ 305: dung beetle
+ 306: rhinoceros beetle
+ 307: weevil
+ 308: fly
+ 309: bee
+ 310: ant
+ 311: grasshopper
+ 312: cricket
+ 313: stick insect
+ 314: cockroach
+ 315: mantis
+ 316: cicada
+ 317: leafhopper
+ 318: lacewing
+ 319: dragonfly
+ 320: damselfly
+ 321: red admiral
+ 322: ringlet
+ 323: monarch butterfly
+ 324: small white
+ 325: sulphur butterfly
+ 326: gossamer-winged butterfly
+ 327: starfish
+ 328: sea urchin
+ 329: sea cucumber
+ 330: cottontail rabbit
+ 331: hare
+ 332: Angora rabbit
+ 333: hamster
+ 334: porcupine
+ 335: fox squirrel
+ 336: marmot
+ 337: beaver
+ 338: guinea pig
+ 339: common sorrel
+ 340: zebra
+ 341: pig
+ 342: wild boar
+ 343: warthog
+ 344: hippopotamus
+ 345: ox
+ 346: water buffalo
+ 347: bison
+ 348: ram
+ 349: bighorn sheep
+ 350: Alpine ibex
+ 351: hartebeest
+ 352: impala
+ 353: gazelle
+ 354: dromedary
+ 355: llama
+ 356: weasel
+ 357: mink
+ 358: European polecat
+ 359: black-footed ferret
+ 360: otter
+ 361: skunk
+ 362: badger
+ 363: armadillo
+ 364: three-toed sloth
+ 365: orangutan
+ 366: gorilla
+ 367: chimpanzee
+ 368: gibbon
+ 369: siamang
+ 370: guenon
+ 371: patas monkey
+ 372: baboon
+ 373: macaque
+ 374: langur
+ 375: black-and-white colobus
+ 376: proboscis monkey
+ 377: marmoset
+ 378: white-headed capuchin
+ 379: howler monkey
+ 380: titi
+ 381: Geoffroy's spider monkey
+ 382: common squirrel monkey
+ 383: ring-tailed lemur
+ 384: indri
+ 385: Asian elephant
+ 386: African bush elephant
+ 387: red panda
+ 388: giant panda
+ 389: snoek
+ 390: eel
+ 391: coho salmon
+ 392: rock beauty
+ 393: clownfish
+ 394: sturgeon
+ 395: garfish
+ 396: lionfish
+ 397: pufferfish
+ 398: abacus
+ 399: abaya
+ 400: academic gown
+ 401: accordion
+ 402: acoustic guitar
+ 403: aircraft carrier
+ 404: airliner
+ 405: airship
+ 406: altar
+ 407: ambulance
+ 408: amphibious vehicle
+ 409: analog clock
+ 410: apiary
+ 411: apron
+ 412: waste container
+ 413: assault rifle
+ 414: backpack
+ 415: bakery
+ 416: balance beam
+ 417: balloon
+ 418: ballpoint pen
+ 419: Band-Aid
+ 420: banjo
+ 421: baluster
+ 422: barbell
+ 423: barber chair
+ 424: barbershop
+ 425: barn
+ 426: barometer
+ 427: barrel
+ 428: wheelbarrow
+ 429: baseball
+ 430: basketball
+ 431: bassinet
+ 432: bassoon
+ 433: swimming cap
+ 434: bath towel
+ 435: bathtub
+ 436: station wagon
+ 437: lighthouse
+ 438: beaker
+ 439: military cap
+ 440: beer bottle
+ 441: beer glass
+ 442: bell-cot
+ 443: bib
+ 444: tandem bicycle
+ 445: bikini
+ 446: ring binder
+ 447: binoculars
+ 448: birdhouse
+ 449: boathouse
+ 450: bobsleigh
+ 451: bolo tie
+ 452: poke bonnet
+ 453: bookcase
+ 454: bookstore
+ 455: bottle cap
+ 456: bow
+ 457: bow tie
+ 458: brass
+ 459: bra
+ 460: breakwater
+ 461: breastplate
+ 462: broom
+ 463: bucket
+ 464: buckle
+ 465: bulletproof vest
+ 466: high-speed train
+ 467: butcher shop
+ 468: taxicab
+ 469: cauldron
+ 470: candle
+ 471: cannon
+ 472: canoe
+ 473: can opener
+ 474: cardigan
+ 475: car mirror
+ 476: carousel
+ 477: tool kit
+ 478: carton
+ 479: car wheel
+ 480: automated teller machine
+ 481: cassette
+ 482: cassette player
+ 483: castle
+ 484: catamaran
+ 485: CD player
+ 486: cello
+ 487: mobile phone
+ 488: chain
+ 489: chain-link fence
+ 490: chain mail
+ 491: chainsaw
+ 492: chest
+ 493: chiffonier
+ 494: chime
+ 495: china cabinet
+ 496: Christmas stocking
+ 497: church
+ 498: movie theater
+ 499: cleaver
+ 500: cliff dwelling
+ 501: cloak
+ 502: clogs
+ 503: cocktail shaker
+ 504: coffee mug
+ 505: coffeemaker
+ 506: coil
+ 507: combination lock
+ 508: computer keyboard
+ 509: confectionery store
+ 510: container ship
+ 511: convertible
+ 512: corkscrew
+ 513: cornet
+ 514: cowboy boot
+ 515: cowboy hat
+ 516: cradle
+ 517: crane (machine)
+ 518: crash helmet
+ 519: crate
+ 520: infant bed
+ 521: Crock Pot
+ 522: croquet ball
+ 523: crutch
+ 524: cuirass
+ 525: dam
+ 526: desk
+ 527: desktop computer
+ 528: rotary dial telephone
+ 529: diaper
+ 530: digital clock
+ 531: digital watch
+ 532: dining table
+ 533: dishcloth
+ 534: dishwasher
+ 535: disc brake
+ 536: dock
+ 537: dog sled
+ 538: dome
+ 539: doormat
+ 540: drilling rig
+ 541: drum
+ 542: drumstick
+ 543: dumbbell
+ 544: Dutch oven
+ 545: electric fan
+ 546: electric guitar
+ 547: electric locomotive
+ 548: entertainment center
+ 549: envelope
+ 550: espresso machine
+ 551: face powder
+ 552: feather boa
+ 553: filing cabinet
+ 554: fireboat
+ 555: fire engine
+ 556: fire screen sheet
+ 557: flagpole
+ 558: flute
+ 559: folding chair
+ 560: football helmet
+ 561: forklift
+ 562: fountain
+ 563: fountain pen
+ 564: four-poster bed
+ 565: freight car
+ 566: French horn
+ 567: frying pan
+ 568: fur coat
+ 569: garbage truck
+ 570: gas mask
+ 571: gas pump
+ 572: goblet
+ 573: go-kart
+ 574: golf ball
+ 575: golf cart
+ 576: gondola
+ 577: gong
+ 578: gown
+ 579: grand piano
+ 580: greenhouse
+ 581: grille
+ 582: grocery store
+ 583: guillotine
+ 584: barrette
+ 585: hair spray
+ 586: half-track
+ 587: hammer
+ 588: hamper
+ 589: hair dryer
+ 590: hand-held computer
+ 591: handkerchief
+ 592: hard disk drive
+ 593: harmonica
+ 594: harp
+ 595: harvester
+ 596: hatchet
+ 597: holster
+ 598: home theater
+ 599: honeycomb
+ 600: hook
+ 601: hoop skirt
+ 602: horizontal bar
+ 603: horse-drawn vehicle
+ 604: hourglass
+ 605: iPod
+ 606: clothes iron
+ 607: jack-o'-lantern
+ 608: jeans
+ 609: jeep
+ 610: T-shirt
+ 611: jigsaw puzzle
+ 612: pulled rickshaw
+ 613: joystick
+ 614: kimono
+ 615: knee pad
+ 616: knot
+ 617: lab coat
+ 618: ladle
+ 619: lampshade
+ 620: laptop computer
+ 621: lawn mower
+ 622: lens cap
+ 623: paper knife
+ 624: library
+ 625: lifeboat
+ 626: lighter
+ 627: limousine
+ 628: ocean liner
+ 629: lipstick
+ 630: slip-on shoe
+ 631: lotion
+ 632: speaker
+ 633: loupe
+ 634: sawmill
+ 635: magnetic compass
+ 636: mail bag
+ 637: mailbox
+ 638: tights
+ 639: tank suit
+ 640: manhole cover
+ 641: maraca
+ 642: marimba
+ 643: mask
+ 644: match
+ 645: maypole
+ 646: maze
+ 647: measuring cup
+ 648: medicine chest
+ 649: megalith
+ 650: microphone
+ 651: microwave oven
+ 652: military uniform
+ 653: milk can
+ 654: minibus
+ 655: miniskirt
+ 656: minivan
+ 657: missile
+ 658: mitten
+ 659: mixing bowl
+ 660: mobile home
+ 661: Model T
+ 662: modem
+ 663: monastery
+ 664: monitor
+ 665: moped
+ 666: mortar
+ 667: square academic cap
+ 668: mosque
+ 669: mosquito net
+ 670: scooter
+ 671: mountain bike
+ 672: tent
+ 673: computer mouse
+ 674: mousetrap
+ 675: moving van
+ 676: muzzle
+ 677: nail
+ 678: neck brace
+ 679: necklace
+ 680: nipple
+ 681: notebook computer
+ 682: obelisk
+ 683: oboe
+ 684: ocarina
+ 685: odometer
+ 686: oil filter
+ 687: organ
+ 688: oscilloscope
+ 689: overskirt
+ 690: bullock cart
+ 691: oxygen mask
+ 692: packet
+ 693: paddle
+ 694: paddle wheel
+ 695: padlock
+ 696: paintbrush
+ 697: pajamas
+ 698: palace
+ 699: pan flute
+ 700: paper towel
+ 701: parachute
+ 702: parallel bars
+ 703: park bench
+ 704: parking meter
+ 705: passenger car
+ 706: patio
+ 707: payphone
+ 708: pedestal
+ 709: pencil case
+ 710: pencil sharpener
+ 711: perfume
+ 712: Petri dish
+ 713: photocopier
+ 714: plectrum
+ 715: Pickelhaube
+ 716: picket fence
+ 717: pickup truck
+ 718: pier
+ 719: piggy bank
+ 720: pill bottle
+ 721: pillow
+ 722: ping-pong ball
+ 723: pinwheel
+ 724: pirate ship
+ 725: pitcher
+ 726: hand plane
+ 727: planetarium
+ 728: plastic bag
+ 729: plate rack
+ 730: plow
+ 731: plunger
+ 732: Polaroid camera
+ 733: pole
+ 734: police van
+ 735: poncho
+ 736: billiard table
+ 737: soda bottle
+ 738: pot
+ 739: potter's wheel
+ 740: power drill
+ 741: prayer rug
+ 742: printer
+ 743: prison
+ 744: projectile
+ 745: projector
+ 746: hockey puck
+ 747: punching bag
+ 748: purse
+ 749: quill
+ 750: quilt
+ 751: race car
+ 752: racket
+ 753: radiator
+ 754: radio
+ 755: radio telescope
+ 756: rain barrel
+ 757: recreational vehicle
+ 758: reel
+ 759: reflex camera
+ 760: refrigerator
+ 761: remote control
+ 762: restaurant
+ 763: revolver
+ 764: rifle
+ 765: rocking chair
+ 766: rotisserie
+ 767: eraser
+ 768: rugby ball
+ 769: ruler
+ 770: running shoe
+ 771: safe
+ 772: safety pin
+ 773: salt shaker
+ 774: sandal
+ 775: sarong
+ 776: saxophone
+ 777: scabbard
+ 778: weighing scale
+ 779: school bus
+ 780: schooner
+ 781: scoreboard
+ 782: CRT screen
+ 783: screw
+ 784: screwdriver
+ 785: seat belt
+ 786: sewing machine
+ 787: shield
+ 788: shoe store
+ 789: shoji
+ 790: shopping basket
+ 791: shopping cart
+ 792: shovel
+ 793: shower cap
+ 794: shower curtain
+ 795: ski
+ 796: ski mask
+ 797: sleeping bag
+ 798: slide rule
+ 799: sliding door
+ 800: slot machine
+ 801: snorkel
+ 802: snowmobile
+ 803: snowplow
+ 804: soap dispenser
+ 805: soccer ball
+ 806: sock
+ 807: solar thermal collector
+ 808: sombrero
+ 809: soup bowl
+ 810: space bar
+ 811: space heater
+ 812: space shuttle
+ 813: spatula
+ 814: motorboat
+ 815: spider web
+ 816: spindle
+ 817: sports car
+ 818: spotlight
+ 819: stage
+ 820: steam locomotive
+ 821: through arch bridge
+ 822: steel drum
+ 823: stethoscope
+ 824: scarf
+ 825: stone wall
+ 826: stopwatch
+ 827: stove
+ 828: strainer
+ 829: tram
+ 830: stretcher
+ 831: couch
+ 832: stupa
+ 833: submarine
+ 834: suit
+ 835: sundial
+ 836: sunglass
+ 837: sunglasses
+ 838: sunscreen
+ 839: suspension bridge
+ 840: mop
+ 841: sweatshirt
+ 842: swimsuit
+ 843: swing
+ 844: switch
+ 845: syringe
+ 846: table lamp
+ 847: tank
+ 848: tape player
+ 849: teapot
+ 850: teddy bear
+ 851: television
+ 852: tennis ball
+ 853: thatched roof
+ 854: front curtain
+ 855: thimble
+ 856: threshing machine
+ 857: throne
+ 858: tile roof
+ 859: toaster
+ 860: tobacco shop
+ 861: toilet seat
+ 862: torch
+ 863: totem pole
+ 864: tow truck
+ 865: toy store
+ 866: tractor
+ 867: semi-trailer truck
+ 868: tray
+ 869: trench coat
+ 870: tricycle
+ 871: trimaran
+ 872: tripod
+ 873: triumphal arch
+ 874: trolleybus
+ 875: trombone
+ 876: tub
+ 877: turnstile
+ 878: typewriter keyboard
+ 879: umbrella
+ 880: unicycle
+ 881: upright piano
+ 882: vacuum cleaner
+ 883: vase
+ 884: vault
+ 885: velvet
+ 886: vending machine
+ 887: vestment
+ 888: viaduct
+ 889: violin
+ 890: volleyball
+ 891: waffle iron
+ 892: wall clock
+ 893: wallet
+ 894: wardrobe
+ 895: military aircraft
+ 896: sink
+ 897: washing machine
+ 898: water bottle
+ 899: water jug
+ 900: water tower
+ 901: whiskey jug
+ 902: whistle
+ 903: wig
+ 904: window screen
+ 905: window shade
+ 906: Windsor tie
+ 907: wine bottle
+ 908: wing
+ 909: wok
+ 910: wooden spoon
+ 911: wool
+ 912: split-rail fence
+ 913: shipwreck
+ 914: yawl
+ 915: yurt
+ 916: website
+ 917: comic book
+ 918: crossword
+ 919: traffic sign
+ 920: traffic light
+ 921: dust jacket
+ 922: menu
+ 923: plate
+ 924: guacamole
+ 925: consomme
+ 926: hot pot
+ 927: trifle
+ 928: ice cream
+ 929: ice pop
+ 930: baguette
+ 931: bagel
+ 932: pretzel
+ 933: cheeseburger
+ 934: hot dog
+ 935: mashed potato
+ 936: cabbage
+ 937: broccoli
+ 938: cauliflower
+ 939: zucchini
+ 940: spaghetti squash
+ 941: acorn squash
+ 942: butternut squash
+ 943: cucumber
+ 944: artichoke
+ 945: bell pepper
+ 946: cardoon
+ 947: mushroom
+ 948: Granny Smith
+ 949: strawberry
+ 950: orange
+ 951: lemon
+ 952: fig
+ 953: pineapple
+ 954: banana
+ 955: jackfruit
+ 956: custard apple
+ 957: pomegranate
+ 958: hay
+ 959: carbonara
+ 960: chocolate syrup
+ 961: dough
+ 962: meatloaf
+ 963: pizza
+ 964: pot pie
+ 965: burrito
+ 966: red wine
+ 967: espresso
+ 968: cup
+ 969: eggnog
+ 970: alp
+ 971: bubble
+ 972: cliff
+ 973: coral reef
+ 974: geyser
+ 975: lakeshore
+ 976: promontory
+ 977: shoal
+ 978: seashore
+ 979: valley
+ 980: volcano
+ 981: baseball player
+ 982: bridegroom
+ 983: scuba diver
+ 984: rapeseed
+ 985: daisy
+ 986: yellow lady's slipper
+ 987: corn
+ 988: acorn
+ 989: rose hip
+ 990: horse chestnut seed
+ 991: coral fungus
+ 992: agaric
+ 993: gyromitra
+ 994: stinkhorn mushroom
+ 995: earth star
+ 996: hen-of-the-woods
+ 997: bolete
+ 998: ear
+ 999: toilet paper
+
+
+# Download script/URL (optional)
+download: data/scripts/get_imagenet.sh
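The dataset YAMLs in this patch all receive the same treatment as the ImageNet file above: the separate 'nc' key and the flat 'names' list are replaced by a single 'names' mapping from class index to class name, so the class count is derived from the mapping instead of being declared twice. A minimal sketch of how a consumer can accept both layouts (the load_names helper is hypothetical, not part of this diff or of the yolov5 API):

import yaml

def load_names(yaml_path):
    # Normalise 'names' to a {class_id: class_name} dict regardless of YAML layout.
    with open(yaml_path, errors='ignore') as f:
        data = yaml.safe_load(f)
    names = data['names']
    if isinstance(names, (list, tuple)):  # legacy layout: flat list plus separate 'nc'
        names = dict(enumerate(names))
    return names, len(names)  # class count is derived, no 'nc' key required

With the mapping layout, len(names) replaces the old nc value, so the two can no longer drift out of sync.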
diff --git a/yolov5/data/Objects365.yaml b/yolov5/data/Objects365.yaml
index 4cc9475..05b26a1 100644
--- a/yolov5/data/Objects365.yaml
+++ b/yolov5/data/Objects365.yaml
@@ -14,48 +14,372 @@ val: images/val # val images (relative to 'path') 80000 images
test: # test images (optional)
# Classes
-nc: 365 # number of classes
-names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup',
- 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book',
- 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag',
- 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV',
- 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle',
- 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird',
- 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',
- 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning',
- 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife',
- 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock',
- 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish',
- 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan',
- 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard',
- 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign',
- 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
- 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard',
- 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry',
- 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
- 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors',
- 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape',
- 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',
- 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette',
- 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket',
- 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine',
- 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine',
- 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon',
- 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse',
- 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball',
- 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin',
- 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts',
- 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
- 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD',
- 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder',
- 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips',
- 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab',
- 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',
- 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart',
- 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French',
- 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell',
- 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil',
- 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis']
+names:
+ 0: Person
+ 1: Sneakers
+ 2: Chair
+ 3: Other Shoes
+ 4: Hat
+ 5: Car
+ 6: Lamp
+ 7: Glasses
+ 8: Bottle
+ 9: Desk
+ 10: Cup
+ 11: Street Lights
+ 12: Cabinet/shelf
+ 13: Handbag/Satchel
+ 14: Bracelet
+ 15: Plate
+ 16: Picture/Frame
+ 17: Helmet
+ 18: Book
+ 19: Gloves
+ 20: Storage box
+ 21: Boat
+ 22: Leather Shoes
+ 23: Flower
+ 24: Bench
+ 25: Potted Plant
+ 26: Bowl/Basin
+ 27: Flag
+ 28: Pillow
+ 29: Boots
+ 30: Vase
+ 31: Microphone
+ 32: Necklace
+ 33: Ring
+ 34: SUV
+ 35: Wine Glass
+ 36: Belt
+ 37: Monitor/TV
+ 38: Backpack
+ 39: Umbrella
+ 40: Traffic Light
+ 41: Speaker
+ 42: Watch
+ 43: Tie
+ 44: Trash bin Can
+ 45: Slippers
+ 46: Bicycle
+ 47: Stool
+ 48: Barrel/bucket
+ 49: Van
+ 50: Couch
+ 51: Sandals
+ 52: Basket
+ 53: Drum
+ 54: Pen/Pencil
+ 55: Bus
+ 56: Wild Bird
+ 57: High Heels
+ 58: Motorcycle
+ 59: Guitar
+ 60: Carpet
+ 61: Cell Phone
+ 62: Bread
+ 63: Camera
+ 64: Canned
+ 65: Truck
+ 66: Traffic cone
+ 67: Cymbal
+ 68: Lifesaver
+ 69: Towel
+ 70: Stuffed Toy
+ 71: Candle
+ 72: Sailboat
+ 73: Laptop
+ 74: Awning
+ 75: Bed
+ 76: Faucet
+ 77: Tent
+ 78: Horse
+ 79: Mirror
+ 80: Power outlet
+ 81: Sink
+ 82: Apple
+ 83: Air Conditioner
+ 84: Knife
+ 85: Hockey Stick
+ 86: Paddle
+ 87: Pickup Truck
+ 88: Fork
+ 89: Traffic Sign
+ 90: Balloon
+ 91: Tripod
+ 92: Dog
+ 93: Spoon
+ 94: Clock
+ 95: Pot
+ 96: Cow
+ 97: Cake
+ 98: Dinning Table
+ 99: Sheep
+ 100: Hanger
+ 101: Blackboard/Whiteboard
+ 102: Napkin
+ 103: Other Fish
+ 104: Orange/Tangerine
+ 105: Toiletry
+ 106: Keyboard
+ 107: Tomato
+ 108: Lantern
+ 109: Machinery Vehicle
+ 110: Fan
+ 111: Green Vegetables
+ 112: Banana
+ 113: Baseball Glove
+ 114: Airplane
+ 115: Mouse
+ 116: Train
+ 117: Pumpkin
+ 118: Soccer
+ 119: Skiboard
+ 120: Luggage
+ 121: Nightstand
+ 122: Tea pot
+ 123: Telephone
+ 124: Trolley
+ 125: Head Phone
+ 126: Sports Car
+ 127: Stop Sign
+ 128: Dessert
+ 129: Scooter
+ 130: Stroller
+ 131: Crane
+ 132: Remote
+ 133: Refrigerator
+ 134: Oven
+ 135: Lemon
+ 136: Duck
+ 137: Baseball Bat
+ 138: Surveillance Camera
+ 139: Cat
+ 140: Jug
+ 141: Broccoli
+ 142: Piano
+ 143: Pizza
+ 144: Elephant
+ 145: Skateboard
+ 146: Surfboard
+ 147: Gun
+ 148: Skating and Skiing shoes
+ 149: Gas stove
+ 150: Donut
+ 151: Bow Tie
+ 152: Carrot
+ 153: Toilet
+ 154: Kite
+ 155: Strawberry
+ 156: Other Balls
+ 157: Shovel
+ 158: Pepper
+ 159: Computer Box
+ 160: Toilet Paper
+ 161: Cleaning Products
+ 162: Chopsticks
+ 163: Microwave
+ 164: Pigeon
+ 165: Baseball
+ 166: Cutting/chopping Board
+ 167: Coffee Table
+ 168: Side Table
+ 169: Scissors
+ 170: Marker
+ 171: Pie
+ 172: Ladder
+ 173: Snowboard
+ 174: Cookies
+ 175: Radiator
+ 176: Fire Hydrant
+ 177: Basketball
+ 178: Zebra
+ 179: Grape
+ 180: Giraffe
+ 181: Potato
+ 182: Sausage
+ 183: Tricycle
+ 184: Violin
+ 185: Egg
+ 186: Fire Extinguisher
+ 187: Candy
+ 188: Fire Truck
+ 189: Billiards
+ 190: Converter
+ 191: Bathtub
+ 192: Wheelchair
+ 193: Golf Club
+ 194: Briefcase
+ 195: Cucumber
+ 196: Cigar/Cigarette
+ 197: Paint Brush
+ 198: Pear
+ 199: Heavy Truck
+ 200: Hamburger
+ 201: Extractor
+ 202: Extension Cord
+ 203: Tong
+ 204: Tennis Racket
+ 205: Folder
+ 206: American Football
+ 207: earphone
+ 208: Mask
+ 209: Kettle
+ 210: Tennis
+ 211: Ship
+ 212: Swing
+ 213: Coffee Machine
+ 214: Slide
+ 215: Carriage
+ 216: Onion
+ 217: Green beans
+ 218: Projector
+ 219: Frisbee
+ 220: Washing Machine/Drying Machine
+ 221: Chicken
+ 222: Printer
+ 223: Watermelon
+ 224: Saxophone
+ 225: Tissue
+ 226: Toothbrush
+ 227: Ice cream
+ 228: Hot-air balloon
+ 229: Cello
+ 230: French Fries
+ 231: Scale
+ 232: Trophy
+ 233: Cabbage
+ 234: Hot dog
+ 235: Blender
+ 236: Peach
+ 237: Rice
+ 238: Wallet/Purse
+ 239: Volleyball
+ 240: Deer
+ 241: Goose
+ 242: Tape
+ 243: Tablet
+ 244: Cosmetics
+ 245: Trumpet
+ 246: Pineapple
+ 247: Golf Ball
+ 248: Ambulance
+ 249: Parking meter
+ 250: Mango
+ 251: Key
+ 252: Hurdle
+ 253: Fishing Rod
+ 254: Medal
+ 255: Flute
+ 256: Brush
+ 257: Penguin
+ 258: Megaphone
+ 259: Corn
+ 260: Lettuce
+ 261: Garlic
+ 262: Swan
+ 263: Helicopter
+ 264: Green Onion
+ 265: Sandwich
+ 266: Nuts
+ 267: Speed Limit Sign
+ 268: Induction Cooker
+ 269: Broom
+ 270: Trombone
+ 271: Plum
+ 272: Rickshaw
+ 273: Goldfish
+ 274: Kiwi fruit
+ 275: Router/modem
+ 276: Poker Card
+ 277: Toaster
+ 278: Shrimp
+ 279: Sushi
+ 280: Cheese
+ 281: Notepaper
+ 282: Cherry
+ 283: Pliers
+ 284: CD
+ 285: Pasta
+ 286: Hammer
+ 287: Cue
+ 288: Avocado
+ 289: Hamimelon
+ 290: Flask
+ 291: Mushroom
+ 292: Screwdriver
+ 293: Soap
+ 294: Recorder
+ 295: Bear
+ 296: Eggplant
+ 297: Board Eraser
+ 298: Coconut
+ 299: Tape Measure/Ruler
+ 300: Pig
+ 301: Showerhead
+ 302: Globe
+ 303: Chips
+ 304: Steak
+ 305: Crosswalk Sign
+ 306: Stapler
+ 307: Camel
+ 308: Formula 1
+ 309: Pomegranate
+ 310: Dishwasher
+ 311: Crab
+ 312: Hoverboard
+ 313: Meat ball
+ 314: Rice Cooker
+ 315: Tuba
+ 316: Calculator
+ 317: Papaya
+ 318: Antelope
+ 319: Parrot
+ 320: Seal
+ 321: Butterfly
+ 322: Dumbbell
+ 323: Donkey
+ 324: Lion
+ 325: Urinal
+ 326: Dolphin
+ 327: Electric Drill
+ 328: Hair Dryer
+ 329: Egg tart
+ 330: Jellyfish
+ 331: Treadmill
+ 332: Lighter
+ 333: Grapefruit
+ 334: Game board
+ 335: Mop
+ 336: Radish
+ 337: Baozi
+ 338: Target
+ 339: French
+ 340: Spring Rolls
+ 341: Monkey
+ 342: Rabbit
+ 343: Pencil Case
+ 344: Yak
+ 345: Red Cabbage
+ 346: Binoculars
+ 347: Asparagus
+ 348: Barbell
+ 349: Scallop
+ 350: Noddles
+ 351: Comb
+ 352: Dumpling
+ 353: Oyster
+ 354: Table Tennis paddle
+ 355: Cosmetics Brush/Eyeliner Pencil
+ 356: Chainsaw
+ 357: Eraser
+ 358: Lobster
+ 359: Durian
+ 360: Okra
+ 361: Lipstick
+ 362: Cosmetics Mirror
+ 363: Curling
+ 364: Table Tennis
# Download script/URL (optional) ---------------------------------------------------------------------------------------
diff --git a/yolov5/data/SKU-110K.yaml b/yolov5/data/SKU-110K.yaml
index 2acf34d..edae717 100644
--- a/yolov5/data/SKU-110K.yaml
+++ b/yolov5/data/SKU-110K.yaml
@@ -14,8 +14,8 @@ val: val.txt # val images (relative to 'path') 588 images
test: test.txt # test images (optional) 2936 images
# Classes
-nc: 1 # number of classes
-names: ['object'] # class names
+names:
+ 0: object
# Download script/URL (optional) ---------------------------------------------------------------------------------------
diff --git a/yolov5/data/VOC.yaml b/yolov5/data/VOC.yaml
index 636ddc4..27d3810 100644
--- a/yolov5/data/VOC.yaml
+++ b/yolov5/data/VOC.yaml
@@ -20,9 +20,27 @@ test: # test images (optional)
- images/test2007
# Classes
-nc: 20 # number of classes
-names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
- 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names
+names:
+ 0: aeroplane
+ 1: bicycle
+ 2: bird
+ 3: boat
+ 4: bottle
+ 5: bus
+ 6: car
+ 7: cat
+ 8: chair
+ 9: cow
+ 10: diningtable
+ 11: dog
+ 12: horse
+ 13: motorbike
+ 14: person
+ 15: pottedplant
+ 16: sheep
+ 17: sofa
+ 18: train
+ 19: tvmonitor
# Download script/URL (optional) ---------------------------------------------------------------------------------------
@@ -47,12 +65,13 @@ download: |
w = int(size.find('width').text)
h = int(size.find('height').text)
+ names = list(yaml['names'].values()) # names list
for obj in root.iter('object'):
cls = obj.find('name').text
- if cls in yaml['names'] and not int(obj.find('difficult').text) == 1:
+ if cls in names and int(obj.find('difficult').text) != 1:
xmlbox = obj.find('bndbox')
bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
- cls_id = yaml['names'].index(cls) # class id
+ cls_id = names.index(cls) # class id
out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
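The VOC download script above adapts to the mapping by flattening it back into a list (names = list(yaml['names'].values())) so that names.index(cls) still yields the class id. A hypothetical equivalent (not part of the diff) that avoids the repeated linear search by building a reverse map once:

names = {0: 'aeroplane', 1: 'bicycle', 2: 'bird'}  # truncated VOC mapping, for illustration only
name_to_id = {v: k for k, v in names.items()}  # one-time reverse lookup table
assert name_to_id['bicycle'] == list(names.values()).index('bicycle') == 1

Both approaches return identical ids as long as the YAML keys run 0..n-1 in order, which holds for every file in this patch.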
diff --git a/yolov5/data/VisDrone.yaml b/yolov5/data/VisDrone.yaml
index 10337b4..a8bcf8e 100644
--- a/yolov5/data/VisDrone.yaml
+++ b/yolov5/data/VisDrone.yaml
@@ -14,8 +14,17 @@ val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
# Classes
-nc: 10 # number of classes
-names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']
+names:
+ 0: pedestrian
+ 1: people
+ 2: bicycle
+ 3: car
+ 4: van
+ 5: truck
+ 6: tricycle
+ 7: awning-tricycle
+ 8: bus
+ 9: motor
# Download script/URL (optional) ---------------------------------------------------------------------------------------
diff --git a/yolov5/data/coco.yaml b/yolov5/data/coco.yaml
index 0c0c4ad..d64dfc7 100644
--- a/yolov5/data/coco.yaml
+++ b/yolov5/data/coco.yaml
@@ -14,16 +14,87 @@ val: val2017.txt # val images (relative to 'path') 5000 images
test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
# Classes
-nc: 80 # number of classes
-names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
- 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
- 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
- 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
- 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
- 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
- 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
- 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
- 'hair drier', 'toothbrush'] # class names
+names:
+ 0: person
+ 1: bicycle
+ 2: car
+ 3: motorcycle
+ 4: airplane
+ 5: bus
+ 6: train
+ 7: truck
+ 8: boat
+ 9: traffic light
+ 10: fire hydrant
+ 11: stop sign
+ 12: parking meter
+ 13: bench
+ 14: bird
+ 15: cat
+ 16: dog
+ 17: horse
+ 18: sheep
+ 19: cow
+ 20: elephant
+ 21: bear
+ 22: zebra
+ 23: giraffe
+ 24: backpack
+ 25: umbrella
+ 26: handbag
+ 27: tie
+ 28: suitcase
+ 29: frisbee
+ 30: skis
+ 31: snowboard
+ 32: sports ball
+ 33: kite
+ 34: baseball bat
+ 35: baseball glove
+ 36: skateboard
+ 37: surfboard
+ 38: tennis racket
+ 39: bottle
+ 40: wine glass
+ 41: cup
+ 42: fork
+ 43: knife
+ 44: spoon
+ 45: bowl
+ 46: banana
+ 47: apple
+ 48: sandwich
+ 49: orange
+ 50: broccoli
+ 51: carrot
+ 52: hot dog
+ 53: pizza
+ 54: donut
+ 55: cake
+ 56: chair
+ 57: couch
+ 58: potted plant
+ 59: bed
+ 60: dining table
+ 61: toilet
+ 62: tv
+ 63: laptop
+ 64: mouse
+ 65: remote
+ 66: keyboard
+ 67: cell phone
+ 68: microwave
+ 69: oven
+ 70: toaster
+ 71: sink
+ 72: refrigerator
+ 73: book
+ 74: clock
+ 75: vase
+ 76: scissors
+ 77: teddy bear
+ 78: hair drier
+ 79: toothbrush
# Download script/URL (optional)
diff --git a/yolov5/data/coco128-seg.yaml b/yolov5/data/coco128-seg.yaml
new file mode 100644
index 0000000..5e81910
--- /dev/null
+++ b/yolov5/data/coco128-seg.yaml
@@ -0,0 +1,101 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
+# Example usage: python train.py --data coco128-seg.yaml
+# parent
+# ├── yolov5
+# └── datasets
+# └── coco128-seg ← downloads here (7 MB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/coco128-seg # dataset root dir
+train: images/train2017 # train images (relative to 'path') 128 images
+val: images/train2017 # val images (relative to 'path') 128 images
+test: # test images (optional)
+
+# Classes
+names:
+ 0: person
+ 1: bicycle
+ 2: car
+ 3: motorcycle
+ 4: airplane
+ 5: bus
+ 6: train
+ 7: truck
+ 8: boat
+ 9: traffic light
+ 10: fire hydrant
+ 11: stop sign
+ 12: parking meter
+ 13: bench
+ 14: bird
+ 15: cat
+ 16: dog
+ 17: horse
+ 18: sheep
+ 19: cow
+ 20: elephant
+ 21: bear
+ 22: zebra
+ 23: giraffe
+ 24: backpack
+ 25: umbrella
+ 26: handbag
+ 27: tie
+ 28: suitcase
+ 29: frisbee
+ 30: skis
+ 31: snowboard
+ 32: sports ball
+ 33: kite
+ 34: baseball bat
+ 35: baseball glove
+ 36: skateboard
+ 37: surfboard
+ 38: tennis racket
+ 39: bottle
+ 40: wine glass
+ 41: cup
+ 42: fork
+ 43: knife
+ 44: spoon
+ 45: bowl
+ 46: banana
+ 47: apple
+ 48: sandwich
+ 49: orange
+ 50: broccoli
+ 51: carrot
+ 52: hot dog
+ 53: pizza
+ 54: donut
+ 55: cake
+ 56: chair
+ 57: couch
+ 58: potted plant
+ 59: bed
+ 60: dining table
+ 61: toilet
+ 62: tv
+ 63: laptop
+ 64: mouse
+ 65: remote
+ 66: keyboard
+ 67: cell phone
+ 68: microwave
+ 69: oven
+ 70: toaster
+ 71: sink
+ 72: refrigerator
+ 73: book
+ 74: clock
+ 75: vase
+ 76: scissors
+ 77: teddy bear
+ 78: hair drier
+ 79: toothbrush
+
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/coco128-seg.zip
diff --git a/yolov5/data/coco128.yaml b/yolov5/data/coco128.yaml
index 2517d20..1255673 100644
--- a/yolov5/data/coco128.yaml
+++ b/yolov5/data/coco128.yaml
@@ -14,16 +14,87 @@ val: images/train2017 # val images (relative to 'path') 128 images
test: # test images (optional)
# Classes
-nc: 80 # number of classes
-names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
- 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
- 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
- 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
- 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
- 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
- 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
- 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
- 'hair drier', 'toothbrush'] # class names
+names:
+ 0: person
+ 1: bicycle
+ 2: car
+ 3: motorcycle
+ 4: airplane
+ 5: bus
+ 6: train
+ 7: truck
+ 8: boat
+ 9: traffic light
+ 10: fire hydrant
+ 11: stop sign
+ 12: parking meter
+ 13: bench
+ 14: bird
+ 15: cat
+ 16: dog
+ 17: horse
+ 18: sheep
+ 19: cow
+ 20: elephant
+ 21: bear
+ 22: zebra
+ 23: giraffe
+ 24: backpack
+ 25: umbrella
+ 26: handbag
+ 27: tie
+ 28: suitcase
+ 29: frisbee
+ 30: skis
+ 31: snowboard
+ 32: sports ball
+ 33: kite
+ 34: baseball bat
+ 35: baseball glove
+ 36: skateboard
+ 37: surfboard
+ 38: tennis racket
+ 39: bottle
+ 40: wine glass
+ 41: cup
+ 42: fork
+ 43: knife
+ 44: spoon
+ 45: bowl
+ 46: banana
+ 47: apple
+ 48: sandwich
+ 49: orange
+ 50: broccoli
+ 51: carrot
+ 52: hot dog
+ 53: pizza
+ 54: donut
+ 55: cake
+ 56: chair
+ 57: couch
+ 58: potted plant
+ 59: bed
+ 60: dining table
+ 61: toilet
+ 62: tv
+ 63: laptop
+ 64: mouse
+ 65: remote
+ 66: keyboard
+ 67: cell phone
+ 68: microwave
+ 69: oven
+ 70: toaster
+ 71: sink
+ 72: refrigerator
+ 73: book
+ 74: clock
+ 75: vase
+ 76: scissors
+ 77: teddy bear
+ 78: hair drier
+ 79: toothbrush
# Download script/URL (optional)
diff --git a/yolov5/data/hyps/hyp.finetune.yaml b/yolov5/data/hyps/hyp.finetune.yaml
deleted file mode 100644
index b89d66f..0000000
--- a/yolov5/data/hyps/hyp.finetune.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-# Hyperparameters for VOC finetuning
-# python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50
-# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
-# Hyperparameter Evolution Results
-# Generations: 306
-# P R mAP.5 mAP.5:.95 box obj cls
-# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146
-
-lr0: 0.0032
-lrf: 0.12
-momentum: 0.843
-weight_decay: 0.00036
-warmup_epochs: 2.0
-warmup_momentum: 0.5
-warmup_bias_lr: 0.05
-box: 0.0296
-cls: 0.243
-cls_pw: 0.631
-obj: 0.301
-obj_pw: 0.911
-iou_t: 0.2
-anchor_t: 2.91
-# anchors: 3.63
-fl_gamma: 0.0
-hsv_h: 0.0138
-hsv_s: 0.664
-hsv_v: 0.464
-degrees: 0.373
-translate: 0.245
-scale: 0.898
-shear: 0.602
-perspective: 0.0
-flipud: 0.00856
-fliplr: 0.5
-mosaic: 1.0
-mixup: 0.243
-copy_paste: 0.0
diff --git a/yolov5/data/hyps/hyp.finetune_objects365.yaml b/yolov5/data/hyps/hyp.finetune_objects365.yaml
deleted file mode 100644
index 073720a..0000000
--- a/yolov5/data/hyps/hyp.finetune_objects365.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-lr0: 0.00258
-lrf: 0.17
-momentum: 0.779
-weight_decay: 0.00058
-warmup_epochs: 1.33
-warmup_momentum: 0.86
-warmup_bias_lr: 0.0711
-box: 0.0539
-cls: 0.299
-cls_pw: 0.825
-obj: 0.632
-obj_pw: 1.0
-iou_t: 0.2
-anchor_t: 3.44
-anchors: 3.2
-fl_gamma: 0.0
-hsv_h: 0.0188
-hsv_s: 0.704
-hsv_v: 0.36
-degrees: 0.0
-translate: 0.0902
-scale: 0.491
-shear: 0.0
-perspective: 0.0
-flipud: 0.0
-fliplr: 0.5
-mosaic: 1.0
-mixup: 0.0
-copy_paste: 0.0
diff --git a/yolov5/data/hyps/hyp.scratch.yaml b/yolov5/data/hyps/hyp.scratch.yaml
deleted file mode 100644
index 31f6d14..0000000
--- a/yolov5/data/hyps/hyp.scratch.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-# Hyperparameters for COCO training from scratch
-# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
-# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
-lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
-lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
-momentum: 0.937 # SGD momentum/Adam beta1
-weight_decay: 0.0005 # optimizer weight decay 5e-4
-warmup_epochs: 3.0 # warmup epochs (fractions ok)
-warmup_momentum: 0.8 # warmup initial momentum
-warmup_bias_lr: 0.1 # warmup initial bias lr
-box: 0.05 # box loss gain
-cls: 0.5 # cls loss gain
-cls_pw: 1.0 # cls BCELoss positive_weight
-obj: 1.0 # obj loss gain (scale with pixels)
-obj_pw: 1.0 # obj BCELoss positive_weight
-iou_t: 0.20 # IoU training threshold
-anchor_t: 4.0 # anchor-multiple threshold
-# anchors: 3 # anchors per output layer (0 to ignore)
-fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
-hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
-hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
-hsv_v: 0.4 # image HSV-Value augmentation (fraction)
-degrees: 0.0 # image rotation (+/- deg)
-translate: 0.1 # image translation (+/- fraction)
-scale: 0.5 # image scale (+/- gain)
-shear: 0.0 # image shear (+/- deg)
-perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
-flipud: 0.0 # image flip up-down (probability)
-fliplr: 0.5 # image flip left-right (probability)
-mosaic: 1.0 # image mosaic (probability)
-mixup: 0.0 # image mixup (probability)
-copy_paste: 0.0 # segment copy-paste (probability)
diff --git a/yolov5/data/scripts/download_weights.sh b/yolov5/data/scripts/download_weights.sh
index e9fa653..a4f3bec 100755
--- a/yolov5/data/scripts/download_weights.sh
+++ b/yolov5/data/scripts/download_weights.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download latest models from https://github.com/ultralytics/yolov5/releases
-# Example usage: bash path/to/download_weights.sh
+# Example usage: bash data/scripts/download_weights.sh
# parent
# └── yolov5
# ├── yolov5s.pt ← downloads here
@@ -11,10 +11,11 @@
diff --git a/yolov5/detect.py b/yolov5/detect.py
--- a/yolov5/detect.py
+++ b/yolov5/detect.py
@@ -211,10 +208,10 @@ def run(
vid_writer[i].write(im0)
# Print time (inference-only)
- LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
+ LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
# Print results
- t = tuple(x / seen * 1E3 for x in dt) # speeds per image
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
@@ -251,6 +248,7 @@ def parse_opt():
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+ parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(vars(opt))
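Two changes run through the detect.py hunks above: per-stage timing now comes from Profile objects (dt[1].dt is the most recent inference interval printed per image, and x.t the accumulated total used for the final speed summary), and parse_opt gains a --vid-stride flag for sampling every Nth frame of a video source. A rough sketch of the timer behaviour those hunks rely on, assuming a plain wall-clock implementation; the real Profile class is imported from yolov5.utils.general and may do more (e.g. CUDA synchronisation):

import time

class Profile:
    # Minimal stand-in for the Profile context manager referenced above.
    def __init__(self):
        self.t = 0.0   # accumulated seconds across all uses
        self.dt = 0.0  # duration of the most recent block

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.dt = time.time() - self.start
        self.t += self.dt

# dt = (Profile(), Profile(), Profile())  # pre-process, inference, NMS
# with dt[1]:
#     pred = model(im)
# dt[1].dt * 1E3 -> last inference time in ms, dt[1].t -> running total in seconds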
diff --git a/yolov5/export.py b/yolov5/export.py
index 2de17fb..8b108da 100644
--- a/yolov5/export.py
+++ b/yolov5/export.py
@@ -15,6 +15,7 @@
TensorFlow Lite | `tflite` | yolov5s.tflite
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov5s_web_model/
+PaddlePaddle | `paddle` | yolov5s_paddle_model/
Requirements:
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
@@ -24,16 +25,17 @@
$ yolov5 export --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
Inference:
- $ yolov5 export --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s.xml # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (macOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ $ yolov5 detect --weights yolov5s.pt # PyTorch
+ yolov5s.torchscript # TorchScript
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s.xml # OpenVINO
+ yolov5s.engine # TensorRT
+ yolov5s.mlmodel # CoreML (macOS-only)
+ yolov5s_saved_model # TensorFlow SavedModel
+ yolov5s.pb # TensorFlow GraphDef
+ yolov5s.tflite # TensorFlow Lite
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s_paddle_model # PaddlePaddle
TensorFlow.js:
$ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -53,7 +55,6 @@
import pandas as pd
import torch
-import yaml
from torch.utils.mobile_optimizer import optimize_for_mobile
FILE = Path(__file__).resolve()
@@ -62,13 +63,14 @@
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from yolov5.models.experimental import attempt_load
-from yolov5.models.yolo import Detect
+from yolov5.models.yolo import ClassificationModel, Detect
from yolov5.utils.dataloaders import LoadImages
-from yolov5.utils.general import (LOGGER, check_dataset, check_img_size,
- check_requirements, check_version,
- check_yaml, colorstr, file_size, print_args,
- url2file)
-from yolov5.utils.torch_utils import select_device
+from yolov5.utils.general import (LOGGER, Profile, check_dataset,
+ check_img_size, check_requirements,
+ check_version, check_yaml, colorstr,
+ file_size, get_default_args, print_args,
+ url2file, yaml_save)
+from yolov5.utils.torch_utils import select_device, smart_inference_mode
def export_formats():
@@ -84,204 +86,216 @@ def export_formats():
['TensorFlow GraphDef', 'pb', '.pb', True, True],
['TensorFlow Lite', 'tflite', '.tflite', True, False],
['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
- ['TensorFlow.js', 'tfjs', '_web_model', False, False],]
+ ['TensorFlow.js', 'tfjs', '_web_model', False, False],
+ ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
+def try_export(inner_func):
+ # YOLOv5 export decorator, i.e. @try_export
+ inner_args = get_default_args(inner_func)
+
+ def outer_func(*args, **kwargs):
+ prefix = inner_args['prefix']
+ try:
+ with Profile() as dt:
+ f, model = inner_func(*args, **kwargs)
+ LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
+ return f, model
+ except Exception as e:
+ LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
+ return None, None
+
+ return outer_func
+
+
+@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
# YOLOv5 TorchScript model export
- try:
- LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
- f = file.with_suffix('.torchscript')
-
- ts = torch.jit.trace(model, im, strict=False)
- d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
- extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
- if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
- optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
- else:
- ts.save(str(f), _extra_files=extra_files)
+ LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
+ f = file.with_suffix('.torchscript')
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'{prefix} export failure: {e}')
+ ts = torch.jit.trace(model, im, strict=False)
+ d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
+ extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
+ if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+ optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
+ else:
+ ts.save(str(f), _extra_files=extra_files)
+ return f, None
-def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
+@try_export
+def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
# YOLOv5 ONNX export
- try:
- check_requirements(('onnx',))
- import onnx
-
- LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
- f = file.with_suffix('.onnx')
-
- torch.onnx.export(
- model.cpu() if dynamic else model, # --dynamic only compatible with cpu
- im.cpu() if dynamic else im,
- f,
- verbose=False,
- opset_version=opset,
- training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
- do_constant_folding=not train,
- input_names=['images'],
- output_names=['output'],
- dynamic_axes={
- 'images': {
- 0: 'batch',
- 2: 'height',
- 3: 'width'}, # shape(1,3,640,640)
- 'output': {
- 0: 'batch',
- 1: 'anchors'} # shape(1,25200,85)
- } if dynamic else None)
-
- # Checks
- model_onnx = onnx.load(f) # load onnx model
- onnx.checker.check_model(model_onnx) # check onnx model
-
- # Metadata
- d = {'stride': int(max(model.stride)), 'names': model.names}
- for k, v in d.items():
- meta = model_onnx.metadata_props.add()
- meta.key, meta.value = k, str(v)
- onnx.save(model_onnx, f)
-
- # Simplify
- if simplify:
- try:
- cuda = torch.cuda.is_available()
- check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
- import onnxsim
-
- LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
- model_onnx, check = onnxsim.simplify(model_onnx)
- assert check, 'assert check failed'
- onnx.save(model_onnx, f)
- except Exception as e:
- LOGGER.info(f'{prefix} simplifier failure: {e}')
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'{prefix} export failure: {e}')
-
-
-def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
+ check_requirements('onnx')
+ import onnx
+
+ LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
+ f = file.with_suffix('.onnx')
+
+ torch.onnx.export(
+ model.cpu() if dynamic else model, # --dynamic only compatible with cpu
+ im.cpu() if dynamic else im,
+ f,
+ verbose=False,
+ opset_version=opset,
+ do_constant_folding=True,
+ input_names=['images'],
+ output_names=['output'],
+ dynamic_axes={
+ 'images': {
+ 0: 'batch',
+ 2: 'height',
+ 3: 'width'}, # shape(1,3,640,640)
+ 'output': {
+ 0: 'batch',
+ 1: 'anchors'} # shape(1,25200,85)
+ } if dynamic else None)
+
+ # Checks
+ model_onnx = onnx.load(f) # load onnx model
+ onnx.checker.check_model(model_onnx) # check onnx model
+
+ # Metadata
+ d = {'stride': int(max(model.stride)), 'names': model.names}
+ for k, v in d.items():
+ meta = model_onnx.metadata_props.add()
+ meta.key, meta.value = k, str(v)
+ onnx.save(model_onnx, f)
+
+ # Simplify
+ if simplify:
+ try:
+ cuda = torch.cuda.is_available()
+ check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
+ import onnxsim
+
+ LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+ model_onnx, check = onnxsim.simplify(model_onnx)
+ assert check, 'assert check failed'
+ onnx.save(model_onnx, f)
+ except Exception as e:
+ LOGGER.info(f'{prefix} simplifier failure: {e}')
+ return f, model_onnx
+
+
+@try_export
+def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
# YOLOv5 OpenVINO export
- try:
- check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
- import openvino.inference_engine as ie
+ check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/
+ import openvino.inference_engine as ie
+
+ LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
+ f = str(file).replace('.pt', f'_openvino_model{os.sep}')
- LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
- f = str(file).replace('.pt', f'_openvino_model{os.sep}')
+ cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
+ subprocess.run(cmd.split(), check=True, env=os.environ) # export
+ yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
+ return f, None
- cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
- subprocess.check_output(cmd.split()) # export
- with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g:
- yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
+@try_export
+def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
+ # YOLOv5 Paddle export
+ check_requirements(('paddlepaddle', 'x2paddle'))
+ import x2paddle
+ from x2paddle.convert import pytorch2paddle
+ LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
+ f = str(file).replace('.pt', f'_paddle_model{os.sep}')
+ pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export
+ yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
+ return f, None
+
+
+@try_export
def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
# YOLOv5 CoreML export
- try:
- check_requirements(('coremltools',))
- import coremltools as ct
-
- LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
- f = file.with_suffix('.mlmodel')
-
- ts = torch.jit.trace(model, im, strict=False) # TorchScript model
- ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
- bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
- if bits < 32:
- if platform.system() == 'Darwin': # quantization only supported on macOS
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning
- ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
- else:
- print(f'{prefix} quantization only supported on macOS, skipping...')
- ct_model.save(f)
-
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return ct_model, f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
- return None, None
-
-
-def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False):
+ check_requirements('coremltools')
+ import coremltools as ct
+
+ LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
+ f = file.with_suffix('.mlmodel')
+
+ ts = torch.jit.trace(model, im, strict=False) # TorchScript model
+ ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
+ bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
+ if bits < 32:
+ if platform.system() == 'Darwin': # quantization only supported on macOS
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning
+ ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+ else:
+ print(f'{prefix} quantization only supported on macOS, skipping...')
+ ct_model.save(f)
+ return f, ct_model
+
+
+@try_export
+def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
# YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
- prefix = colorstr('TensorRT:')
+ assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
try:
- assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
- try:
- import tensorrt as trt
- except Exception:
- if platform.system() == 'Linux':
- check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
- import tensorrt as trt
-
- if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
- grid = model.model[-1].anchor_grid
- model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
- export_onnx(model, im, file, 12, train, dynamic, simplify) # opset 12
- model.model[-1].anchor_grid = grid
- else: # TensorRT >= 8
- check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
- export_onnx(model, im, file, 13, train, dynamic, simplify) # opset 13
- onnx = file.with_suffix('.onnx')
-
- LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
- assert onnx.exists(), f'failed to export ONNX file: {onnx}'
- f = file.with_suffix('.engine') # TensorRT engine file
- logger = trt.Logger(trt.Logger.INFO)
- if verbose:
- logger.min_severity = trt.Logger.Severity.VERBOSE
-
- builder = trt.Builder(logger)
- config = builder.create_builder_config()
- config.max_workspace_size = workspace * 1 << 30
- # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
-
- flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
- network = builder.create_network(flag)
- parser = trt.OnnxParser(network, logger)
- if not parser.parse_from_file(str(onnx)):
- raise RuntimeError(f'failed to load ONNX file: {onnx}')
-
- inputs = [network.get_input(i) for i in range(network.num_inputs)]
- outputs = [network.get_output(i) for i in range(network.num_outputs)]
- LOGGER.info(f'{prefix} Network Description:')
+ import tensorrt as trt
+ except Exception:
+ if platform.system() == 'Linux':
+ check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
+ import tensorrt as trt
+
+ if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
+ grid = model.model[-1].anchor_grid
+ model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
+ export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12
+ model.model[-1].anchor_grid = grid
+ else: # TensorRT >= 8
+ check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
+ export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12
+ onnx = file.with_suffix('.onnx')
+
+ LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
+ assert onnx.exists(), f'failed to export ONNX file: {onnx}'
+ f = file.with_suffix('.engine') # TensorRT engine file
+ logger = trt.Logger(trt.Logger.INFO)
+ if verbose:
+ logger.min_severity = trt.Logger.Severity.VERBOSE
+
+ builder = trt.Builder(logger)
+ config = builder.create_builder_config()
+ config.max_workspace_size = workspace * 1 << 30
+ # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
+
+ flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
+ network = builder.create_network(flag)
+ parser = trt.OnnxParser(network, logger)
+ if not parser.parse_from_file(str(onnx)):
+ raise RuntimeError(f'failed to load ONNX file: {onnx}')
+
+ inputs = [network.get_input(i) for i in range(network.num_inputs)]
+ outputs = [network.get_output(i) for i in range(network.num_outputs)]
+ for inp in inputs:
+ LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
+ for out in outputs:
+ LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
+
+ if dynamic:
+ if im.shape[0] <= 1:
+ LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
+ profile = builder.create_optimization_profile()
for inp in inputs:
- LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
- for out in outputs:
- LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
-
- if dynamic:
- if im.shape[0] <= 1:
- LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
- profile = builder.create_optimization_profile()
- for inp in inputs:
- profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
- config.add_optimization_profile(profile)
-
- LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
- if builder.platform_has_fast_fp16 and half:
- config.set_flag(trt.BuilderFlag.FP16)
- with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
- t.write(engine.serialize())
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
+ profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
+ config.add_optimization_profile(profile)
+
+ LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
+ if builder.platform_has_fast_fp16 and half:
+ config.set_flag(trt.BuilderFlag.FP16)
+ with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+ t.write(engine.serialize())
+ return f, None
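
Note: the optimization profile registered above supplies (min, opt, max) shapes per input for --dynamic engines. A standalone illustration with assumed values (--batch-size 16, 3x640x640 input):

# Illustrative only: shapes handed to profile.set_shape() for a hypothetical
# --dynamic export with a maximum batch size of 16.
im_shape = (16, 3, 640, 640)                           # dummy export tensor, BCHW
min_shape = (1, *im_shape[1:])                         # (1, 3, 640, 640)
opt_shape = (max(1, im_shape[0] // 2), *im_shape[1:])  # (8, 3, 640, 640)
max_shape = im_shape                                   # (16, 3, 640, 640)
print(min_shape, opt_shape, max_shape)
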


+@try_export
def export_saved_model(model,
im,
file,
@@ -295,178 +309,157 @@ def export_saved_model(model,
keras=False,
prefix=colorstr('TensorFlow SavedModel:')):
# YOLOv5 TensorFlow SavedModel export
- try:
- import tensorflow as tf
- from tensorflow.python.framework.convert_to_constants import \
- convert_variables_to_constants_v2
-
- from models.tf import TFModel
-
- LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
- f = str(file).replace('.pt', '_saved_model')
- batch_size, ch, *imgsz = list(im.shape) # BCHW
-
- tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
- im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
- _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
- inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
- outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
- keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
- keras_model.trainable = False
- keras_model.summary()
- if keras:
- keras_model.save(f, save_format='tf')
- else:
- spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
- m = tf.function(lambda x: keras_model(x)) # full model
- m = m.get_concrete_function(spec)
- frozen_func = convert_variables_to_constants_v2(m)
- tfm = tf.Module()
- tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
- tfm.__call__(im)
- tf.saved_model.save(tfm,
- f,
- options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
- if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return keras_model, f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
- return None, None
+ import tensorflow as tf
+ from tensorflow.python.framework.convert_to_constants import \
+ convert_variables_to_constants_v2
+
+ from yolov5.models.tf import TFModel
+
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+ f = str(file).replace('.pt', '_saved_model')
+ batch_size, ch, *imgsz = list(im.shape) # BCHW
+
+ tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+ im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
+ _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+ inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
+ outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+ keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
+ keras_model.trainable = False
+ keras_model.summary()
+ if keras:
+ keras_model.save(f, save_format='tf')
+ else:
+ spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
+ m = tf.function(lambda x: keras_model(x)) # full model
+ m = m.get_concrete_function(spec)
+ frozen_func = convert_variables_to_constants_v2(m)
+ tfm = tf.Module()
+ tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
+ tfm.__call__(im)
+ tf.saved_model.save(tfm,
+ f,
+ options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
+ tf.__version__, '2.6') else tf.saved_model.SaveOptions())
+ return f, keras_model


+@try_export
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
- try:
- import tensorflow as tf
- from tensorflow.python.framework.convert_to_constants import \
- convert_variables_to_constants_v2
+ import tensorflow as tf
+ from tensorflow.python.framework.convert_to_constants import \
+ convert_variables_to_constants_v2
- LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
- f = file.with_suffix('.pb')
-
- m = tf.function(lambda x: keras_model(x)) # full model
- m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
- frozen_func = convert_variables_to_constants_v2(m)
- frozen_func.graph.as_graph_def()
- tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+ f = file.with_suffix('.pb')
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
+ m = tf.function(lambda x: keras_model(x)) # full model
+ m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+ frozen_func = convert_variables_to_constants_v2(m)
+ frozen_func.graph.as_graph_def()
+ tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+ return f, None


+@try_export
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
# YOLOv5 TensorFlow Lite export
- try:
- import tensorflow as tf
-
- LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
- batch_size, ch, *imgsz = list(im.shape) # BCHW
- f = str(file).replace('.pt', '-fp16.tflite')
-
- converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
- converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
- converter.target_spec.supported_types = [tf.float16]
- converter.optimizations = [tf.lite.Optimize.DEFAULT]
- if int8:
- from models.tf import representative_dataset_gen
- dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
- converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
- converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
- converter.target_spec.supported_types = []
- converter.inference_input_type = tf.uint8 # or tf.int8
- converter.inference_output_type = tf.uint8 # or tf.int8
- converter.experimental_new_quantizer = True
- f = str(file).replace('.pt', '-int8.tflite')
- if nms or agnostic_nms:
- converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
-
- tflite_model = converter.convert()
- open(f, "wb").write(tflite_model)
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+ import tensorflow as tf
+
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+ batch_size, ch, *imgsz = list(im.shape) # BCHW
+ f = str(file).replace('.pt', '-fp16.tflite')
+
+ converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
+ converter.target_spec.supported_types = [tf.float16]
+ converter.optimizations = [tf.lite.Optimize.DEFAULT]
+ if int8:
+ from yolov5.models.tf import representative_dataset_gen
+ dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
+ converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
+ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+ converter.target_spec.supported_types = []
+ converter.inference_input_type = tf.uint8 # or tf.int8
+ converter.inference_output_type = tf.uint8 # or tf.int8
+ converter.experimental_new_quantizer = True
+ f = str(file).replace('.pt', '-int8.tflite')
+ if nms or agnostic_nms:
+ converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
+
+ tflite_model = converter.convert()
+ open(f, "wb").write(tflite_model)
+ return f, None
+
+
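
Note: the INT8 branch above depends on representative_dataset_gen from yolov5/models/tf.py, which this diff does not show. A minimal sketch of such a calibration generator, with the tuple layout assumed from the LoadImages iterator:

import numpy as np

def representative_dataset_gen(dataset, ncalib=100):
    # Yield a few float32 BHWC arrays scaled to [0, 1] for TFLite INT8 calibration
    for n, (path, img, im0s, vid_cap, s) in enumerate(dataset):
        im = np.transpose(img, [1, 2, 0])                      # CHW -> HWC
        im = np.expand_dims(im, axis=0).astype(np.float32) / 255.0
        yield [im]
        if n >= ncalib:
            break
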
+@try_export
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
# YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
- try:
- cmd = 'edgetpu_compiler --version'
- help_url = 'https://coral.ai/docs/edgetpu/compiler/'
- assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
- if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
- LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
- sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
- for c in (
- 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
- 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
- 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
- subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
- ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
-
- LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
- f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
- f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
-
- cmd = f"edgetpu_compiler -s -o {file.parent} {f_tfl}"
- subprocess.run(cmd.split(), check=True)
-
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+ cmd = 'edgetpu_compiler --version'
+ help_url = 'https://coral.ai/docs/edgetpu/compiler/'
+ assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
+ if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
+ LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
+ sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
+ for c in (
+ 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
+ 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
+ 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
+ subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
+ ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
+
+ LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
+ f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
+ f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
+
+ cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
+ subprocess.run(cmd.split(), check=True)
+ return f, None
+
+
+@try_export
def export_tfjs(file, prefix=colorstr('TensorFlow.js:')):
# YOLOv5 TensorFlow.js export
- try:
- check_requirements(('tensorflowjs',))
- import re
-
- import tensorflowjs as tfjs
-
- LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
- f = str(file).replace('.pt', '_web_model') # js dir
- f_pb = file.with_suffix('.pb') # *.pb path
- f_json = f'{f}/model.json' # *.json path
-
- cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
- f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
- subprocess.run(cmd.split())
-
- with open(f_json) as j:
- json = j.read()
- with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
- subst = re.sub(
- r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
- r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
- r'"Identity_1": {"name": "Identity_1"}, '
- r'"Identity_2": {"name": "Identity_2"}, '
- r'"Identity_3": {"name": "Identity_3"}}}', json)
- j.write(subst)
-
- LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
- return f
- except Exception as e:
- LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
-@torch.no_grad()
+ check_requirements('tensorflowjs')
+ import re
+
+ import tensorflowjs as tfjs
+
+ LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
+ f = str(file).replace('.pt', '_web_model') # js dir
+ f_pb = file.with_suffix('.pb') # *.pb path
+ f_json = f'{f}/model.json' # *.json path
+
+ cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
+ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
+ subprocess.run(cmd.split())
+
+ json = Path(f_json).read_text()
+ with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
+ subst = re.sub(
+ r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
+ r'"Identity.?.?": {"name": "Identity.?.?"}, '
+ r'"Identity.?.?": {"name": "Identity.?.?"}, '
+ r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
+ r'"Identity_1": {"name": "Identity_1"}, '
+ r'"Identity_2": {"name": "Identity_2"}, '
+ r'"Identity_3": {"name": "Identity_3"}}}', json)
+ j.write(subst)
+ return f, None
+
+
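
Note: every exporter above now returns an (f, model) pair and delegates success/failure logging to the shared @try_export decorator, which is defined earlier in export.py and not shown in this hunk. A minimal sketch of the pattern (the real decorator also reports elapsed time and file size via Profile and file_size):

import time
from functools import wraps

def try_export(inner_func):
    # Wrap an export_* function: log success or failure and normalise the return value
    @wraps(inner_func)
    def outer_func(*args, **kwargs):
        t = time.time()
        try:
            f, model = inner_func(*args, **kwargs)
            print(f'export success, {time.time() - t:.1f}s, saved as {f}')
            return f, model
        except Exception as e:
            print(f'export failure, {time.time() - t:.1f}s: {e}')
            return None, None
    return outer_func
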
+@smart_inference_mode()
def run(
data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
weights='yolov5s.pt', # weights path
- imgsz=(640, 640), # image (height, width)
+ imgsz=None, # inference size (pixels), e.g. (640, 640)
+ img=None, # alias for imgsz (overrides imgsz when set)
batch_size=1, # batch size
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=('torchscript', 'onnx'), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
- train=False, # model.train() mode
keras=False, # use Keras
optimize=False, # TorchScript: optimize for mobile
int8=False, # CoreML/TF INT8 quantization
@@ -488,11 +481,16 @@ def run(
if isinstance(include, list) and (',' in include[0]):
include = include[0].split(',')
+ if imgsz is None and img is None:
+ imgsz = (640, 640)
+ elif img is not None:
+ imgsz = img
+
include = [x.lower() for x in include] # to lowercase
fmts = tuple(export_formats()['Argument'][1:]) # --include arguments
flags = [x in include for x in fmts]
assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
- jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans
+ jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans
file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
# Load PyTorch model
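
Note: img is accepted as an alias so existing callers of the packaged run() keep working. Hypothetical usage, assuming the yolov5 pip package layout used in this repo:

from yolov5 import export

# equivalent calls: img is an alias for imgsz and takes effect when provided
export.run(weights='yolov5s.pt', include=('onnx',), imgsz=(640, 640))
export.run(weights='yolov5s.pt', include=('onnx',), img=(640, 640))
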
@@ -501,11 +499,9 @@ def run(
assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
- nc, names = model.nc, model.names # number of classes, class names
# Checks
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
- assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}'
if optimize:
assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
@@ -515,64 +511,66 @@ def run(
im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
# Update model
- model.train() if train else model.eval() # training mode = no Detect() layer grid construction
+ model.eval()
for k, m in model.named_modules():
if isinstance(m, Detect):
m.inplace = inplace
- m.onnx_dynamic = dynamic
+ m.dynamic = dynamic
m.export = True
for _ in range(2):
y = model(im) # dry runs
if half and not coreml:
im, model = im.half(), model.half() # to FP16
- shape = tuple(y[0].shape) # model output shape
+ shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape
+ metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata
LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
# Exports
- f = [''] * 10 # exported filenames
+ f = [''] * len(fmts) # exported filenames
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning
- if jit:
- f[0] = export_torchscript(model, im, file, optimize)
+ if jit: # TorchScript
+ f[0], _ = export_torchscript(model, im, file, optimize)
if engine: # TensorRT required before ONNX
- f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose)
+ f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
if onnx or xml: # OpenVINO requires ONNX
- f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
+ f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
if xml: # OpenVINO
- f[3] = export_openvino(model, file, half)
- if coreml:
- _, f[4] = export_coreml(model, im, file, int8, half)
-
- # TensorFlow Exports
- if any((saved_model, pb, tflite, edgetpu, tfjs)):
+ f[3], _ = export_openvino(file, metadata, half)
+ if coreml: # CoreML
+ f[4], _ = export_coreml(model, im, file, int8, half)
+ if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
- check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow`
+ check_requirements('flatbuffers==1.12') # required before `import tensorflow`
assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
- model, f[5] = export_saved_model(model.cpu(),
- im,
- file,
- dynamic,
- tf_nms=nms or agnostic_nms or tfjs,
- agnostic_nms=agnostic_nms or tfjs,
- topk_per_class=topk_per_class,
- topk_all=topk_all,
- iou_thres=iou_thres,
- conf_thres=conf_thres,
- keras=keras)
+ assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
+ f[5], s_model = export_saved_model(model.cpu(),
+ im,
+ file,
+ dynamic,
+ tf_nms=nms or agnostic_nms or tfjs,
+ agnostic_nms=agnostic_nms or tfjs,
+ topk_per_class=topk_per_class,
+ topk_all=topk_all,
+ iou_thres=iou_thres,
+ conf_thres=conf_thres,
+ keras=keras)
if pb or tfjs: # pb prerequisite to tfjs
- f[6] = export_pb(model, file)
+ f[6], _ = export_pb(s_model, file)
if tflite or edgetpu:
- f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
+ f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
if edgetpu:
- f[8] = export_edgetpu(file)
+ f[8], _ = export_edgetpu(file)
if tfjs:
- f[9] = export_tfjs(file)
+ f[9], _ = export_tfjs(file)
+ if paddle: # PaddlePaddle
+ f[10], _ = export_paddle(model, im, file, metadata)
# Finish
f = [str(x) for x in f if x] # filter out '' and None
if any(f):
h = '--half' if half else '' # --half FP16 inference arg
- LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
+ LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nDetect: python detect.py --weights {f[-1]} {h}"
f"\nValidate: python val.py --weights {f[-1]} {h}"
@@ -590,7 +588,6 @@ def parse_opt():
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
- parser.add_argument('--train', action='store_true', help='model.train() mode')
parser.add_argument('--keras', action='store_true', help='TF: use Keras')
parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
@@ -607,7 +604,7 @@ def parse_opt():
parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
parser.add_argument('--include',
nargs='+',
- default=['torchscript', 'onnx'],
+ default=['torchscript'],
help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs')
opt = parser.parse_args()
print_args(vars(opt))
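
Note: after export, the printed Detect/Validate hints can also be exercised from Python. A hedged example using the packaged loader (file name and URL are illustrative, and assume the ONNX export above was run first):

import yolov5

model = yolov5.load('yolov5s.onnx')                            # DetectMultiBackend under the hood
results = model('https://ultralytics.com/images/zidane.jpg', size=640)
results.print()
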
diff --git a/yolov5/helpers.py b/yolov5/helpers.py
index 0068292..ef75132 100644
--- a/yolov5/helpers.py
+++ b/yolov5/helpers.py
@@ -2,6 +2,7 @@
from yolov5.models.common import AutoShape, DetectMultiBackend
from yolov5.models.experimental import attempt_load
+from yolov5.models.yolo import ClassificationModel
from yolov5.utils.general import LOGGER, logging
from yolov5.utils.torch_utils import select_device
@@ -32,7 +33,11 @@ def load_model(model_path, device=None, autoshape=True, verbose=False):
try:
model = DetectMultiBackend(model_path, device=device, fuse=autoshape) # detection model
if autoshape:
- model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
+ if model.pt and isinstance(model.model, ClassificationModel):
+ LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
+ 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
+ else:
+ model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
except Exception:
model = attempt_load(model_path, device=device, fuse=False) # arbitrary model
@@ -66,7 +71,7 @@ def predict(self, image_list, size=640, augment=False):
Returns results as a yolov5.models.common.Detections object.
"""
assert self.model is not None, "before predict, you need to call .load_model()"
- results = self.model(imgs=image_list, size=size, augment=augment)
+ results = self.model(ims=image_list, size=size, augment=augment)
return results
if __name__ == "__main__":
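
Note: as the warning added above says, classification checkpoints are returned without the AutoShape wrapper, so they expect pre-sized BCHW tensors. An illustration (checkpoint name hypothetical):

import torch
import yolov5

model = yolov5.load('yolov5s-cls.pt')       # ClassificationModel, not AutoShape-wrapped
im = torch.zeros(1, 3, 224, 224)            # BCHW, values in [0, 1]
logits = model(im)                          # raw class scores, shape (1, num_classes)
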
diff --git a/yolov5/hubconf.py b/yolov5/hubconf.py
index 4a17433..2a8377f 100644
--- a/yolov5/hubconf.py
+++ b/yolov5/hubconf.py
@@ -1,11 +1,11 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
-PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
+PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
- model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch
+ model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch
"""
import torch
@@ -30,16 +30,14 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
from yolov5.models.common import AutoShape, DetectMultiBackend
from yolov5.models.experimental import attempt_load
- from yolov5.models.yolo import Model
+ from yolov5.models.yolo import ClassificationModel, DetectionModel
from yolov5.utils.downloads import attempt_download
- from yolov5.utils.general import (LOGGER, check_requirements,
- intersect_dicts, logging,
- yolov5_in_syspath)
+ from yolov5.utils.general import LOGGER, check_requirements, intersect_dicts, logging, yolov5_in_syspath
from yolov5.utils.torch_utils import select_device
if not verbose:
LOGGER.setLevel(logging.WARNING)
- check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
+ check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop'))
name = Path(name)
path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path
try:
@@ -48,12 +46,16 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
try:
model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model
if autoshape:
- model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
+ if model.pt and isinstance(model.model, ClassificationModel):
+ LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. '
+ 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
+ else:
+ model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
except Exception:
model = attempt_load(path, device=device, fuse=False) # arbitrary model
else:
cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
- model = Model(cfg, channels, classes) # create model
+ model = DetectionModel(cfg, channels, classes) # create model
if pretrained:
with yolov5_in_syspath:
ckpt = torch.load(attempt_download(path), map_location=device) # load
diff --git a/yolov5/models/common.py b/yolov5/models/common.py
index b0e5c21..bdf5b63 100644
--- a/yolov5/models/common.py
+++ b/yolov5/models/common.py
@@ -17,33 +17,36 @@
import requests
import torch
import torch.nn as nn
-import yaml
from PIL import Image
from torch.cuda import amp
from yolov5.utils.dataloaders import exif_transpose, letterbox
-from yolov5.utils.general import (LOGGER, check_requirements, check_suffix,
- check_version, colorstr, increment_path,
- make_divisible, non_max_suppression,
- scale_coords, xywh2xyxy, xyxy2xywh,
- yolov5_in_syspath)
+from yolov5.utils.general import (LOGGER, ROOT, Profile, check_requirements,
+ check_suffix, check_version, colorstr,
+ increment_path, make_divisible,
+ non_max_suppression, scale_coords, xywh2xyxy,
+ xyxy2xywh, yaml_load, yolov5_in_syspath)
from yolov5.utils.plots import Annotator, colors, save_one_box
-from yolov5.utils.torch_utils import copy_attr, time_sync
+from yolov5.utils.torch_utils import copy_attr, smart_inference_mode
-def autopad(k, p=None): # kernel, padding
- # Pad to 'same'
+def autopad(k, p=None, d=1): # kernel, padding, dilation
+ # Pad to 'same' shape outputs
+ if d > 1:
+ k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
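
Note: a worked example of the dilation-aware autopad above. With k=3 and d=2 the effective kernel is d*(k-1)+1 = 5, so 'same' padding becomes 2:

k, d = 3, 2
k_eff = d * (k - 1) + 1   # effective kernel size -> 5
p = k_eff // 2            # auto 'same' padding -> 2
print(k_eff, p)
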
class Conv(nn.Module):
- # Standard convolution
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+ # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
+ act = nn.SiLU() # default activation
+
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
super().__init__()
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
self.bn = nn.BatchNorm2d(c2)
- self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+ self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
@@ -53,13 +56,13 @@ def forward_fuse(self, x):
class DWConv(Conv):
- # Depth-wise convolution class
- def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
+ # Depth-wise convolution
+ def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
+ super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
class DWConvTranspose2d(nn.ConvTranspose2d):
- # Depth-wise transpose convolution class
+ # Depth-wise transpose convolution
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
@@ -312,7 +315,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
# PyTorch: weights = *.pt
# TorchScript: *.torchscript
# ONNX Runtime: *.onnx
- # ONNX OpenCV DNN: *.onnx with --dnn
+ # ONNX OpenCV DNN: *.onnx --dnn
# OpenVINO: *.xml
# CoreML: *.mlmodel
# TensorRT: *.engine
@@ -320,18 +323,17 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
# TensorFlow GraphDef: *.pb
# TensorFlow Lite: *.tflite
# TensorFlow Edge TPU: *_edgetpu.tflite
+ # PaddlePaddle: *_paddle_model
from yolov5.models.experimental import ( # scoped to avoid circular import
attempt_download, attempt_load)
super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
- pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend
+ pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type
w = attempt_download(w) # download if not local
- fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16
- stride, names = 32, [f'class{i}' for i in range(1000)] # assign defaults
- if data: # assign class names (optional)
- with open(data, errors='ignore') as f:
- names = yaml.safe_load(f)['names']
+ fp16 &= pt or jit or onnx or engine # FP16
+ stride = 32 # default stride
+ cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA
if pt: # PyTorch
model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
@@ -345,28 +347,30 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
with yolov5_in_syspath():
model = torch.jit.load(w, _extra_files=extra_files)
model.half() if fp16 else model.float()
- if extra_files['config.txt']:
- d = json.loads(extra_files['config.txt']) # extra_files dict
+ if extra_files['config.txt']: # load metadata dict
+ d = json.loads(extra_files['config.txt'],
+ object_hook=lambda d: {int(k) if k.isdigit() else k: v
+ for k, v in d.items()})
stride, names = int(d['stride']), d['names']
elif dnn: # ONNX OpenCV DNN
LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
- check_requirements(('opencv-python>=4.5.4',))
+ check_requirements('opencv-python>=4.5.4')
with yolov5_in_syspath():
net = cv2.dnn.readNetFromONNX(w)
elif onnx: # ONNX Runtime
LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
- cuda = torch.cuda.is_available()
check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
import onnxruntime
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
with yolov5_in_syspath():
session = onnxruntime.InferenceSession(w, providers=providers)
- meta = session.get_modelmeta().custom_metadata_map # metadata
+ output_names = [x.name for x in session.get_outputs()]
+ meta = session.get_modelmeta().custom_metadata_map # metadata
if 'stride' in meta:
stride, names = int(meta['stride']), eval(meta['names'])
elif xml: # OpenVINO
LOGGER.info(f'Loading {w} for OpenVINO inference...')
- check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
+ check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/
from openvino.runtime import Core, Layout, get_batch
ie = Core()
if not Path(w).is_file(): # if not *.xml
@@ -379,14 +383,13 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
if batch_dim.is_static:
batch_size = batch_dim.get_length()
executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2
- output_layer = next(iter(executable_network.outputs))
- meta = Path(w).with_suffix('.yaml')
- if meta.exists():
- stride, names = self._load_metadata(meta) # load metadata
+ stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
elif engine: # TensorRT
LOGGER.info(f'Loading {w} for TensorRT inference...')
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
+ if device.type == 'cpu':
+ device = torch.device('cuda:0')
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
logger = trt.Logger(trt.Logger.INFO)
with yolov5_in_syspath():
@@ -395,19 +398,19 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
context = model.create_execution_context()
bindings = OrderedDict()
fp16 = False # default updated below
- dynamic_input = False
+ dynamic = False
for index in range(model.num_bindings):
name = model.get_binding_name(index)
dtype = trt.nptype(model.get_binding_dtype(index))
if model.binding_is_input(index):
if -1 in tuple(model.get_binding_shape(index)): # dynamic
- dynamic_input = True
+ dynamic = True
context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2]))
if dtype == np.float16:
fp16 = True
shape = tuple(context.get_binding_shape(index))
- data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)
- bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))
+ im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
+ bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size
elif coreml: # CoreML
@@ -415,82 +418,103 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
import coremltools as ct
with yolov5_in_syspath():
model = ct.models.MLModel(w)
- else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
- if saved_model: # SavedModel
- LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
- import tensorflow as tf
- keras = False # assume TF1 saved_model
- with yolov5_in_syspath():
- model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
- elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
- LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
- import tensorflow as tf
+ elif saved_model: # TF SavedModel
+ LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
+ import tensorflow as tf
+ keras = False # assume TF1 saved_model
+ with yolov5_in_syspath():
+ model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
+ elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
+ LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
+ import tensorflow as tf
- def wrap_frozen_graph(gd, inputs, outputs):
- x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
- ge = x.graph.as_graph_element
- return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
+ def wrap_frozen_graph(gd, inputs, outputs):
+ x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
+ ge = x.graph.as_graph_element
+ return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
- gd = tf.Graph().as_graph_def() # graph_def
+ gd = tf.Graph().as_graph_def() # TF GraphDef
+ with yolov5_in_syspath():
+ with open(w, 'rb') as f:
+ gd.ParseFromString(f.read())
+ frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0")
+ elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
+ try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
+ from tflite_runtime.interpreter import (Interpreter,
+ load_delegate)
+ except ImportError:
+ import tensorflow as tf
+ Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
+ if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
+ LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
+ delegate = {
+ 'Linux': 'libedgetpu.so.1',
+ 'Darwin': 'libedgetpu.1.dylib',
+ 'Windows': 'edgetpu.dll'}[platform.system()]
with yolov5_in_syspath():
- with open(w, 'rb') as f:
- gd.ParseFromString(f.read())
- frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0")
- elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
- try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
- from tflite_runtime.interpreter import (Interpreter,
- load_delegate)
- except ImportError:
- import tensorflow as tf
- Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
- if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime
- LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
- delegate = {
- 'Linux': 'libedgetpu.so.1',
- 'Darwin': 'libedgetpu.1.dylib',
- 'Windows': 'edgetpu.dll'}[platform.system()]
- with yolov5_in_syspath():
- interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
- else: # Lite
- LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
- with yolov5_in_syspath():
- interpreter = Interpreter(model_path=w) # load TFLite model
- interpreter.allocate_tensors() # allocate
- input_details = interpreter.get_input_details() # inputs
- output_details = interpreter.get_output_details() # outputs
- elif tfjs:
- raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
- else:
- raise Exception(f'ERROR: {w} is not a supported format')
+ interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
+ else: # TFLite
+ LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
+ with yolov5_in_syspath():
+ interpreter = Interpreter(model_path=w) # load TFLite model
+ interpreter.allocate_tensors() # allocate
+ input_details = interpreter.get_input_details() # inputs
+ output_details = interpreter.get_output_details() # outputs
+ elif tfjs: # TF.js
+ raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
+ elif paddle: # PaddlePaddle
+ LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
+ check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
+ import paddle.inference as pdi
+ with yolov5_in_syspath():
+ if not Path(w).is_file(): # if not *.pdmodel
+ w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir
+ weights = Path(w).with_suffix('.pdiparams')
+ config = pdi.Config(str(w), str(weights))
+ if cuda:
+ config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
+ predictor = pdi.create_predictor(config)
+ input_names = predictor.get_input_names()
+ input_handle = predictor.get_input_handle(input_names[0])
+ else:
+ raise NotImplementedError(f'ERROR: {w} is not a supported format')
+
+ # class names
+ if 'names' not in locals():
+ names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
+ if names[0] == 'n01440764' and len(names) == 1000: # ImageNet
+ names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names
+
self.__dict__.update(locals()) # assign all variables to self
- def forward(self, im, augment=False, visualize=False, val=False):
+ def forward(self, im, augment=False, visualize=False):
# YOLOv5 MultiBackend inference
b, ch, h, w = im.shape # batch, channel, height, width
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
if self.pt: # PyTorch
- y = self.model(im, augment=augment, visualize=visualize)[0]
+ y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
elif self.jit: # TorchScript
- y = self.model(im)[0]
+ y = self.model(im)
elif self.dnn: # ONNX OpenCV DNN
im = im.cpu().numpy() # torch to numpy
self.net.setInput(im)
y = self.net.forward()
elif self.onnx: # ONNX Runtime
im = im.cpu().numpy() # torch to numpy
- y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
+ y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
elif self.xml: # OpenVINO
im = im.cpu().numpy() # FP32
- y = self.executable_network([im])[self.output_layer]
+ y = list(self.executable_network([im]).values())
elif self.engine: # TensorRT
- if im.shape != self.bindings['images'].shape and self.dynamic_input:
- self.context.set_binding_shape(self.model.get_binding_index('images'), im.shape) # reshape if dynamic
+ if self.dynamic and im.shape != self.bindings['images'].shape:
+ i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output'))
+ self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic
self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
- assert im.shape == self.bindings['images'].shape, (
- f"image shape {im.shape} exceeds model max shape {self.bindings['images'].shape}" if self.dynamic_input
- else f"image shape {im.shape} does not match model shape {self.bindings['images'].shape}")
+ self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
+ s = self.bindings['images'].shape
+ assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs['images'] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
y = self.bindings['output'].data
@@ -506,6 +530,13 @@ def forward(self, im, augment=False, visualize=False, val=False):
else:
k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key
y = y[k] # output
+ elif self.paddle: # PaddlePaddle
+ im = im.cpu().numpy().astype("float32")
+ self.input_handle.copy_from_cpu(im)
+ self.predictor.run()
+ output_names = self.predictor.get_output_names()
+ output_handle = self.predictor.get_output_handle(output_names[0])
+ y = output_handle.copy_to_cpu()
else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)
if self.saved_model: # SavedModel
@@ -526,36 +557,41 @@ def forward(self, im, augment=False, visualize=False, val=False):
y = (y.astype(np.float32) - zero_point) * scale # re-scale
y[..., :4] *= [w, h, w, h] # xywh normalized to pixels
- if isinstance(y, np.ndarray):
- y = torch.tensor(y, device=self.device)
- return (y, []) if val else y
+ if isinstance(y, (list, tuple)):
+ return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
+ else:
+ return self.from_numpy(y)
+
+ def from_numpy(self, x):
+ return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=(1, 3, 640, 640)):
# Warmup model by running inference once
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
if any(warmup_types) and self.device.type != 'cpu':
- im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
+ im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
for _ in range(2 if self.jit else 1): #
self.forward(im) # warmup
@staticmethod
- def model_type(p='path/to/model.pt'):
+ def _model_type(p='path/to/model.pt'):
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
from yolov5.export import export_formats
- suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes
- check_suffix(p, suffixes) # checks
+ sf = list(export_formats().Suffix) + ['.xml'] # export suffixes
+ check_suffix(p, sf) # checks
p = Path(p).name # eliminate trailing separators
- pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes)
+ pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf)
xml |= xml2 # *_openvino_model or *.xml
tflite &= not edgetpu # *.tflite
- return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs
+ return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle
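
Note: _model_type keys the backend off filename suffixes from export_formats(). A small illustration with an assumed suffix list (the real list comes from yolov5/export.py):

from pathlib import Path

sf = ['.pt', '.torchscript', '.onnx', '_openvino_model', '.engine', '.mlmodel',
      '_saved_model', '.pb', '.tflite', '_edgetpu.tflite', '_web_model',
      '_paddle_model', '.xml']                    # assumed export suffixes plus '.xml'
p = Path('weights/yolov5s_paddle_model').name     # eliminate trailing separators
print([s for s in sf if s in p])                  # -> ['_paddle_model']
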
@staticmethod
- def _load_metadata(f='path/to/meta.yaml'):
+ def _load_metadata(f=Path('path/to/meta.yaml')):
# Load metadata from meta.yaml if it exists
- with open(f, errors='ignore') as f:
- d = yaml.safe_load(f)
- return d['stride'], d['names'] # assign stride, names
+ if f.exists():
+ d = yaml_load(f)
+ return d['stride'], d['names'] # assign stride, names
+ return None, None
class AutoShape(nn.Module):
@@ -579,6 +615,7 @@ def __init__(self, model, verbose=True):
if self.pt:
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
m.inplace = False # Detect.inplace=False for safe multithread inference
+ m.export = True # do not output loss values
def _apply(self, fn):
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
@@ -591,10 +628,10 @@ def _apply(self, fn):
m.anchor_grid = list(map(fn, m.anchor_grid))
return self
- @torch.no_grad()
- def forward(self, imgs, size=640, augment=False, profile=False):
- # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
- # file: imgs = 'data/images/zidane.jpg' # str or PosixPath
+ @smart_inference_mode()
+ def forward(self, ims, size=640, augment=False, profile=False):
+ # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
+ # file: ims = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
@@ -602,65 +639,67 @@ def forward(self, imgs, size=640, augment=False, profile=False):
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
- t = [time_sync()]
- p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type
- autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
- if isinstance(imgs, torch.Tensor): # torch
- with amp.autocast(autocast):
- return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
-
- # Pre-process
- n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images
- shape0, shape1, files = [], [], [] # image and inference shapes, filenames
- for i, im in enumerate(imgs):
- f = f'image{i}' # filename
- if isinstance(im, (str, Path)): # filename or uri
- im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
- im = np.asarray(exif_transpose(im))
- elif isinstance(im, Image.Image): # PIL Image
- im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
- files.append(Path(f).with_suffix('.jpg').name)
- if im.shape[0] < 5: # image in CHW
- im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
- im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
- s = im.shape[:2] # HWC
- shape0.append(s) # image shape
- g = (size / max(s)) # gain
- shape1.append([y * g for y in s])
- imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
- shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape
- x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad
- x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
- x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
- t.append(time_sync())
+ dt = (Profile(), Profile(), Profile())
+ with dt[0]:
+ if isinstance(size, int): # expand
+ size = (size, size)
+ p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
+ autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
+ if isinstance(ims, torch.Tensor): # torch
+ with amp.autocast(autocast):
+ return self.model(ims.to(p.device).type_as(p), augment, profile) # inference
+
+ # Pre-process
+ n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
+ shape0, shape1, files = [], [], [] # image and inference shapes, filenames
+ for i, im in enumerate(ims):
+ f = f'image{i}' # filename
+ if isinstance(im, (str, Path)): # filename or uri
+ im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
+ im = np.asarray(exif_transpose(im))
+ elif isinstance(im, Image.Image): # PIL Image
+ im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
+ files.append(Path(f).with_suffix('.jpg').name)
+ if im.shape[0] < 5: # image in CHW
+ im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
+ im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
+ s = im.shape[:2] # HWC
+ shape0.append(s) # image shape
+ g = max(size) / max(s) # gain
+ shape1.append([y * g for y in s])
+ ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
+ shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape
+ x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
+ x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
+ x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
with amp.autocast(autocast):
# Inference
- y = self.model(x, augment, profile) # forward
- t.append(time_sync())
+ with dt[1]:
+ y = self.model(x, augment, profile) # forward
# Post-process
- y = non_max_suppression(y if self.dmb else y[0],
- self.conf,
- self.iou,
- self.classes,
- self.agnostic,
- self.multi_label,
- max_det=self.max_det) # NMS
- for i in range(n):
- scale_coords(shape1, y[i][:, :4], shape0[i])
+ with dt[2]:
+ y = non_max_suppression(y if self.dmb else y[0],
+ self.conf,
+ self.iou,
+ self.classes,
+ self.agnostic,
+ self.multi_label,
+ max_det=self.max_det) # NMS
+ for i in range(n):
+ scale_coords(shape1, y[i][:, :4], shape0[i])
- t.append(time_sync())
- return Detections(imgs, y, files, t, self.names, x.shape)
+ return Detections(ims, y, files, dt, self.names, x.shape)
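
Note: the dt tuple threaded through forward() above assumes the Profile timer from utils.general. A simplified stand-in showing the behaviour Detections relies on (elapsed seconds accumulated in .t, later converted to ms per image):

import time

class Profile:
    # simplified stand-in for yolov5.utils.general.Profile
    def __init__(self):
        self.t = 0.0
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *exc):
        self.t += time.time() - self.start

dt = Profile()
with dt:
    time.sleep(0.01)
print(f'{dt.t * 1E3:.1f} ms')
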
class Detections:
# YOLOv5 detections class for inference results
- def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None):
+ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
super().__init__()
d = pred[0].device # device
- gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations
- self.imgs = imgs # list of images as numpy arrays
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
+ self.ims = ims # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
@@ -670,12 +709,12 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
- self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)
+ self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
crops = []
- for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
+ for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
if pred.shape[0]:
for c in pred[:, -1].unique():
@@ -710,7 +749,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False
if i == self.n - 1:
LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
if render:
- self.imgs[i] = np.asarray(im)
+ self.ims[i] = np.asarray(im)
if crop:
if save:
LOGGER.info(f'Saved results to {save_dir}\n')
@@ -733,7 +772,7 @@ def crop(self, save=True, save_dir='runs/detect/exp'):
def render(self, labels=True):
self.display(render=True, labels=labels) # render results
- return self.imgs
+ return self.ims
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
@@ -748,9 +787,9 @@ def pandas(self):
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
r = range(self.n) # iterable
- x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+ x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
# for d in x:
- # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+ # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
# setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
@@ -762,14 +801,30 @@ def __str__(self):
return ''
+class Proto(nn.Module):
+ # YOLOv5 mask Proto module for segmentation models
+ def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks
+ super().__init__()
+ self.cv1 = Conv(c1, c_, k=3)
+ self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+ self.cv2 = Conv(c_, c_, k=3)
+ self.cv3 = Conv(c_, c2)
+
+ def forward(self, x):
+ return self.cv3(self.cv2(self.upsample(self.cv1(x))))
+
+
class Classify(nn.Module):
- # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
+ # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
- self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
- self.flat = nn.Flatten()
+ c_ = 1280 # efficientnet_b0 size
+ self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
+ self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
+ self.drop = nn.Dropout(p=0.0, inplace=True)
+ self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x):
- z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
- return self.flat(self.conv(z)) # flatten to x(b,c2)
+ if isinstance(x, list):
+ x = torch.cat(x, 1)
+ return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
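
Note: a quick shape check for the rewritten Classify head above (requires torch and the packaged module; channel counts illustrative):

import torch
from yolov5.models.common import Classify

head = Classify(c1=512, c2=1000)            # x(b,512,20,20) -> x(b,1000)
x = torch.zeros(2, 512, 20, 20)
print(head(x).shape)                        # expected: torch.Size([2, 1000])
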
diff --git a/yolov5/models/experimental.py b/yolov5/models/experimental.py
index 7d47917..c9e931d 100644
--- a/yolov5/models/experimental.py
+++ b/yolov5/models/experimental.py
@@ -80,9 +80,16 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
with yolov5_in_syspath():
ckpt = torch.load(attempt_download(w), map_location='cpu') # load
ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model
- model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode
- # Compatibility updates
+ # Model compatibility updates
+ if not hasattr(ckpt, 'stride'):
+ ckpt.stride = torch.tensor([32.])
+ if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+ ckpt.names = dict(enumerate(ckpt.names)) # convert to dict
+
+ model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode
+
+ # Module compatibility updates
for m in model.modules():
t = type(m)
if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
@@ -93,11 +100,14 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
m.recompute_scale_factor = None # torch 1.11.0 compatibility
+ # Return model
if len(model) == 1:
- return model[-1] # return model
+ return model[-1]
+
+ # Return detection ensemble
print(f'Ensemble created with {weights}\n')
for k in 'names', 'nc', 'yaml':
setattr(model, k, getattr(model[0], k))
model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
- return model # return ensemble
+ return model
diff --git a/yolov5/models/tf.py b/yolov5/models/tf.py
index d3d34c4..bdea965 100644
--- a/yolov5/models/tf.py
+++ b/yolov5/models/tf.py
@@ -7,7 +7,7 @@
$ python models/tf.py --weights yolov5s.pt
Export:
- $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+ $ yolov5 export --weights yolov5s.pt --include saved_model pb tflite tfjs
"""
import argparse
@@ -30,7 +30,7 @@
C3x, Concat, Conv, CrossConv, DWConv,
DWConvTranspose2d, Focus, autopad)
from yolov5.models.experimental import MixConv2d, attempt_load
-from yolov5.models.yolo import Detect
+from yolov5.models.yolo import Detect, Segment
from yolov5.utils.activations import SiLU
from yolov5.utils.general import LOGGER, make_divisible, print_args
@@ -320,6 +320,36 @@ def _make_grid(nx=20, ny=20):
return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
+class TFSegment(TFDetect):
+ # YOLOv5 Segment head for segmentation models
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
+ super().__init__(nc, anchors, ch, imgsz, w)
+ self.nm = nm # number of masks
+ self.npr = npr # number of protos
+ self.no = 5 + nc + self.nm # number of outputs per anchor
+ self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
+ self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
+ self.detect = TFDetect.call
+
+ def call(self, x):
+ p = self.proto(x[0])
+ x = self.detect(self, x)
+ return (x, p) if self.training else ((x[0], p),)
+
+
+class TFProto(keras.layers.Layer):
+
+ def __init__(self, c1, c_=256, c2=32, w=None):
+ super().__init__()
+ self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
+ self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
+ self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
+ self.cv3 = TFConv(c_, c2, w=w.cv3)
+
+ def call(self, inputs):
+ return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
+
+
class TFUpsample(keras.layers.Layer):
# TF version of torch.nn.Upsample()
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
@@ -377,10 +407,12 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
- elif m is Detect:
+ elif m in [Detect, Segment]:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
+ if m is Segment:
+ args[3] = make_divisible(args[3] * gw, 8)
args.append(imgsz)
else:
c2 = ch[f]
diff --git a/yolov5/models/yolo.py b/yolov5/models/yolo.py
index 6afd7f1..bb0510e 100644
--- a/yolov5/models/yolo.py
+++ b/yolov5/models/yolo.py
@@ -3,7 +3,7 @@
YOLO-specific modules
Usage:
- $ python path/to/models/yolo.py --cfg yolov5s.yaml
+ $ python models/yolo.py --cfg yolov5s.yaml
"""
import argparse
@@ -21,10 +21,12 @@
from yolov5.models.common import *
from yolov5.models.experimental import *
from yolov5.utils.autoanchor import check_anchor_order
-from yolov5.utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+from yolov5.utils.general import (LOGGER, check_version, check_yaml,
+ make_divisible, print_args)
from yolov5.utils.plots import feature_visualization
-from yolov5.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
- time_sync)
+from yolov5.utils.torch_utils import (fuse_conv_and_bn, initialize_weights,
+ model_info, profile, scale_img,
+ select_device, time_sync)
try:
import thop # for FLOPs computation
@@ -33,8 +35,9 @@
class Detect(nn.Module):
+ # YOLOv5 Detect head for detection models
stride = None # strides computed during build
- onnx_dynamic = False # ONNX export parameter
+ dynamic = False # force grid reconstruction
export = False # export mode
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
@@ -43,8 +46,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
- self.grid = [torch.zeros(1)] * self.nl # init grid
- self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid
+ self.grid = [torch.empty(1)] * self.nl # init grid
+ self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
self.inplace = inplace # use inplace ops (e.g. slice assignment)
@@ -57,38 +60,109 @@ def forward(self, x):
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
- if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
+ if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
- y = x[i].sigmoid()
+ y = x[i].clone()
+ y[..., :5 + self.nc].sigmoid_()
if self.inplace:
y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
- xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0
+ xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0
xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
- y = torch.cat((xy, wh, conf), 4)
+ y = torch.cat((xy, wh, etc), 4)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
- def _make_grid(self, nx=20, ny=20, i=0):
+ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
d = self.anchors[i].device
t = self.anchors[i].dtype
shape = 1, self.na, ny, nx, 2 # grid shape
y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
- if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility
- yv, xv = torch.meshgrid(y, x, indexing='ij')
- else:
- yv, xv = torch.meshgrid(y, x)
+ yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
return grid, anchor_grid
-class Model(nn.Module):
- # YOLOv5 model
+class Segment(Detect):
+ # YOLOv5 Segment head for segmentation models
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
+ super().__init__(nc, anchors, ch, inplace)
+ self.nm = nm # number of masks
+ self.npr = npr # number of protos
+ self.no = 5 + nc + self.nm # number of outputs per anchor
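+        # i.e. per-anchor layout: 4 box coords + 1 objectness + nc class scores + nm mask coefficients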
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+ self.proto = Proto(ch[0], self.npr, self.nm) # protos
+ self.detect = Detect.forward
+
+ def forward(self, x):
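+        # train: (raw head outputs, protos); export: (preds, protos); inference: (preds, protos, raw head outputs)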
+ p = self.proto(x[0])
+ x = self.detect(self, x)
+ return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
+
+
+class BaseModel(nn.Module):
+ # YOLOv5 base model
+ def forward(self, x, profile=False, visualize=False):
+ return self._forward_once(x, profile, visualize) # single-scale inference, train
+
+ def _forward_once(self, x, profile=False, visualize=False):
+ y, dt = [], [] # outputs
+ for m in self.model:
+ if m.f != -1: # if not from previous layer
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+ if profile:
+ self._profile_one_layer(m, x, dt)
+ x = m(x) # run
+ y.append(x if m.i in self.save else None) # save output
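+            # self.save holds the indices of layers whose outputs are reused later (e.g. by Concat or the Detect/Segment head)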
+ if visualize:
+ feature_visualization(x, m.type, m.i, save_dir=visualize)
+ return x
+
+ def _profile_one_layer(self, m, x, dt):
+ c = m == self.model[-1] # is final layer, copy input as inplace fix
+ o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
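+        # thop returns multiply-accumulate ops; x2 approximates FLOPs and /1E9 converts to GFLOPs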
+ t = time_sync()
+ for _ in range(10):
+ m(x.copy() if c else x)
+ dt.append((time_sync() - t) * 100)
+ if m == self.model[0]:
+ LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
+ LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
+ if c:
+ LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
+
+ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
+ LOGGER.info('Fusing layers... ')
+ for m in self.model.modules():
+ if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+ m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
+ delattr(m, 'bn') # remove batchnorm
+ m.forward = m.forward_fuse # update forward
+ self.info()
+ return self
+
+ def info(self, verbose=False, img_size=640): # print model information
+ model_info(self, verbose, img_size)
+
+ def _apply(self, fn):
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+ self = super()._apply(fn)
+ m = self.model[-1] # Detect()
+ if isinstance(m, (Detect, Segment)):
+ m.stride = fn(m.stride)
+ m.grid = list(map(fn, m.grid))
+ if isinstance(m.anchor_grid, list):
+ m.anchor_grid = list(map(fn, m.anchor_grid))
+ return self
+
+
+class DetectionModel(BaseModel):
+ # YOLOv5 detection model
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super().__init__()
if isinstance(cfg, dict):
@@ -113,11 +187,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i
# Build strides, anchors
m = self.model[-1] # Detect()
- if isinstance(m, Detect):
+ if isinstance(m, (Detect, Segment)):
s = 256 # 2x min stride
m.inplace = self.inplace
- m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
- check_anchor_order(m) # must be in pixel-space (not grid-space)
+ forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+ m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
+ check_anchor_order(m)
m.anchors /= m.stride.view(-1, 1, 1)
self.stride = m.stride
self._initialize_biases() # only run once
@@ -146,19 +221,6 @@ def _forward_augment(self, x):
y = self._clip_augmented(y) # clip augmented tails
return torch.cat(y, 1), None # augmented inference, train
- def _forward_once(self, x, profile=False, visualize=False):
- y, dt = [], [] # outputs
- for m in self.model:
- if m.f != -1: # if not from previous layer
- x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
- if profile:
- self._profile_one_layer(m, x, dt)
- x = m(x) # run
- y.append(x if m.i in self.save else None) # save output
- if visualize:
- feature_visualization(x, m.type, m.i, save_dir=visualize)
- return x
-
def _descale_pred(self, p, flips, scale, img_size):
# de-scale predictions following augmented inference (inverse operation)
if self.inplace:
@@ -187,69 +249,59 @@ def _clip_augmented(self, y):
y[-1] = y[-1][:, i:] # small
return y
- def _profile_one_layer(self, m, x, dt):
- c = isinstance(m, Detect) # is final layer, copy input as inplace fix
- o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
- t = time_sync()
- for _ in range(10):
- m(x.copy() if c else x)
- dt.append((time_sync() - t) * 100)
- if m == self.model[0]:
- LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
- LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
- if c:
- LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
-
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
- b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85)
- b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
- b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls
+ b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
+ b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
+ b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
- def _print_biases(self):
- m = self.model[-1] # Detect() module
- for mi in m.m: # from
- b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
- LOGGER.info(
- ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
- # def _print_weights(self):
- # for m in self.model.modules():
- # if type(m) is Bottleneck:
- # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
+Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
- def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
- LOGGER.info('Fusing layers... ')
- for m in self.model.modules():
- if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
- m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
- delattr(m, 'bn') # remove batchnorm
- m.forward = m.forward_fuse # update forward
- self.info()
- return self
- def info(self, verbose=False, img_size=640): # print model information
- model_info(self, verbose, img_size)
+class SegmentationModel(DetectionModel):
+ # YOLOv5 segmentation model
+ def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+ super().__init__(cfg, ch, nc, anchors)
- def _apply(self, fn):
- # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
- self = super()._apply(fn)
- m = self.model[-1] # Detect()
- if isinstance(m, Detect):
- m.stride = fn(m.stride)
- m.grid = list(map(fn, m.grid))
- if isinstance(m.anchor_grid, list):
- m.anchor_grid = list(map(fn, m.anchor_grid))
- return self
+
+class ClassificationModel(BaseModel):
+ # YOLOv5 classification model
+ def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
+ super().__init__()
+ self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+ def _from_detection_model(self, model, nc=1000, cutoff=10):
+ # Create a YOLOv5 classification model from a YOLOv5 detection model
+ if isinstance(model, DetectMultiBackend):
+ model = model.model # unwrap DetectMultiBackend
+ model.model = model.model[:cutoff] # backbone
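+        # e.g. cutoff=10 keeps model.0 through model.9 as the feature-extraction backbone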
+ m = model.model[-1] # last layer
+ ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
+ c = Classify(ch, nc) # Classify()
+ c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
+ model.model[-1] = c # replace
+ self.model = model.model
+ self.stride = model.stride
+ self.save = []
+ self.nc = nc
+
+ def _from_yaml(self, cfg):
+ # Create a YOLOv5 classification model from a *.yaml file
+ self.model = None
def parse_model(d, ch): # model_dict, input_channels(3)
+ # Parse a YOLOv5 model.yaml dictionary
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
- anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+ anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+ if act:
+ Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU()
+ LOGGER.info(f"{colorstr('activation:')} {act}") # print
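+        # e.g. an 'activation: nn.LeakyReLU(0.1)' entry in model.yaml overrides the default SiLU for all Conv layers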
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -261,24 +313,28 @@ def parse_model(d, ch): # model_dict, input_channels(3)
args[j] = eval(a) if isinstance(a, str) else a # eval strings
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
- if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
- BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x):
+ if m in {
+ Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+ BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
- if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]:
+ if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[x] for x in f)
- elif m is Detect:
+ # TODO: channel, gw, gd
+ elif m in {Detect, Segment}:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
+ if m is Segment:
+ args[3] = make_divisible(args[3] * gw, 8)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
@@ -318,7 +374,7 @@ def parse_model(d, ch): # model_dict, input_channels(3)
# Options
if opt.line_profile: # profile layer by layer
- _ = model(im, profile=True)
+ model(im, profile=True)
elif opt.profile: # profile forward-backward
results = profile(input=im, ops=[model], n=3)
diff --git a/yolov5/segment/__init__.py b/yolov5/segment/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/yolov5/segment/predict.py b/yolov5/segment/predict.py
new file mode 100644
index 0000000..8d4d3a4
--- /dev/null
+++ b/yolov5/segment/predict.py
@@ -0,0 +1,276 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
+
+Usage - sources:
+ $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam
+ img.jpg # image
+ vid.mp4 # video
+ path/ # directory
+ 'path/*.jpg' # glob
+ 'https://youtu.be/Zgi9g1ksQHc' # YouTube
+ 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+ $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch
+ yolov5s-seg.torchscript # TorchScript
+ yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-seg.xml # OpenVINO
+ yolov5s-seg.engine # TensorRT
+ yolov5s-seg.mlmodel # CoreML (macOS-only)
+ yolov5s-seg_saved_model # TensorFlow SavedModel
+ yolov5s-seg.pb # TensorFlow GraphDef
+ yolov5s-seg.tflite # TensorFlow Lite
+ yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-seg_paddle_model # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+from pathlib import Path
+
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+from yolov5.models.common import DetectMultiBackend
+from yolov5.utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages,
+ LoadStreams)
+from yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size,
+ check_imshow, check_requirements, colorstr,
+ cv2, increment_path, non_max_suppression,
+ print_args, scale_coords, strip_optimizer,
+ xyxy2xywh)
+from yolov5.utils.plots import Annotator, colors, save_one_box
+from yolov5.utils.segment.general import process_mask
+from yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+ weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s)
+ source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
+ data=ROOT / 'data/coco128.yaml', # dataset.yaml path
+ imgsz=None, # inference size (pixels)
+    img=None,  # inference size (pixels), alternative name for imgsz; takes precedence when both are given
+ conf_thres=0.25, # confidence threshold
+ iou_thres=0.45, # NMS IOU threshold
+ max_det=1000, # maximum detections per image
+ device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
+ view_img=False, # show results
+ save_txt=False, # save results to *.txt
+ save_conf=False, # save confidences in --save-txt labels
+ save_crop=False, # save cropped prediction boxes
+ nosave=False, # do not save images/videos
+ classes=None, # filter by class: --class 0, or --class 0 2 3
+ agnostic_nms=False, # class-agnostic NMS
+ augment=False, # augmented inference
+ visualize=False, # visualize features
+ update=False, # update all models
+ project=ROOT / 'runs/predict-seg', # save results to project/name
+ name='exp', # save results to project/name
+ exist_ok=False, # existing project/name ok, do not increment
+ line_thickness=3, # bounding box thickness (pixels)
+ hide_labels=False, # hide labels
+ hide_conf=False, # hide confidences
+ half=False, # use FP16 half-precision inference
+ dnn=False, # use OpenCV DNN for ONNX inference
+ vid_stride=1, # video frame-rate stride
+ retina_masks=False,
+):
+ source = str(source)
+ save_img = not nosave and not source.endswith('.txt') # save inference images
+ is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+ is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+ webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
+ if is_url and is_file:
+ source = check_file(source) # download
+
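+    # resolve inference size: use 'img' if given, else 'imgsz', else default to 640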
+ if imgsz is None and img is None:
+ imgsz = 640
+ elif img is not None:
+ imgsz = img
+
+ if isinstance(imgsz, int):
+ imgsz = [imgsz, imgsz]
+
+ # Directories
+ save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+ # Load model
+ device = select_device(device)
+ model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+ stride, names, pt = model.stride, model.names, model.pt
+ imgsz = check_img_size(imgsz, s=stride) # check image size
+
+ # Dataloader
+ if webcam:
+ view_img = check_imshow()
+ dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+ bs = len(dataset) # batch_size
+ else:
+ dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+ bs = 1 # batch_size
+ vid_path, vid_writer = [None] * bs, [None] * bs
+
+ # Run inference
+ model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
+ seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+ for path, im, im0s, vid_cap, s in dataset:
+ with dt[0]:
+ im = torch.from_numpy(im).to(device)
+ im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
+ im /= 255 # 0 - 255 to 0.0 - 1.0
+ if len(im.shape) == 3:
+ im = im[None] # expand for batch dim
+
+ # Inference
+ with dt[1]:
+ visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+ pred, proto = model(im, augment=augment, visualize=visualize)[:2]
+
+ # NMS
+ with dt[2]:
+ pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
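+            # nm=32: keep the 32 mask coefficients per detection through NMS; they are consumed by process_mask() below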
+
+ # Second-stage classifier (optional)
+ # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+ # Process predictions
+ for i, det in enumerate(pred): # per image
+ seen += 1
+ if webcam: # batch_size >= 1
+ p, im0, frame = path[i], im0s[i].copy(), dataset.count
+ s += f'{i}: '
+ else:
+ p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+ p = Path(p) # to Path
+ save_path = str(save_dir / p.name) # im.jpg
+ txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
+ s += '%gx%g ' % im.shape[2:] # print string
+ gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
+ imc = im0.copy() if save_crop else im0 # for save_crop
+ annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+ if len(det):
+ masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC
+
+ # Rescale boxes from img_size to im0 size
+ det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
+
+ # Print results
+ for c in det[:, 5].unique():
+ n = (det[:, 5] == c).sum() # detections per class
+ s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
+
+ # Mask plotting
+ annotator.masks(masks,
+ colors=[colors(x, True) for x in det[:, 5]],
+ im_gpu=None if retina_masks else im[i])
+
+ # Write results
+ for *xyxy, conf, cls in reversed(det[:, :6]):
+ if save_txt: # Write to file
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
+ with open(f'{txt_path}.txt', 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+ if save_img or save_crop or view_img: # Add bbox to image
+ c = int(cls) # integer class
+ label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+ annotator.box_label(xyxy, label, color=colors(c, True))
+ if save_crop:
+ save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+ # Stream results
+ im0 = annotator.result()
+ if view_img:
+ if platform.system() == 'Linux' and p not in windows:
+ windows.append(p)
+ cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
+ cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+ cv2.imshow(str(p), im0)
+ if cv2.waitKey(1) == ord('q'): # 1 millisecond
+ exit()
+
+ # Save results (image with detections)
+ if save_img:
+ if dataset.mode == 'image':
+ cv2.imwrite(save_path, im0)
+ else: # 'video' or 'stream'
+ if vid_path[i] != save_path: # new video
+ vid_path[i] = save_path
+ if isinstance(vid_writer[i], cv2.VideoWriter):
+ vid_writer[i].release() # release previous video writer
+ if vid_cap: # video
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ else: # stream
+ fps, w, h = 30, im0.shape[1], im0.shape[0]
+ save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
+ vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+ vid_writer[i].write(im0)
+
+ # Print time (inference-only)
+ LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+ # Print results
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+ if save_txt or save_img:
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+ if update:
+ strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
+ parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam')
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+ parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+ parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+ parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--view-img', action='store_true', help='show results')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+ parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+ parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+ parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--visualize', action='store_true', help='visualize features')
+ parser.add_argument('--update', action='store_true', help='update all models')
+ parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
+ parser.add_argument('--name', default='exp', help='save results to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+ parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+ parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+ parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+ parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+ parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+ parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
+ opt = parser.parse_args()
+ opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
+ print_args(vars(opt))
+ return opt
+
+
+def main(opt):
+ check_requirements(exclude=('tensorboard', 'thop'))
+ run(**vars(opt))
+
+
+if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
diff --git a/yolov5/segment/train.py b/yolov5/segment/train.py
new file mode 100644
index 0000000..a1152d4
--- /dev/null
+++ b/yolov5/segment/train.py
@@ -0,0 +1,697 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Train a YOLOv5 segmentation model on a segmentation dataset
+Models and datasets download automatically from the latest YOLOv5 release.
+
+Usage - Single-GPU training:
+ $ yolov5 segment train --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended)
+ $ yolov5 segment train --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch
+
+Usage - Multi-GPU DDP training:
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
+
+Models: https://github.com/ultralytics/yolov5/tree/master/models
+Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
+"""
+
+import argparse
+import math
+import os
+import random
+import sys
+import time
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import yaml
+from torch.optim import lr_scheduler
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+import torch.nn.functional as F
+import yolov5.segment.val as validate # for end-of-epoch mAP
+from yolov5.models.experimental import attempt_load
+from yolov5.models.yolo import SegmentationModel
+from yolov5.utils.autoanchor import check_anchors
+from yolov5.utils.autobatch import check_train_batch_size
+from yolov5.utils.callbacks import Callbacks
+from yolov5.utils.downloads import attempt_download, is_url
+from yolov5.utils.general import (LOGGER, check_amp, check_dataset, check_file,
+ check_git_status, check_img_size,
+ check_requirements, check_suffix, check_yaml,
+ colorstr, get_latest_run, increment_path,
+ init_seeds, intersect_dicts,
+ labels_to_class_weights,
+ labels_to_image_weights, one_cycle,
+ print_args, print_mutation, strip_optimizer,
+ yaml_save, yolov5_in_syspath)
+from yolov5.utils.loggers import GenericLogger
+from yolov5.utils.plots import plot_evolve, plot_labels
+from yolov5.utils.segment.dataloaders import create_dataloader
+from yolov5.utils.segment.loss import ComputeLoss
+from yolov5.utils.segment.metrics import KEYS, fitness
+from yolov5.utils.segment.plots import (plot_images_and_masks,
+ plot_results_with_masks)
+from yolov5.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel,
+ select_device, smart_DDP,
+ smart_optimizer, smart_resume,
+ torch_distributed_zero_first)
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+
+
+def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary
+ save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
+ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
+ # callbacks.run('on_pretrain_routine_start')
+
+ # Directories
+ w = save_dir / 'weights' # weights dir
+ (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
+ last, best = w / 'last.pt', w / 'best.pt'
+
+ # Hyperparameters
+ if isinstance(hyp, str):
+ with open(hyp, errors='ignore') as f:
+ hyp = yaml.safe_load(f) # load hyps dict
+ LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
+ opt.hyp = hyp.copy() # for saving hyps to checkpoints
+
+ # Save run settings
+ if not evolve:
+ yaml_save(save_dir / 'hyp.yaml', hyp)
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
+
+ # Loggers
+ data_dict = None
+ if RANK in {-1, 0}:
+ logger = GenericLogger(opt=opt, console_logger=LOGGER)
+ # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
+ # if loggers.clearml:
+ # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML
+ # if loggers.wandb:
+ # data_dict = loggers.wandb.data_dict
+ # if resume:
+ # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
+ #
+ # # Register actions
+ # for k in methods(loggers):
+ # callbacks.register_action(k, callback=getattr(loggers, k))
+
+ # Config
+ plots = not evolve and not opt.noplots # create plots
+ overlap = not opt.no_overlap
+ cuda = device.type != 'cpu'
+ init_seeds(opt.seed + 1 + RANK, deterministic=True)
+ with torch_distributed_zero_first(LOCAL_RANK):
+ data_dict = data_dict or check_dataset(data) # check if None
+ train_path, val_path = data_dict['train'], data_dict['val']
+ nc = 1 if single_cls else int(data_dict['nc']) # number of classes
+ names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
+ is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
+
+ # Model
+ check_suffix(weights, '.pt') # check weights
+ pretrained = weights.endswith('.pt')
+ if pretrained:
+ with torch_distributed_zero_first(LOCAL_RANK):
+ weights = attempt_download(weights) # download if not found locally
+ with yolov5_in_syspath():
+ ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
+ model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
+ exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
+ csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
+ model.load_state_dict(csd, strict=False) # load
+ LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
+ else:
+ model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
+ amp = check_amp(model) # check AMP
+
+ # Freeze
+ if isinstance(freeze, int):
+ freeze = [freeze]
+ freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
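+    # e.g. --freeze 10 freezes model.0 through model.9 (the backbone); --freeze 0 1 2 freezes exactly those layer indices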
+ for k, v in model.named_parameters():
+ v.requires_grad = True # train all layers
+ # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
+ if any(x in k for x in freeze):
+ LOGGER.info(f'freezing {k}')
+ v.requires_grad = False
+
+ # Image size
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
+ imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
+
+ # Batch size
+ if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
+ batch_size = check_train_batch_size(model, imgsz, amp)
+ logger.update_params({"batch_size": batch_size})
+ # loggers.on_params_update({"batch_size": batch_size})
+
+ # Optimizer
+ nbs = 64 # nominal batch size
+ accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
+ hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
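+    # e.g. batch_size=16 gives accumulate=4, so gradients are accumulated over 4 batches to approximate the nominal batch size of 64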
+ optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
+
+ # Scheduler
+ if opt.cos_lr:
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
+ else:
+ lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
+
+ # EMA
+ ema = ModelEMA(model) if RANK in {-1, 0} else None
+
+ # Resume
+ best_fitness, start_epoch = 0.0, 0
+ if pretrained:
+ if resume:
+ best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
+ del ckpt, csd
+
+ # DP mode
+ if cuda and RANK == -1 and torch.cuda.device_count() > 1:
+ LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
+ model = torch.nn.DataParallel(model)
+
+ # SyncBatchNorm
+ if opt.sync_bn and cuda and RANK != -1:
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+ LOGGER.info('Using SyncBatchNorm()')
+
+ # Trainloader
+ train_loader, dataset = create_dataloader(
+ train_path,
+ imgsz,
+ batch_size // WORLD_SIZE,
+ gs,
+ single_cls,
+ hyp=hyp,
+ augment=True,
+ cache=None if opt.cache == 'val' else opt.cache,
+ rect=opt.rect,
+ rank=LOCAL_RANK,
+ workers=workers,
+ image_weights=opt.image_weights,
+ quad=opt.quad,
+ prefix=colorstr('train: '),
+ shuffle=True,
+ mask_downsample_ratio=mask_ratio,
+ overlap_mask=overlap,
+ )
+ labels = np.concatenate(dataset.labels, 0)
+ mlc = int(labels[:, 0].max()) # max label class
+ assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
+
+ # Process 0
+ if RANK in {-1, 0}:
+ val_loader = create_dataloader(val_path,
+ imgsz,
+ batch_size // WORLD_SIZE * 2,
+ gs,
+ single_cls,
+ hyp=hyp,
+ cache=None if noval else opt.cache,
+ rect=True,
+ rank=-1,
+ workers=workers * 2,
+ pad=0.5,
+ mask_downsample_ratio=mask_ratio,
+ overlap_mask=overlap,
+ prefix=colorstr('val: '))[0]
+
+ if not resume:
+ if not opt.noautoanchor:
+ check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
+ model.half().float() # pre-reduce anchor precision
+
+ if plots:
+ plot_labels(labels, names, save_dir)
+ # callbacks.run('on_pretrain_routine_end', labels, names)
+
+ # DDP mode
+ if cuda and RANK != -1:
+ model = smart_DDP(model)
+
+ # Model attributes
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
+ hyp['box'] *= 3 / nl # scale to layers
+ hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
+ hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
+ hyp['label_smoothing'] = opt.label_smoothing
+ model.nc = nc # attach number of classes to model
+ model.hyp = hyp # attach hyperparameters to model
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
+ model.names = names
+
+ # Start training
+ t0 = time.time()
+ nb = len(train_loader) # number of batches
+ nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
+ last_opt_step = -1
+ maps = np.zeros(nc) # mAP per class
+    results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # box(P, R, mAP@.5, mAP@.5-.95), mask(P, R, mAP@.5, mAP@.5-.95), val_loss(box, seg, obj, cls)
+ scheduler.last_epoch = start_epoch - 1 # do not move
+ scaler = torch.cuda.amp.GradScaler(enabled=amp)
+ stopper, stop = EarlyStopping(patience=opt.patience), False
+ compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
+ # callbacks.run('on_train_start')
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
+ f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
+ f"Logging results to {colorstr('bold', save_dir)}\n"
+ f'Starting training for {epochs} epochs...')
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
+ # callbacks.run('on_train_epoch_start')
+ model.train()
+
+ # Update image weights (optional, single-GPU only)
+ if opt.image_weights:
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
+
+ # Update mosaic border (optional)
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
+
+ mloss = torch.zeros(4, device=device) # mean losses
+ if RANK != -1:
+ train_loader.sampler.set_epoch(epoch)
+ pbar = enumerate(train_loader)
+ LOGGER.info(('\n' + '%11s' * 8) %
+ ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
+ if RANK in {-1, 0}:
+ pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
+ optimizer.zero_grad()
+ for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------
+ # callbacks.run('on_train_batch_start')
+ ni = i + nb * epoch # number integrated batches (since train start)
+ imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
+
+ # Warmup
+ if ni <= nw:
+ xi = [0, nw] # x interp
+ # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
+ for j, x in enumerate(optimizer.param_groups):
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
+ if 'momentum' in x:
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
+
+ # Multi-scale
+ if opt.multi_scale:
+ sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
+ sf = sz / max(imgs.shape[2:]) # scale factor
+ if sf != 1:
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
+ imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
+
+ # Forward
+ with torch.cuda.amp.autocast(amp):
+ pred = model(imgs) # forward
+ loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
+ if RANK != -1:
+ loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
+ if opt.quad:
+ loss *= 4.
+
+ # Backward
+ scaler.scale(loss).backward()
+
+ # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
+ if ni - last_opt_step >= accumulate:
+ scaler.unscale_(optimizer) # unscale gradients
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
+ scaler.step(optimizer) # optimizer.step
+ scaler.update()
+ optimizer.zero_grad()
+ if ema:
+ ema.update(model)
+ last_opt_step = ni
+
+ # Log
+ if RANK in {-1, 0}:
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
+ mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
+ pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
+ (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
+ # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
+ # if callbacks.stop_training:
+ # return
+
+ # Mosaic plots
+ if plots:
+ if ni < 3:
+ plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg")
+ if ni == 10:
+ files = sorted(save_dir.glob('train*.jpg'))
+ logger.log_images(files, "Mosaics", epoch)
+ # end batch ------------------------------------------------------------------------------------------------
+
+ # Scheduler
+ lr = [x['lr'] for x in optimizer.param_groups] # for loggers
+ scheduler.step()
+
+ if RANK in {-1, 0}:
+ # mAP
+ # callbacks.run('on_train_epoch_end', epoch=epoch)
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
+ final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
+ if not noval or final_epoch: # Calculate mAP
+ results, maps, _ = validate.run(data_dict,
+ batch_size=batch_size // WORLD_SIZE * 2,
+ imgsz=imgsz,
+ half=amp,
+ model=ema.ema,
+ single_cls=single_cls,
+ dataloader=val_loader,
+ save_dir=save_dir,
+ plots=False,
+ callbacks=callbacks,
+ compute_loss=compute_loss,
+ mask_downsample_ratio=mask_ratio,
+ overlap=overlap)
+
+ # Update best mAP
+            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of box and mask [P, R, mAP@.5, mAP@.5-.95]
+ stop = stopper(epoch=epoch, fitness=fi) # early stop check
+ if fi > best_fitness:
+ best_fitness = fi
+ log_vals = list(mloss) + list(results) + lr
+ # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
+ # Log val metrics and media
+ metrics_dict = dict(zip(KEYS, log_vals))
+ logger.log_metrics(metrics_dict, epoch)
+
+ # Save model
+ if (not nosave) or (final_epoch and not evolve): # if save
+ ckpt = {
+ 'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'model': deepcopy(de_parallel(model)).half(),
+ 'ema': deepcopy(ema.ema).half(),
+ 'updates': ema.updates,
+ 'optimizer': optimizer.state_dict(),
+ # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
+ 'opt': vars(opt),
+ 'date': datetime.now().isoformat()}
+
+ # Save last, best and delete
+ torch.save(ckpt, last)
+ if best_fitness == fi:
+ torch.save(ckpt, best)
+ if opt.save_period > 0 and epoch % opt.save_period == 0:
+ torch.save(ckpt, w / f'epoch{epoch}.pt')
+ logger.log_model(w / f'epoch{epoch}.pt')
+ del ckpt
+ # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
+
+ # EarlyStopping
+ if RANK != -1: # if DDP training
+ broadcast_list = [stop if RANK == 0 else None]
+ dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
+ if RANK != 0:
+ stop = broadcast_list[0]
+ if stop:
+ break # must break all DDP ranks
+
+ # end epoch ----------------------------------------------------------------------------------------------------
+ # end training -----------------------------------------------------------------------------------------------------
+ if RANK in {-1, 0}:
+ LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
+ for f in last, best:
+ if f.exists():
+ strip_optimizer(f) # strip optimizers
+ if f is best:
+ LOGGER.info(f'\nValidating {f}...')
+ results, _, _ = validate.run(
+ data_dict,
+ batch_size=batch_size // WORLD_SIZE * 2,
+ imgsz=imgsz,
+ model=attempt_load(f, device).half(),
+ iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
+ single_cls=single_cls,
+ dataloader=val_loader,
+ save_dir=save_dir,
+ save_json=is_coco,
+ verbose=True,
+ plots=plots,
+ callbacks=callbacks,
+ compute_loss=compute_loss,
+ mask_downsample_ratio=mask_ratio,
+ overlap=overlap) # val best model with plots
+ if is_coco:
+ # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
+ metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
+ logger.log_metrics(metrics_dict, epoch)
+
+ # callbacks.run('on_train_end', last, best, epoch, results)
+ # on train end callback using genericLogger
+ logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
+ if not opt.evolve:
+ logger.log_model(best, epoch)
+ if plots:
+ plot_results_with_masks(file=save_dir / 'results.csv') # save results.png
+ files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
+ files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter
+ LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
+ logger.log_images(files, "Results", epoch + 1)
+ logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1)
+ torch.cuda.empty_cache()
+ return results
+
+
+def parse_opt(known=False):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
+ parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
+ parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+ parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+ parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+ parser.add_argument('--noplots', action='store_true', help='save no plot files')
+ parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+ parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+ parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+ parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+ # Instance Segmentation Args
+    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample ratio for ground-truth masks (reduces memory use)')
+    parser.add_argument('--no-overlap', action='store_true', help='Disable mask overlap (overlapping masks train faster at slightly lower mAP)')
+
+ # Neptune AI arguments
+ parser.add_argument('--neptune_token', type=str, default=None, help='neptune.ai api token')
+    parser.add_argument('--neptune_project', type=str, default=None, help='neptune.ai project name (see https://docs.neptune.ai/api-reference/neptune)')
+
+ # Weights & Biases arguments
+ # parser.add_argument('--entity', default=None, help='W&B: Entity')
+ # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
+ # parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
+ # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
+
+ return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt, callbacks=Callbacks()):
+ # Checks
+ if RANK in {-1, 0}:
+ print_args(vars(opt))
+ check_git_status()
+ check_requirements()
+
+ # Resume
+ if opt.resume and not opt.evolve: # resume from specified or most recent last.pt
+ last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
+ opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml
+ opt_data = opt.data # original dataset
+ if opt_yaml.is_file():
+ with open(opt_yaml, errors='ignore') as f:
+ d = yaml.safe_load(f)
+ else:
+ d = torch.load(last, map_location='cpu')['opt']
+ opt = argparse.Namespace(**d) # replace
+ opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate
+ if is_url(opt_data):
+ opt.data = check_file(opt_data) # avoid HUB resume auth timeout
+ else:
+ opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
+ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
+ assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+ if opt.evolve:
+        if opt.project == str(ROOT / 'runs/train-seg'):  # if default project name, rename to runs/evolve-seg
+            opt.project = str(ROOT / 'runs/evolve-seg')
+ opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
+ if opt.name == 'cfg':
+ opt.name = Path(opt.cfg).stem # use model.yaml as name
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
+
+ # DDP mode
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ if LOCAL_RANK != -1:
+ msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
+ assert not opt.image_weights, f'--image-weights {msg}'
+ assert not opt.evolve, f'--evolve {msg}'
+ assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
+ assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+ assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+ torch.cuda.set_device(LOCAL_RANK)
+ device = torch.device('cuda', LOCAL_RANK)
+ dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
+
+ # Train
+ if not opt.evolve:
+ train(opt.hyp, opt, device, callbacks)
+
+ # Evolve hyperparameters (optional)
+ else:
+ # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+ meta = {
+ 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
+ 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
+ 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
+ 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
+ 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
+ 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
+ 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
+ 'box': (1, 0.02, 0.2), # box loss gain
+ 'cls': (1, 0.2, 4.0), # cls loss gain
+ 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
+ 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
+ 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
+ 'iou_t': (0, 0.1, 0.7), # IoU training threshold
+ 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
+ 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
+ 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
+ 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
+ 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
+ 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
+ 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
+ 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
+ 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
+ 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
+ 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
+ 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
+ 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
+ 'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
+ 'mixup': (1, 0.0, 1.0), # image mixup (probability)
+ 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
+
+ with open(opt.hyp, errors='ignore') as f:
+ hyp = yaml.safe_load(f) # load hyps dict
+ if 'anchors' not in hyp: # anchors commented in hyp.yaml
+ hyp['anchors'] = 3
+ if opt.noautoanchor:
+ del hyp['anchors'], meta['anchors']
+ opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
+ # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
+ evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+ if opt.bucket:
+ os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists
+
+ for _ in range(opt.evolve): # generations to evolve
+ if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
+ # Select parent(s)
+ parent = 'single' # parent selection method: 'single' or 'weighted'
+ x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+ n = min(5, len(x)) # number of previous results to consider
+ x = x[np.argsort(-fitness(x))][:n] # top n mutations
+ w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
+ if parent == 'single' or len(x) == 1:
+ # x = x[random.randint(0, n - 1)] # random selection
+ x = x[random.choices(range(n), weights=w)[0]] # weighted selection
+ elif parent == 'weighted':
+ x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
+
+ # Mutate
+ mp, s = 0.8, 0.2 # mutation probability, sigma
+ npr = np.random
+ npr.seed(int(time.time()))
+ g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
+ ng = len(meta)
+ v = np.ones(ng)
+ while all(v == 1): # mutate until a change occurs (prevent duplicates)
+ v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+ for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
+ hyp[k] = float(x[i + 7] * v[i]) # mutate
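+ # note: each evolve.csv row stores 7 result columns (P, R, mAP@0.5, mAP@0.5:0.95, val box/obj/cls losses)
+ # before the hyperparameter values, hence the i + 7 offset when reading the selected parent above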
+
+ # Constrain to limits
+ for k, v in meta.items():
+ hyp[k] = max(hyp[k], v[1]) # lower limit
+ hyp[k] = min(hyp[k], v[2]) # upper limit
+ hyp[k] = round(hyp[k], 5) # significant digits
+
+ # Train mutation
+ results = train(hyp.copy(), opt, device, callbacks)
+ callbacks = Callbacks()
+ # Write mutation results
+ print_mutation(results, hyp.copy(), save_dir, opt.bucket)
+
+ # Plot results
+ plot_evolve(evolve_csv)
+ LOGGER.info(f'Hyperparameter evolution finished after {opt.evolve} generations\n'
+ f"Results saved to {colorstr('bold', save_dir)}\n"
+ f'Usage example: $ python train.py --hyp {evolve_yaml}')
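+ # hedged usage sketch (dataset and weights paths are illustrative): a short evolution run could look like
+ # $ yolov5 train --data coco128.yaml --weights yolov5s.pt --epochs 10 --evolve 300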
+
+
+def run(**kwargs):
+ # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+ opt = parse_opt(True)
+ for k, v in kwargs.items():
+ setattr(opt, k, v)
+ main(opt)
+ return opt
+
+
+def run_cli(**kwargs):
+ '''
+ To be called from yolov5.cli
+ '''
+ opt = parse_opt(True)
+ for k, v in kwargs.items():
+ setattr(opt, k, v)
+ main(opt)
+
+
+if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
diff --git a/yolov5/segment/val.py b/yolov5/segment/val.py
new file mode 100644
index 0000000..3653741
--- /dev/null
+++ b/yolov5/segment/val.py
@@ -0,0 +1,484 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Validate a trained YOLOv5 segment model on a segment dataset
+
+Usage:
+ $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images)
+ $ yolov5 segment val --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments
+
+Usage - formats:
+ $ yolov5 segment val --weights yolov5s-seg.pt # PyTorch
+ yolov5s-seg.torchscript # TorchScript
+ yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-seg.xml # OpenVINO
+ yolov5s-seg.engine # TensorRT
+ yolov5s-seg.mlmodel # CoreML (macOS-only)
+ yolov5s-seg_saved_model # TensorFlow SavedModel
+ yolov5s-seg.pb # TensorFlow GraphDef
+ yolov5s-seg.tflite # TensorFlow Lite
+ yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-seg_paddle_model # PaddlePaddle
+"""
+
+import argparse
+import json
+import os
+import sys
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+import torch.nn.functional as F
+from yolov5.models.common import DetectMultiBackend
+from yolov5.models.yolo import SegmentationModel
+from yolov5.utils.callbacks import Callbacks
+from yolov5.utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset,
+ check_img_size, check_requirements,
+ check_yaml, coco80_to_coco91_class, colorstr,
+ increment_path, non_max_suppression,
+ print_args, scale_coords, xywh2xyxy,
+ xyxy2xywh)
+from yolov5.utils.metrics import ConfusionMatrix, box_iou
+from yolov5.utils.plots import output_to_target, plot_val_study
+from yolov5.utils.segment.dataloaders import create_dataloader
+from yolov5.utils.segment.general import (mask_iou, process_mask,
+ process_mask_upsample, scale_image)
+from yolov5.utils.segment.metrics import Metrics, ap_per_class_box_and_mask
+from yolov5.utils.segment.plots import plot_images_and_masks
+from yolov5.utils.torch_utils import (de_parallel, select_device,
+ smart_inference_mode)
+
+
+def save_one_txt(predn, save_conf, shape, file):
+ # Save one txt result
+ gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
+ for *xyxy, conf, cls in predn.tolist():
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
+ with open(file, 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
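+ # each saved line looks like "<cls> <x> <y> <w> <h> [conf]" with xywh normalized to 0-1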
+
+
+def save_one_json(predn, jdict, path, class_map, pred_masks):
+ # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+ from pycocotools.mask import encode
+
+ def single_encode(x):
+ rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+ rle["counts"] = rle["counts"].decode("utf-8")
+ return rle
+
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+ box = xyxy2xywh(predn[:, :4]) # xywh
+ box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
+ pred_masks = np.transpose(pred_masks, (2, 0, 1))
+ with ThreadPool(NUM_THREADS) as pool:
+ rles = pool.map(single_encode, pred_masks)
+ for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
+ jdict.append({
+ 'image_id': image_id,
+ 'category_id': class_map[int(p[5])],
+ 'bbox': [round(x, 3) for x in b],
+ 'score': round(p[4], 5),
+ 'segmentation': rles[i]})
+
+
+def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
+ """
+ Return correct prediction matrix
+ Arguments:
+ detections (array[N, 6]), x1, y1, x2, y2, conf, class
+ labels (array[M, 5]), class, x1, y1, x2, y2
+ Returns:
+ correct (array[N, 10]), for 10 IoU levels
+ """
+ if masks:
+ if overlap:
+ nl = len(labels)
+ index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
+ gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640)
+ gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
+ if gt_masks.shape[1:] != pred_masks.shape[1:]:
+ gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
+ gt_masks = gt_masks.gt_(0.5)
+ iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
+ else: # boxes
+ iou = box_iou(labels[:, 1:], detections[:, :4])
+
+ correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
+ correct_class = labels[:, 0:1] == detections[:, 5]
+ for i in range(len(iouv)):
+ x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match
+ if x[0].shape[0]:
+ matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou]
+ if x[0].shape[0] > 1:
+ matches = matches[matches[:, 2].argsort()[::-1]]
+ matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+ # matches = matches[matches[:, 2].argsort()[::-1]]
+ matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+ correct[matches[:, 1].astype(int), i] = True
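+ # after sorting by IoU, keep at most one detection per label and one label per detection (greedy one-to-one matching)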
+ return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
+
+
+@smart_inference_mode()
+def run(
+ data,
+ weights=None, # model.pt path(s)
+ batch_size=None, # batch size
+ batch=None, # batch size
+ imgsz=None, # inference size (pixels)
+ img=None, # inference size (pixels)
+ conf_thres=0.001, # confidence threshold
+ iou_thres=0.6, # NMS IoU threshold
+ max_det=300, # maximum detections per image
+ task='val', # train, val, test, speed or study
+ device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
+ workers=8, # max dataloader workers (per RANK in DDP mode)
+ single_cls=False, # treat as single-class dataset
+ augment=False, # augmented inference
+ verbose=False, # verbose output
+ save_txt=False, # save results to *.txt
+ save_hybrid=False, # save label+prediction hybrid results to *.txt
+ save_conf=False, # save confidences in --save-txt labels
+ save_json=False, # save a COCO-JSON results file
+ project='runs/val-seg', # save to project/name
+ name='exp', # save to project/name
+ exist_ok=False, # existing project/name ok, do not increment
+ half=True, # use FP16 half-precision inference
+ dnn=False, # use OpenCV DNN for ONNX inference
+ model=None,
+ dataloader=None,
+ save_dir=Path(''),
+ plots=True,
+ overlap=False,
+ mask_downsample_ratio=1,
+ compute_loss=None,
+ callbacks=Callbacks(),
+):
+ if save_json:
+ check_requirements(['pycocotools'])
+ process = process_mask_upsample # more accurate
+ else:
+ process = process_mask # faster
+
+ if imgsz is None and img is None:
+ imgsz = 640
+ elif img is not None:
+ imgsz = img
+ if batch_size is None and batch is None:
+ batch_size = 32
+ elif batch is not None:
+ batch_size = batch
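+ # 'img' and 'batch' are accepted as keyword aliases for 'imgsz' and 'batch_size', matching the CLI flag names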
+
+ # Initialize/load model and set device
+ training = model is not None
+ if training: # called by train.py
+ device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
+ half &= device.type != 'cpu' # half precision only supported on CUDA
+ model.half() if half else model.float()
+ nm = de_parallel(model).model[-1].nm # number of masks
+ else: # called directly
+ device = select_device(device, batch_size=batch_size)
+
+ # Directories
+ save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+ # Load model
+ model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+ stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+ imgsz = check_img_size(imgsz, s=stride) # check image size
+ half = model.fp16 # FP16 supported on limited backends with CUDA
+ nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks
+ if engine:
+ batch_size = model.batch_size
+ else:
+ device = model.device
+ if not (pt or jit):
+ batch_size = 1 # export.py models default to batch-size 1
+ LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+
+ # Data
+ data = check_dataset(data) # check
+
+ # Configure
+ model.eval()
+ cuda = device.type != 'cpu'
+ is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
+ nc = 1 if single_cls else int(data['nc']) # number of classes
+ iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95
+ niou = iouv.numel()
+
+ # Dataloader
+ if not training:
+ if pt and not single_cls: # check --weights are trained on --data
+ ncm = model.model.nc
+ assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
+ f'classes). Pass correct combination of --weights and --data that are trained together.'
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup
+ pad = 0.0 if task in ('speed', 'benchmark') else 0.5
+ rect = False if task == 'benchmark' else pt # square inference for benchmarks
+ task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
+ dataloader = create_dataloader(data[task],
+ imgsz,
+ batch_size,
+ stride,
+ single_cls,
+ pad=pad,
+ rect=rect,
+ workers=workers,
+ prefix=colorstr(f'{task}: '),
+ overlap_mask=overlap,
+ mask_downsample_ratio=mask_downsample_ratio)[0]
+
+ seen = 0
+ confusion_matrix = ConfusionMatrix(nc=nc)
+ names = model.names if hasattr(model, 'names') else model.module.names # get class names
+ if isinstance(names, (list, tuple)): # old format
+ names = dict(enumerate(names))
+ class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
+ s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R",
+ "mAP50", "mAP50-95)")
+ dt = Profile(), Profile(), Profile()
+ metrics = Metrics()
+ loss = torch.zeros(4, device=device)
+ jdict, stats = [], []
+ # callbacks.run('on_val_start')
+ pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
+ for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
+ # callbacks.run('on_val_batch_start')
+ with dt[0]:
+ if cuda:
+ im = im.to(device, non_blocking=True)
+ targets = targets.to(device)
+ masks = masks.to(device)
+ masks = masks.float()
+ im = im.half() if half else im.float() # uint8 to fp16/32
+ im /= 255 # 0 - 255 to 0.0 - 1.0
+ nb, _, height, width = im.shape # batch size, channels, height, width
+
+ # Inference
+ with dt[1]:
+ preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)
+
+ # Loss
+ if compute_loss:
+ loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls
+
+ # NMS
+ targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
+ lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
+ with dt[2]:
+ preds = non_max_suppression(preds,
+ conf_thres,
+ iou_thres,
+ labels=lb,
+ multi_label=True,
+ agnostic=single_cls,
+ max_det=max_det,
+ nm=nm)
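+ # nm = number of mask coefficients trailing the 6 box/conf/cls columns in each prediction row (32 by default here),
+ # so NMS keeps the coefficients aligned with their surviving boxes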
+
+ # Metrics
+ plot_masks = [] # masks for plotting
+ for si, (pred, proto) in enumerate(zip(preds, protos)):
+ labels = targets[targets[:, 0] == si, 1:]
+ nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
+ path, shape = Path(paths[si]), shapes[si][0]
+ correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
+ correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
+ seen += 1
+
+ if npr == 0:
+ if nl:
+ stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
+ if plots:
+ confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
+ continue
+
+ # Masks
+ midx = [si] if overlap else targets[:, 0] == si
+ gt_masks = masks[midx]
+ pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])
+
+ # Predictions
+ if single_cls:
+ pred[:, 5] = 0
+ predn = pred.clone()
+ scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
+
+ # Evaluate
+ if nl:
+ tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
+ scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
+ labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
+ correct_bboxes = process_batch(predn, labelsn, iouv)
+ correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
+ if plots:
+ confusion_matrix.process_batch(predn, labelsn)
+ stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls)
+
+ pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
+ if plots and batch_i < 3:
+ plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot
+
+ # Save/log
+ if save_txt:
+ save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+ if save_json:
+ pred_masks = scale_image(im[si].shape[1:],
+ pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
+ save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary
+ # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
+
+ # Plot images
+ if plots and batch_i < 3:
+ if len(plot_masks):
+ plot_masks = torch.cat(plot_masks, dim=0)
+ plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
+ plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
+ save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
+
+ # callbacks.run('on_val_batch_end')
+
+ # Compute metrics
+ stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
+ if len(stats) and stats[0].any():
+ results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
+ metrics.update(results)
+ nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class
+
+ # Print results
+ pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format
+ LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results()))
+ if nt.sum() == 0:
+ LOGGER.warning(f'WARNING: no labels found in {task} set, cannot compute metrics without labels ⚠️')
+
+ # Print results per class
+ if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
+ for i, c in enumerate(metrics.ap_class_index):
+ LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
+
+ # Print speeds
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
+ if not training:
+ shape = (batch_size, 3, imgsz, imgsz)
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
+
+ # Plots
+ if plots:
+ confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
+ # callbacks.run('on_val_end')
+
+ mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
+
+ # Save JSON
+ if save_json and len(jdict):
+ w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
+ anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
+ pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
+ LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
+ with open(pred_json, 'w') as f:
+ json.dump(jdict, f)
+
+ try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+ from pycocotools.coco import COCO
+ from pycocotools.cocoeval import COCOeval
+
+ anno = COCO(anno_json) # init annotations api
+ pred = anno.loadRes(pred_json) # init predictions api
+ results = []
+ for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
+ if is_coco:
+ eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate
+ eval.evaluate()
+ eval.accumulate()
+ eval.summarize()
+ results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5)
+ map_bbox, map50_bbox, map_mask, map50_mask = results
+ except Exception as e:
+ LOGGER.info(f'pycocotools unable to run: {e}')
+
+ # Return results
+ model.float() # for training
+ if not training:
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+ final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
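+ # return: the 8 mean box/mask metrics plus the 4 mean val loss components, then per-class mAPs and per-image timings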
+ return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t
+
+
+def parse_opt():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s-seg.pt', help='model path(s)')
+ parser.add_argument('--batch-size', type=int, default=32, help='batch size')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
+ parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
+ parser.add_argument('--task', default='val', help='train, val, test, speed or study')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--verbose', action='store_true', help='report mAP by class')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
+ parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+ parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+ opt = parser.parse_args()
+ opt.data = check_yaml(opt.data) # check YAML
+ # opt.save_json |= opt.data.endswith('coco.yaml')
+ opt.save_txt |= opt.save_hybrid
+ print_args(vars(opt))
+ return opt
+
+
+def main(opt=None):
+ opt = opt or parse_opt()
+ check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+ if opt.task in ('train', 'val', 'test'): # run normally
+ if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
+ LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️')
+ if opt.save_hybrid:
+ LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️')
+ run(**vars(opt))
+
+ else:
+ weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
+ opt.half = True # FP16 for fastest results
+ if opt.task == 'speed': # speed benchmarks
+ # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
+ opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
+ for opt.weights in weights:
+ run(**vars(opt), plots=False)
+
+ elif opt.task == 'study': # speed vs mAP benchmarks
+ # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
+ for opt.weights in weights:
+ f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to
+ x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
+ for opt.imgsz in x: # img-size
+ LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
+ r, _, t = run(**vars(opt), plots=False)
+ y.append(r + t) # results and times
+ np.savetxt(f, y, fmt='%10.4g') # save
+ os.system('zip -r study.zip study_*.txt')
+ plot_val_study(x=x) # plot
+
+
+if __name__ == "__main__":
+ main()
diff --git a/yolov5/train.py b/yolov5/train.py
index 89cc664..56b658c 100644
--- a/yolov5/train.py
+++ b/yolov5/train.py
@@ -1,15 +1,18 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset.
-
Models and datasets download automatically from the latest YOLOv5 release.
-Models: https://github.com/ultralytics/yolov5/tree/master/models
-Datasets: https://github.com/ultralytics/yolov5/tree/master/data
-Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
-Usage:
- $ yolov5 train --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED)
+Usage - Single-GPU training:
+ $ yolov5 train --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
$ yolov5 train --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
+
+Usage - Multi-GPU DDP training:
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3
+
+Models: https://github.com/ultralytics/yolov5/tree/master/models
+Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
"""
import argparse
@@ -34,7 +37,7 @@
ROOT = FILE.parents[0] # YOLOv5 root directory
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-import yolov5.val as val # for end-of-epoch mAP
+import yolov5.val as validate # for end-of-epoch mAP
from yolov5.models.experimental import attempt_load
from yolov5.models.yolo import Model
from yolov5.utils.autoanchor import check_anchors
@@ -51,12 +54,13 @@
labels_to_class_weights,
labels_to_image_weights, methods, one_cycle,
print_args, print_mutation, strip_optimizer,
- yolov5_in_syspath)
+ yaml_save, yolov5_in_syspath)
from yolov5.utils.loggers import Loggers
+from yolov5.utils.loggers.comet.comet_utils import check_comet_resume
from yolov5.utils.loggers.wandb.wandb_utils import check_wandb_resume
from yolov5.utils.loss import ComputeLoss
from yolov5.utils.metrics import fitness
-from yolov5.utils.plots import plot_evolve, plot_labels
+from yolov5.utils.plots import plot_evolve
from yolov5.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel,
select_device, smart_DDP,
smart_optimizer, smart_resume,
@@ -130,10 +134,9 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
# Save run settings
if not evolve:
- with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.safe_dump(hyp, f, sort_keys=False)
- with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.safe_dump(vars(opt), f, sort_keys=False)
+ yaml_save(save_dir / 'hyp.yaml', hyp)
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
+
# Config
plots = not evolve and not opt.noplots # create plots
@@ -143,22 +146,22 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
data_dict = check_dataset(data) # check if None
train_path, val_path = data_dict['train'], data_dict['val']
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
- names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
- assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
+ names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
# Loggers
if RANK in {-1, 0}:
- loggers = Loggers(save_dir, weights, opt, hyp, LOGGER, mmdet_keys=opt.mmdet_tags, class_names=names) # loggers instance
- if loggers.wandb:
- data_dict = loggers.wandb.data_dict
- if resume:
- weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
+ loggers = Loggers(save_dir, weights, opt, hyp, LOGGER, mmdet_keys=opt.mmdet_tags, class_names=list(names.values())) # loggers instance
# Register actions
for k in methods(loggers):
callbacks.register_action(k, callback=getattr(loggers, k))
+ # Process custom dataset artifact link
+ #data_dict = loggers.remote_dataset
+ if resume: # If resuming runs from remote artifact
+ weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
+
# upload dataset to s3
if opt.upload_dataset and opt.s3_upload_dir:
with open(data, errors='ignore') as f:
@@ -194,6 +197,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
amp = check_amp(model) # check AMP
# Freeze
+ if isinstance(freeze, int):
+ freeze = [freeze]
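+ # a single int (e.g. --freeze 10) freezes the first N layers; passing several indices freezes exactly those layers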
freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
@@ -215,7 +220,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
- LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
# Scheduler
@@ -231,7 +235,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
# Resume
best_fitness, start_epoch = 0.0, 0
if pretrained:
- best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
+ if resume:
+ best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
del ckpt, csd
# DP mode
@@ -281,15 +286,11 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
prefix=colorstr('val: '))[0]
if not resume:
- if plots:
- plot_labels(labels, names, save_dir)
-
- # Anchors
if not opt.noautoanchor:
- check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
+ check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
model.half().float() # pre-reduce anchor precision
- callbacks.run('on_pretrain_routine_end')
+ callbacks.run('on_pretrain_routine_end', labels, names)
# DDP mode
if cuda and RANK != -1:
@@ -342,7 +343,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
if RANK != -1:
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
- LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
+ LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
if RANK in {-1, 0}:
pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
optimizer.zero_grad()
@@ -397,9 +398,9 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
- pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
+ pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
(f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
- callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots)
+ callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
if callbacks.stop_training:
return
# end batch ------------------------------------------------------------------------------------------------
@@ -414,17 +415,17 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
if not noval or final_epoch: # Calculate mAP
- results, maps, map50s, _ = val.run(data_dict,
- batch_size=batch_size // WORLD_SIZE * 2,
- imgsz=imgsz,
- half=amp,
- model=ema.ema,
- single_cls=single_cls,
- dataloader=val_loader,
- save_dir=save_dir,
- plots=False,
- callbacks=callbacks,
- compute_loss=compute_loss)
+ results, maps, map50s, _ = validate.run(data_dict,
+ batch_size=batch_size // WORLD_SIZE * 2,
+ imgsz=imgsz,
+ half=amp,
+ model=ema.ema,
+ single_cls=single_cls,
+ dataloader=val_loader,
+ save_dir=save_dir,
+ plots=False,
+ callbacks=callbacks,
+ compute_loss=compute_loss)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
@@ -438,7 +439,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
if (not nosave) or (final_epoch and not evolve): # if save
# fetch neptune run id
try:
- if loggers.neptune:
+ if loggers.neptune and loggers.neptune.neptune_run:
neptune_id = loggers.neptune.neptune_run['sys/id'].fetch()
else:
neptune_id = None
@@ -494,12 +495,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
strip_optimizer(f) # strip optimizers
if f is best:
LOGGER.info(f'\nValidating {f}...')
- results, _, _, _ = val.run(
+ results, _, _, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=attempt_load(f, device).half(),
- iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65
+ iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
@@ -520,7 +521,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio
if result:
LOGGER.info(f"{colorstr('aws:')} Best weight has been successfully uploaded to {s3_file}")
- callbacks.run('on_train_end', last, best, plots, epoch, results)
+ callbacks.run('on_train_end', last, best, epoch, results)
torch.cuda.empty_cache()
return results
@@ -530,9 +531,9 @@ def parse_opt(known=False):
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
- parser.add_argument('--data', type=str, default='', help='data.yaml path')
- parser.add_argument('--hyp', type=str, default='', help='hyperparameters path')
- parser.add_argument('--epochs', type=int, default=300)
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
+ parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+ parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
parser.add_argument('--rect', action='store_true', help='rectangular training')
@@ -584,13 +585,12 @@ def main(opt, callbacks=Callbacks()):
# Checks
if RANK in {-1, 0}:
print_args(vars(opt))
- check_git_status()
- check_requirements(exclude=['thop'])
+ #check_git_status()
+ check_requirements()
- # Resume
- if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume an interrupted run
- last = Path(opt.resume if isinstance(opt.resume, str) else get_latest_run()) # specified or most recent last.pt
- assert last.is_file(), f'ERROR: --resume checkpoint {last} does not exist'
+ # Resume (from specified or most recent last.pt)
+ if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve:
+ last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml
opt_data = opt.data # original dataset
if opt_yaml.is_file():
@@ -600,12 +600,9 @@ def main(opt, callbacks=Callbacks()):
d = torch.load(last, map_location='cpu')['opt']
opt = argparse.Namespace(**d) # replace
opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate
- if is_url(opt.data):
- opt.data = str(opt_data) # avoid HUB resume auth timeout
+ if is_url(opt_data):
+ opt.data = check_file(opt_data) # avoid HUB resume auth timeout
else:
- opt.hyp = opt.hyp or str(ROOT / 'data' / 'hyps' / ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch-low.yaml'))
- opt.data = opt.data or str(ROOT / 'data/coco128.yaml')
-
opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
@@ -633,9 +630,6 @@ def main(opt, callbacks=Callbacks()):
# Train
if not opt.evolve:
train(opt.hyp, opt, device, callbacks)
- if WORLD_SIZE > 1 and RANK == 0:
- LOGGER.info('Destroying process group... ')
- dist.destroy_process_group()
# Evolve hyperparameters (optional)
else:
@@ -675,6 +669,8 @@ def main(opt, callbacks=Callbacks()):
hyp = yaml.safe_load(f) # load hyps dict
if 'anchors' not in hyp: # anchors commented in hyp.yaml
hyp['anchors'] = 3
+ if opt.noautoanchor:
+ del hyp['anchors'], meta['anchors']
opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
diff --git a/yolov5/utils/__init__.py b/yolov5/utils/__init__.py
index 1f138d2..59a4270 100644
--- a/yolov5/utils/__init__.py
+++ b/yolov5/utils/__init__.py
@@ -3,6 +3,33 @@
utils/initialization
"""
+import contextlib
+import threading
+
+
+class TryExcept(contextlib.ContextDecorator):
+ # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+ def __init__(self, msg=''):
+ self.msg = msg
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, value, traceback):
+ if value:
+ print(f'{self.msg}{value}')
+ return True
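+ # returning True from __exit__ suppresses the exception, so decorated code fails soft with only a printed message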
+
+
+def threaded(func):
+ # Multi-threads a target function and returns thread. Usage: @threaded decorator
+ def wrapper(*args, **kwargs):
+ thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+ thread.start()
+ return thread
+
+ return wrapper
+
def notebook_init(verbose=True):
# Check system software and hardware
@@ -11,10 +38,13 @@ def notebook_init(verbose=True):
import os
import shutil
- from yolov5.utils.general import check_requirements, emojis, is_colab
+ from yolov5.utils.general import (check_font, check_requirements, emojis,
+ is_colab)
from yolov5.utils.torch_utils import select_device # imports
check_requirements(('psutil', 'IPython'))
+ check_font()
+
import psutil
from IPython import display # to display images and clear console output
diff --git a/yolov5/utils/augmentations.py b/yolov5/utils/augmentations.py
index d794563..75ea63a 100644
--- a/yolov5/utils/augmentations.py
+++ b/yolov5/utils/augmentations.py
@@ -8,15 +8,22 @@
import cv2
import numpy as np
+import torch
+import torchvision.transforms as T
+import torchvision.transforms.functional as TF
from yolov5.utils.general import (LOGGER, check_version, colorstr,
resample_segments, segment2box)
from yolov5.utils.metrics import bbox_ioa
+IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean
+IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation
+
class Albumentations:
# YOLOv5 Albumentations class (optional, only used if package is installed)
def __init__(self):
self.transform = None
+ prefix = colorstr('albumentations: ')
try:
import albumentations as A
check_version(A.__version__, '1.0.3', hard=True) # version requirement
@@ -31,11 +38,11 @@ def __init__(self):
A.ImageCompression(quality_lower=75, p=0.0)] # transforms
self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
- LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
+ LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
except ImportError: # package not installed, skip
pass
except Exception as e:
- LOGGER.info(colorstr('albumentations: ') + f'{e}')
+ LOGGER.info(f'{prefix}{e}')
def __call__(self, im, labels, p=1.0):
if self.transform and random.random() < p:
@@ -44,6 +51,18 @@ def __call__(self, im, labels, p=1.0):
return im, labels
+def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
+ # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
+ return TF.normalize(x, mean, std, inplace=inplace)
+
+
+def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+ # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
+ for i in range(3):
+ x[:, i] = x[:, i] * std[i] + mean[i]
+ return x
+
+
def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
# HSV color-space augmentation
if hgain or sgain or vgain:
@@ -282,3 +301,96 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
+
+
+def classify_albumentations(augment=True,
+ size=224,
+ scale=(0.08, 1.0),
+ hflip=0.5,
+ vflip=0.0,
+ jitter=0.4,
+ mean=IMAGENET_MEAN,
+ std=IMAGENET_STD,
+ auto_aug=False):
+ # YOLOv5 classification Albumentations (optional, only used if package is installed)
+ prefix = colorstr('albumentations: ')
+ try:
+ import albumentations as A
+ from albumentations.pytorch import ToTensorV2
+ check_version(A.__version__, '1.0.3', hard=True) # version requirement
+ if augment: # Resize and crop
+ T = [A.RandomResizedCrop(height=size, width=size, scale=scale)]
+ if auto_aug:
+ # TODO: implement AugMix, AutoAug & RandAug in albumentation
+ LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+ else:
+ if hflip > 0:
+ T += [A.HorizontalFlip(p=hflip)]
+ if vflip > 0:
+ T += [A.VerticalFlip(p=vflip)]
+ if jitter > 0:
+ color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, saturation, 0 hue
+ T += [A.ColorJitter(*color_jitter, 0)]
+ else: # Use fixed crop for eval set (reproducibility)
+ T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+ T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor
+ LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+ return A.Compose(T)
+
+ except ImportError: # package not installed, skip
+ pass
+ except Exception as e:
+ LOGGER.info(f'{prefix}{e}')
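+ # hedged usage sketch: t = classify_albumentations(augment=True, size=224); x = t(image=im_rgb)['image'] # CHW tensor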
+
+
+def classify_transforms(size=224):
+ # Transforms to apply if albumentations not installed
+ assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
+ # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
+ return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
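+ # e.g. classify_transforms(224)(im) takes an HWC BGR numpy image and returns a normalized CHW float tensor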
+
+
+class LetterBox:
+ # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+ def __init__(self, size=(640, 640), auto=False, stride=32):
+ super().__init__()
+ self.h, self.w = (size, size) if isinstance(size, int) else size
+ self.auto = auto # pass max size integer, automatically solve for short side using stride
+ self.stride = stride # used with auto
+
+ def __call__(self, im): # im = np.array HWC
+ imh, imw = im.shape[:2]
+ r = min(self.h / imh, self.w / imw) # ratio of new/old
+ h, w = round(imh * r), round(imw * r) # resized image
+ hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+ top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+ im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
+ im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+ return im_out
+
+
+class CenterCrop:
+ # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+ def __init__(self, size=640):
+ super().__init__()
+ self.h, self.w = (size, size) if isinstance(size, int) else size
+
+ def __call__(self, im): # im = np.array HWC
+ imh, imw = im.shape[:2]
+ m = min(imh, imw) # min dimension
+ top, left = (imh - m) // 2, (imw - m) // 2
+ return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+
+
+class ToTensor:
+ # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+ def __init__(self, half=False):
+ super().__init__()
+ self.half = half
+
+ def __call__(self, im): # im = np.array HWC in BGR order
+ im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
+ im = torch.from_numpy(im) # to torch
+ im = im.half() if self.half else im.float() # uint8 to fp16/32
+ im /= 255.0 # 0-255 to 0.0-1.0
+ return im
diff --git a/yolov5/utils/autoanchor.py b/yolov5/utils/autoanchor.py
index 47c1e44..309434c 100644
--- a/yolov5/utils/autoanchor.py
+++ b/yolov5/utils/autoanchor.py
@@ -9,8 +9,8 @@
import torch
import yaml
from tqdm import tqdm
-
-from yolov5.utils.general import LOGGER, colorstr, emojis
+from yolov5.utils import TryExcept
+from yolov5.utils.general import LOGGER, colorstr
PREFIX = colorstr('AutoAnchor: ')
@@ -25,6 +25,7 @@ def check_anchor_order(m):
m.anchors[:] = m.anchors.flip(0)
+@TryExcept(f'{PREFIX}ERROR: ')
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
@@ -45,14 +46,11 @@ def metric(k): # compute metric
bpr, aat = metric(anchors.cpu().view(-1, 2))
s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
if bpr > 0.98: # threshold to recompute
- LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))
+ LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅')
else:
- LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))
+ LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')
na = m.anchors.numel() // 2 # number of anchors
- try:
- anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
- except Exception as e:
- LOGGER.info(f'{PREFIX}ERROR: {e}')
+ anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
@@ -62,7 +60,7 @@ def metric(k): # compute metric
s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
else:
s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
- LOGGER.info(emojis(s))
+ LOGGER.info(s)
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
@@ -124,7 +122,7 @@ def print_results(k, verbose=True):
i = (wh0 < 3.0).any(1).sum()
if i:
LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size')
- wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
+ wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels
# wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans init
@@ -167,4 +165,4 @@ def print_results(k, verbose=True):
if verbose:
print_results(k, verbose)
- return print_results(k)
+ return print_results(k).astype(np.float32)
diff --git a/yolov5/utils/autobatch.py b/yolov5/utils/autobatch.py
index 81fd361..416e94d 100644
--- a/yolov5/utils/autobatch.py
+++ b/yolov5/utils/autobatch.py
@@ -7,8 +7,7 @@
import numpy as np
import torch
-
-from yolov5.utils.general import LOGGER, colorstr, emojis
+from yolov5.utils.general import LOGGER, colorstr
from yolov5.utils.torch_utils import profile
@@ -18,7 +17,7 @@ def check_train_batch_size(model, imgsz=640, amp=True):
return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
-def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
+def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
# Automatically estimate best batch size to use `fraction` of available CUDA memory
# Usage:
# import torch
@@ -47,7 +46,7 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
# Profile batch sizes
batch_sizes = [1, 2, 4, 8, 16]
try:
- img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
+ img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
results = profile(img, model, n=3, device=device)
except Exception as e:
LOGGER.warning(f'{prefix}{e}')
@@ -60,7 +59,10 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
i = results.index(None) # first fail index
if b >= batch_sizes[i]: # y intercept above failure point
b = batch_sizes[max(i - 1, 0)] # select prior safe point
+ if b < 1 or b > 1024: # b outside of safe range
+ b = batch_size
+ LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
fraction = np.polyval(p, b) / t # actual fraction predicted
- LOGGER.info(emojis(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅'))
+ LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
return b
diff --git a/yolov5/utils/callbacks.py b/yolov5/utils/callbacks.py
index 2b32df0..166d893 100644
--- a/yolov5/utils/callbacks.py
+++ b/yolov5/utils/callbacks.py
@@ -3,6 +3,8 @@
Callback utils
"""
+import threading
+
class Callbacks:
""""
@@ -55,17 +57,20 @@ def get_registered_actions(self, hook=None):
"""
return self._callbacks[hook] if hook else self._callbacks
- def run(self, hook, *args, **kwargs):
+ def run(self, hook, *args, thread=False, **kwargs):
"""
- Loop through the registered actions and fire all callbacks
+ Loop through the registered actions and fire all callbacks on main thread
Args:
hook: The name of the hook to check, defaults to all
args: Arguments to receive from YOLOv5
+ thread: (boolean) Run callbacks in daemon thread
kwargs: Keyword Arguments to receive from YOLOv5
"""
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
-
for logger in self._callbacks[hook]:
- logger['callback'](*args, **kwargs)
+ if thread:
+ threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
+ else:
+ logger['callback'](*args, **kwargs)
diff --git a/yolov5/utils/dataloaders.py b/yolov5/utils/dataloaders.py
old mode 100755
new mode 100644
index cf49767..3e3d881
--- a/yolov5/utils/dataloaders.py
+++ b/yolov5/utils/dataloaders.py
@@ -22,13 +22,15 @@
import numpy as np
import torch
import torch.nn.functional as F
+import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from yolov5.utils.augmentations import (Albumentations, augment_hsv,
- copy_paste, letterbox, mixup,
- random_perspective)
+ classify_albumentations,
+ classify_transforms, copy_paste,
+ letterbox, mixup, random_perspective)
from yolov5.utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS,
check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab,
@@ -37,11 +39,12 @@
from yolov5.utils.torch_utils import torch_distributed_zero_first
# Parameters
-HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
-IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes
+HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
+PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
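+# e.g. export PIN_MEMORY=false to disable pinned host memory for dataloaders on memory-constrained machines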
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
@@ -85,7 +88,7 @@ def exif_transpose(image):
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
- 8: Image.ROTATE_90,}.get(orientation)
+ 8: Image.ROTATE_90}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
@@ -146,7 +149,7 @@ def create_dataloader(path,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
- pin_memory=True,
+ pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
generator=generator), dataset
@@ -188,7 +191,7 @@ def __iter__(self):
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
- def __init__(self, path, img_size=640, stride=32, auto=True):
+ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
@@ -212,8 +215,10 @@ def __init__(self, path, img_size=640, stride=32, auto=True):
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
+ self.transforms = transforms # optional
+ self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
- self.new_video(videos[0]) # new video
+ self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
@@ -231,103 +236,71 @@ def __next__(self):
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
- ret_val, img0 = self.cap.read()
+ ret_val, im0 = self.cap.read()
+ self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
- self.new_video(path)
- ret_val, img0 = self.cap.read()
+ self._new_video(path)
+ ret_val, im0 = self.cap.read()
self.frame += 1
+ # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
- img0 = cv2.imread(path) # BGR
- assert img0 is not None, f'Image Not Found {path}'
+ im0 = cv2.imread(path) # BGR
+ assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
- # Padded resize
- img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
-
- # Convert
- img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
- img = np.ascontiguousarray(img)
+ if self.transforms:
+ im = self.transforms(im0) # transforms
+ else:
+ im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
+ im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
+ im = np.ascontiguousarray(im) # contiguous
- return path, img, img0, self.cap, s
+ return path, im, im0, self.cap, s
- def new_video(self, path):
+ def _new_video(self, path):
+ # Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
- self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
+ self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
+ # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
+
+ def _cv2_rotate(self, im):
+ # Rotate a cv2 video manually
+ if self.orientation == 0:
+ return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
+ elif self.orientation == 180:
+ return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif self.orientation == 90:
+ return cv2.rotate(im, cv2.ROTATE_180)
+ return im
def __len__(self):
return self.nf # number of files
-class LoadWebcam: # for inference
- # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
- def __init__(self, pipe='0', img_size=640, stride=32):
- self.img_size = img_size
- self.stride = stride
- self.pipe = eval(pipe) if pipe.isnumeric() else pipe
- self.cap = cv2.VideoCapture(self.pipe) # video capture object
- self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
-
- def __iter__(self):
- self.count = -1
- return self
-
- def __next__(self):
- self.count += 1
- if cv2.waitKey(1) == ord('q'): # q to quit
- self.cap.release()
- cv2.destroyAllWindows()
- raise StopIteration
-
- # Read frame
- ret_val, img0 = self.cap.read()
- img0 = cv2.flip(img0, 1) # flip left-right
-
- # Print
- assert ret_val, f'Camera Error {self.pipe}'
- img_path = 'webcam.jpg'
- s = f'webcam {self.count}: '
-
- # Padded resize
- img = letterbox(img0, self.img_size, stride=self.stride)[0]
-
- # Convert
- img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
- img = np.ascontiguousarray(img)
-
- return img_path, img, img0, None, s
-
- def __len__(self):
- return 0
-
-
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
- def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
+ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
+ torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
-
- if os.path.isfile(sources):
- with open(sources) as f:
- sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
- else:
- sources = [sources]
-
+ self.vid_stride = vid_stride # video frame-rate stride
+ sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources]
n = len(sources)
- self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
- self.auto = auto
+ self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
@@ -354,19 +327,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
LOGGER.info('') # newline
# check for common shapes
- s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
+ s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
+ self.auto = auto and self.rect
+ self.transforms = transforms # optional
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
- n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
+ n, f = 0, self.frames[i] # frame number, frame array
while cap.isOpened() and n < f:
n += 1
- # _, self.imgs[index] = cap.read()
- cap.grab()
- if n % read == 0:
+ cap.grab() # .read() = .grab() followed by .retrieve()
+ if n % self.vid_stride == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
@@ -386,18 +360,15 @@ def __next__(self):
cv2.destroyAllWindows()
raise StopIteration
- # Letterbox
- img0 = self.imgs.copy()
- img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
-
- # Stack
- img = np.stack(img, 0)
-
- # Convert
- img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
- img = np.ascontiguousarray(img)
+ im0 = self.imgs.copy()
+ if self.transforms:
+ im = np.stack([self.transforms(x) for x in im0]) # transforms
+ else:
+ im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
+ im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
+ im = np.ascontiguousarray(im) # contiguous
- return self.sources, img, img0, None, ''
+ return self.sources, im, im0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
@@ -457,7 +428,7 @@ def __init__(self,
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f'{prefix}No images found'
except Exception as e:
- raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
+ raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
@@ -476,17 +447,19 @@ def __init__(self,
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
- assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
+ assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
+ nl = len(np.concatenate(labels, 0)) # number of labels
+ assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
self.labels = list(labels)
self.shapes = np.array(shapes)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
- bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
+ bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
@@ -515,6 +488,7 @@ def __init__(self,
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
+ self.segments = [self.segments[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
@@ -528,7 +502,7 @@ def __init__(self,
elif mini > 1:
shapes[i] = [1, 1 / mini]
- self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)
self.ims = [None] * n
@@ -573,7 +547,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
- LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
+ LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.im_files)
x['results'] = nf, nm, ne, nc, len(self.im_files)
x['msgs'] = msgs # warnings
@@ -835,7 +809,7 @@ def collate_fn(batch):
@staticmethod
def collate_fn4(batch):
- img, label, path, shapes = zip(*batch) # transposed
+ im, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
@@ -845,13 +819,13 @@ def collate_fn4(batch):
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
- im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
- align_corners=False)[0].type(img[i].type())
+ im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
+ align_corners=False)[0].type(im[i].type())
lb = label[i]
else:
- im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
+ im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
- im4.append(im)
+ im4.append(im1)
label4.append(lb)
for i, lb in enumerate(label4):
@@ -874,7 +848,7 @@ def flatten_recursive(path=DATASETS_DIR / 'coco128'):
def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
- shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
+ shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
@@ -898,7 +872,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
- b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
@@ -920,7 +894,9 @@ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), ann
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
- [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
+ for x in txt:
+ if (path.parent / x).exists():
+ (path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
@@ -966,7 +942,7 @@ def verify_image_label(args):
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
- segments = segments[i]
+ segments = [segments[x] for x in i]
msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
@@ -1006,7 +982,7 @@ def __init__(self, path='coco128.yaml', autodownload=False):
self.hub_dir = Path(data['path'] + '-hub')
self.im_dir = self.hub_dir / 'images'
self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images
- self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
+ self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary
self.data = data
@staticmethod
@@ -1038,7 +1014,7 @@ def _hub_ops(self, f, max_dim=1920):
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
- im.save(f_new, 'JPEG', quality=75, optimize=True) # save
+ im.save(f_new, 'JPEG', quality=50, optimize=True) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
@@ -1094,3 +1070,65 @@ def process_images(self):
pass
print(f'Done. All images saved to {self.im_dir}')
return self.im_dir
+
+
+# Classification dataloaders -------------------------------------------------------------------------------------------
+class ClassificationDataset(torchvision.datasets.ImageFolder):
+ """
+ YOLOv5 Classification Dataset.
+ Arguments
+ root: Dataset path
+ transform: torchvision transforms, used by default
+ album_transform: Albumentations transforms, used if installed
+ """
+
+ def __init__(self, root, augment, imgsz, cache=False):
+ super().__init__(root=root)
+ self.torch_transforms = classify_transforms(imgsz)
+ self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
+ self.cache_ram = cache is True or cache == 'ram'
+ self.cache_disk = cache == 'disk'
+ self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im
+
+ def __getitem__(self, i):
+ f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
+ if self.cache_ram and im is None:
+ im = self.samples[i][3] = cv2.imread(f)
+ elif self.cache_disk:
+ if not fn.exists(): # load npy
+ np.save(fn.as_posix(), cv2.imread(f))
+ im = np.load(fn)
+ else: # read image
+ im = cv2.imread(f) # BGR
+ if self.album_transforms:
+ sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"]
+ else:
+ sample = self.torch_transforms(im)
+ return sample, j
+
+
+def create_classification_dataloader(path,
+ imgsz=224,
+ batch_size=16,
+ augment=True,
+ cache=False,
+ rank=-1,
+ workers=8,
+ shuffle=True):
+ # Returns Dataloader object to be used with YOLOv5 Classifier
+ with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
+ dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
+ batch_size = min(batch_size, len(dataset))
+ nd = torch.cuda.device_count()
+ nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
+ sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
+ generator = torch.Generator()
+ generator.manual_seed(0)
+ return InfiniteDataLoader(dataset,
+ batch_size=batch_size,
+ shuffle=shuffle and sampler is None,
+ num_workers=nw,
+ sampler=sampler,
+ pin_memory=PIN_MEMORY,
+ worker_init_fn=seed_worker,
+ generator=generator) # or DataLoader(persistent_workers=True)
diff --git a/yolov5/utils/downloads.py b/yolov5/utils/downloads.py
index 69f4b0f..d752076 100644
--- a/yolov5/utils/downloads.py
+++ b/yolov5/utils/downloads.py
@@ -33,6 +33,12 @@ def gsutil_getsize(url=''):
return eval(s.split(' ')[0]) if len(s) else 0 # bytes
+def url_getsize(url='https://ultralytics.com/images/bus.jpg'):
+ # Return downloadable file size in bytes
+ response = requests.head(url, allow_redirects=True)
+ return int(response.headers.get('content-length', -1))
+
+
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
# Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
from yolov5.utils.general import LOGGER
@@ -44,24 +50,26 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)
assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check
except Exception as e: # url2
- file.unlink(missing_ok=True) # remove partial downloads
+ if file.exists():
+ file.unlink() # remove partial downloads
LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
- os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
+ os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
finally:
if not file.exists() or file.stat().st_size < min_bytes: # check
- file.unlink(missing_ok=True) # remove partial downloads
+ if file.exists():
+ file.unlink() # remove partial downloads
LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}")
LOGGER.info('')
-def attempt_download(file, repo='ultralytics/yolov5', release='v6.1'):
- # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc.
+def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'):
+ # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.
from yolov5.utils.general import LOGGER
def github_assets(repository, version='latest'):
- # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
+ # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
if version != 'latest':
- version = f'tags/{version}' # i.e. tags/v6.1
+ version = f'tags/{version}' # i.e. tags/v6.2
response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api
return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets
@@ -112,8 +120,10 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
file = Path(file)
cookie = Path('cookie') # gdrive cookie
print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
- file.unlink(missing_ok=True) # remove existing file
- cookie.unlink(missing_ok=True) # remove existing cookie
+ if file.exists():
+ file.unlink() # remove existing file
+ if cookie.exists():
+ cookie.unlink() # remove existing cookie
# Attempt file download
out = "NUL" if platform.system() == "Windows" else "/dev/null"
@@ -123,11 +133,13 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
else: # small file
s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
r = os.system(s) # execute, capture return
- cookie.unlink(missing_ok=True) # remove existing cookie
+ if cookie.exists():
+ cookie.unlink() # remove existing cookie
# Error check
if r != 0:
- file.unlink(missing_ok=True) # remove partial
+ if file.exists():
+ file.unlink() # remove partial
print('Download error ') # raise Exception('Download error')
return r
diff --git a/yolov5/utils/general.py b/yolov5/utils/general.py
old mode 100755
new mode 100644
index 4cbc231..34c75d0
--- a/yolov5/utils/general.py
+++ b/yolov5/utils/general.py
@@ -14,10 +14,10 @@
import re
import shutil
import signal
-import threading
import sys
import time
import urllib
+from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
@@ -34,6 +34,7 @@
import torchvision
import yaml
+from yolov5.utils import TryExcept
from yolov5.utils.downloads import gsutil_getsize
from yolov5.utils.metrics import box_iou, fitness
@@ -56,20 +57,42 @@
os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy)
+def is_ascii(s=''):
+ # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
+ s = str(s) # convert list, tuple, None, etc. to str
+ return len(s.encode().decode('ascii', 'ignore')) == len(s)
+
+
+def is_chinese(s='人工智能'):
+ # Is string composed of any Chinese characters?
+ return bool(re.search('[\u4e00-\u9fff]', str(s)))
+
+
+def is_colab():
+ # Is environment a Google Colab instance?
+ return 'COLAB_GPU' in os.environ
+
+
def is_kaggle():
# Is environment a Kaggle Notebook?
- try:
- assert os.environ.get('PWD') == '/kaggle/working'
- assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
+ return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
+
+
+def is_docker() -> bool:
+ """Check if the process runs inside a docker container."""
+ if Path("/.dockerenv").exists():
return True
- except AssertionError:
+ try: # check if docker is in control groups
+ with open("/proc/self/cgroup") as file:
+ return any("docker" in line for line in file)
+ except OSError:
return False
def is_writeable(dir, test=False):
# Return True if directory has write permissions, test opening a file with write permissions if test=True
if not test:
- return os.access(dir, os.R_OK) # possible issues on Windows
+ return os.access(dir, os.W_OK) # possible issues on Windows
file = Path(dir) / 'tmp.txt'
try:
with open(file, 'w'): # open file with write permissions
@@ -82,7 +105,7 @@ def is_writeable(dir, test=False):
def set_logging(name=None, verbose=VERBOSE):
# Sets level and returns logger
- if is_kaggle():
+ if is_kaggle() or is_colab():
for h in logging.root.handlers:
logging.root.removeHandler(h) # remove all handlers associated with the root logger object
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
@@ -97,6 +120,9 @@ def set_logging(name=None, verbose=VERBOSE):
set_logging() # run before defining LOGGER
LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.)
+if platform.system() == 'Windows':
+ for fn in LOGGER.info, LOGGER.warning:
+ setattr(LOGGER, fn.__name__, lambda x, fn=fn: fn(emojis(x))) # emoji safe logging (bind fn per iteration, not at call time)
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
@@ -116,16 +142,27 @@ def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
class Profile(contextlib.ContextDecorator):
- # Usage: @Profile() decorator or 'with Profile():' context manager
+ # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
+ def __init__(self, t=0.0):
+ self.t = t
+ self.cuda = torch.cuda.is_available()
+
def __enter__(self):
- self.start = time.time()
+ self.start = self.time()
+ return self
def __exit__(self, type, value, traceback):
- print(f'Profile results: {time.time() - self.start:.5f}s')
+ self.dt = self.time() - self.start # delta-time
+ self.t += self.dt # accumulate dt
+
+ def time(self):
+ if self.cuda:
+ torch.cuda.synchronize()
+ return time.time()
class Timeout(contextlib.ContextDecorator):
- # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
+ # YOLOv5 Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
@@ -159,64 +196,50 @@ def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
-def try_except(func):
- # try-except function. Usage: @try_except decorator
- def handler(*args, **kwargs):
- try:
- func(*args, **kwargs)
- except Exception as e:
- print(e)
-
- return handler
-
-
-def threaded(func):
- # Multi-threads a target function and returns thread. Usage: @threaded decorator
- def wrapper(*args, **kwargs):
- thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
- thread.start()
- return thread
-
- return wrapper
-
-
def methods(instance):
# Get class/instance methods
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
-def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
+def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
# Print function arguments (optional args dict)
x = inspect.currentframe().f_back # previous frame
- file, _, fcn, _, _ = inspect.getframeinfo(x)
+ file, _, func, _, _ = inspect.getframeinfo(x)
if args is None: # get args automatically
args, _, _, frm = inspect.getargvalues(x)
args = {k: v for k, v in frm.items() if k in args}
- s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '')
+ try:
+ file = Path(file).resolve().relative_to(ROOT).with_suffix('')
+ except ValueError:
+ file = Path(file).stem
+ s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))
def init_seeds(seed=0, deterministic=False):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
- # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
- import torch.backends.cudnn as cudnn
-
- if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213
- torch.use_deterministic_algorithms(True)
- os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
- os.environ['PYTHONHASHSEED'] = str(seed)
-
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
- cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe
+ torch.backends.cudnn.benchmark = True # for faster training
+ if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213
+ torch.use_deterministic_algorithms(True)
+ torch.backends.cudnn.deterministic = True
+ os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
+ os.environ['PYTHONHASHSEED'] = str(seed)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
- return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
+ return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
+
+
+def get_default_args(func):
+ # Get func() default arguments
+ signature = inspect.signature(func)
+ return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_latest_run(search_dir='.'):
@@ -225,41 +248,6 @@ def get_latest_run(search_dir='.'):
return max(last_list, key=os.path.getctime) if last_list else ''
-def is_docker() -> bool:
- """Check if the process runs inside a docker container."""
- if Path("/.dockerenv").exists():
- return True
- try: # check if docker is in control groups
- with open("/proc/self/cgroup") as file:
- return any("docker" in line for line in file)
- except OSError:
- return False
-
-
-def is_colab():
- # Is environment a Google Colab instance?
- try:
- import google.colab
- return True
- except ImportError:
- return False
-
-
-def is_pip():
- # Is file in a pip package?
- return 'site-packages' in Path(__file__).resolve().parts
-
-
-def is_ascii(s=''):
- # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
- s = str(s) # convert list, tuple, None, etc. to str
- return len(s.encode().decode('ascii', 'ignore')) == len(s)
-
-
-def is_chinese(s='人工智能'):
- # Is string composed of any Chinese characters?
- return bool(re.search('[\u4e00-\u9fff]', str(s)))
-
-
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
@@ -308,9 +296,9 @@ def git_describe(path=ROOT): # path must be a directory
return ''
-@try_except
+@TryExcept()
@WorkingDirectory(ROOT)
-def check_git_status(repo='ultralytics/yolov5'):
+def check_git_status(repo='ultralytics/yolov5', branch='master'):
# YOLOv5 status check, recommend 'git pull' if code is out of date
url = f'https://github.com/{repo}'
msg = f', for updates see {url}'
@@ -326,14 +314,14 @@ def check_git_status(repo='ultralytics/yolov5'):
remote = 'ultralytics'
check_output(f'git remote add {remote} {url}', shell=True)
check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch
- branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
- n = int(check_output(f'git rev-list {branch}..{remote}/master --count', shell=True)) # commits behind
+ local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
+ n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind
if n > 0:
- pull = 'git pull' if remote == 'origin' else f'git pull {remote} master'
+ pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}'
s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update."
else:
s += f'up to date with {url} ✅'
- LOGGER.info(emojis(s)) # emoji-safe
+ LOGGER.info(s)
def check_python(minimum='3.7.0'):
@@ -345,49 +333,47 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
- s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string
+ s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string
if hard:
- assert result, s # assert min requirements met
+ assert result, emojis(s) # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
-@try_except
-def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()):
- # Check installed dependencies meet requirements (pass *.txt file or list of packages)
+@TryExcept()
+def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):
+ # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
- if isinstance(requirements, (str, Path)): # requirements.txt file
- file = Path(requirements)
- assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
+ if isinstance(requirements, Path): # requirements.txt file
+ file = requirements.resolve()
+ assert file.exists(), f"{prefix} {file} not found, check failed."
with file.open() as f:
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
- else: # list or tuple of packages
- requirements = [x for x in requirements if x not in exclude]
+ elif isinstance(requirements, str):
+ requirements = [requirements]
- n = 0 # number of packages updates
- for i, r in enumerate(requirements):
+ s = ''
+ n = 0
+ for r in requirements:
try:
pkg.require(r)
- except Exception: # DistributionNotFound or VersionConflict if requirements not met
- s = f"{prefix} {r} not found and is required by YOLOv5"
- if install and AUTOINSTALL: # check environment variable
- LOGGER.info(f"{s}, attempting auto-update...")
- try:
- assert check_online(), f"'pip install {r}' skipped (offline)"
- LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode())
- n += 1
- except Exception as e:
- LOGGER.warning(f'{prefix} {e}')
- else:
- LOGGER.info(f'{s}. Please install and rerun your command.')
-
- if n: # if packages updated
- source = file.resolve() if 'file' in locals() else requirements
- s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
- f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
- LOGGER.info(emojis(s))
+ except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met
+ s += f'"{r}" '
+ n += 1
+
+ if s and install and AUTOINSTALL: # check environment variable
+ LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
+ try:
+ assert check_online(), "AutoUpdate skipped (offline)"
+ LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())
+ source = file if 'file' in locals() else requirements
+ s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
+ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
+ LOGGER.info(s)
+ except Exception as e:
+ LOGGER.warning(f'{prefix} {e}')
def check_img_size(imgsz, s=32, floor=0):
@@ -449,6 +435,9 @@ def check_file(file, suffix=''):
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
return file
+ elif file.startswith('clearml://'): # ClearML Dataset ID
+ assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'."
+ return file
else: # search
files = []
for d in 'data', 'models', 'utils': # search directories
@@ -463,7 +452,7 @@ def check_font(font=FONT, progress=False):
font = Path(font)
file = CONFIG_DIR / font.name
if not font.exists() and not file.exists():
- url = "https://ultralytics.com/assets/" + font.name
+ url = f'https://ultralytics.com/assets/{font.name}'
LOGGER.info(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, str(file), progress=progress)
@@ -474,7 +463,7 @@ def check_dataset(data, autodownload=True):
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
- download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1)
+ download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)
data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
@@ -484,11 +473,11 @@ def check_dataset(data, autodownload=True):
data = yaml.safe_load(f) # dictionary
# Checks
- for k in 'train', 'val', 'nc':
- assert k in data, emojis(f"data.yaml '{k}:' field missing ❌")
- if 'names' not in data:
- LOGGER.warning(emojis("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc."))
- data['names'] = [f'class{i}' for i in range(data['nc'])] # default names
+ for k in 'train', 'val', 'names':
+ assert k in data, f"data.yaml '{k}:' field missing ❌"
+ if isinstance(data['names'], (list, tuple)): # old array format
+ data['names'] = dict(enumerate(data['names'])) # convert to dict
+ data['nc'] = len(data['names'])
# Resolve paths
path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'
@@ -503,9 +492,9 @@ def check_dataset(data, autodownload=True):
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
- LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]))
+ LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
if not s or not autodownload:
- raise Exception(emojis('Dataset not found ❌'))
+ raise Exception('Dataset not found ❌')
t = time.time()
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
@@ -523,7 +512,7 @@ def check_dataset(data, autodownload=True):
r = exec(s, {'yaml': data}) # return None
dt = f'({round(time.time() - t, 1)}s)'
s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌"
- LOGGER.info(emojis(f"Dataset download {s}"))
+ LOGGER.info(f"Dataset download {s}")
check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
return data # dictionary
@@ -542,20 +531,32 @@ def amp_allclose(model, im):
prefix = colorstr('AMP: ')
device = next(model.parameters()).device # get model device
- if device.type == 'cpu':
- return False # AMP disabled on CPU
+ if device.type in ('cpu', 'mps'):
+ return False # AMP only used on CUDA devices
f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check
im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))
try:
- assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
- LOGGER.info(emojis(f'{prefix}checks passed ✅'))
+ assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
+ LOGGER.info(f'{prefix}checks passed ✅')
return True
except Exception:
help_url = 'https://github.com/ultralytics/yolov5/issues/7908'
- LOGGER.warning(emojis(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}'))
+ LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}')
return False
+def yaml_load(file='data.yaml'):
+ # Single-line safe yaml loading
+ with open(file, errors='ignore') as f:
+ return yaml.safe_load(f)
+
+
+def yaml_save(file='data.yaml', data={}):
+ # Single-line safe yaml saving
+ with open(file, 'w') as f:
+ yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)
+
+
def url2file(url):
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
@@ -563,7 +564,7 @@ def url2file(url):
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
- # Multi-threaded file download and unzip function, used in data.yaml for autodownload
+ # Multithreaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
success = True
@@ -575,7 +576,8 @@ def download_one(url, dir):
for i in range(retry + 1):
if curl:
s = 'sS' if threads > 1 else '' # silent
- r = os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue
+ r = os.system(
+ f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue
success = r == 0
else:
torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download
@@ -587,10 +589,12 @@ def download_one(url, dir):
else:
LOGGER.warning(f'Failed to download {url}...')
- if unzip and success and f.suffix in ('.zip', '.gz'):
+ if unzip and success and f.suffix in ('.zip', '.tar', '.gz'):
LOGGER.info(f'Unzipping {f}...')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir) # unzip
+ elif f.suffix == '.tar':
+ os.system(f'tar xf {f} --directory {f.parent}') # unzip
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent}') # unzip
if delete:
@@ -600,7 +604,7 @@ def download_one(url, dir):
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
- pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
+ pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded
pool.close()
pool.join()
else:
@@ -794,22 +798,28 @@ def clip_coords(boxes, shape):
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
-def non_max_suppression(prediction,
- conf_thres=0.25,
- iou_thres=0.45,
- classes=None,
- agnostic=False,
- multi_label=False,
- labels=(),
- max_det=300):
- """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes
+def non_max_suppression(
+ prediction,
+ conf_thres=0.25,
+ iou_thres=0.45,
+ classes=None,
+ agnostic=False,
+ multi_label=False,
+ labels=(),
+ max_det=300,
+ nm=0, # number of masks
+):
+ """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
+ if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation mode, output = (inference_out, loss_out)
+ prediction = prediction[0] # select only inference output
+
bs = prediction.shape[0] # batch size
- nc = prediction.shape[2] - 5 # number of classes
+ nc = prediction.shape[2] - nm - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
@@ -820,13 +830,14 @@ def non_max_suppression(prediction,
# min_wh = 2 # (pixels) minimum box width and height
max_wh = 7680 # (pixels) maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
- time_limit = 0.3 + 0.03 * bs # seconds to quit after
+ time_limit = 0.5 + 0.05 * bs # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
- output = [torch.zeros((0, 6), device=prediction.device)] * bs
+ mi = 5 + nc # mask start index
+ output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
@@ -835,7 +846,7 @@ def non_max_suppression(prediction,
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
lb = labels[xi]
- v = torch.zeros((len(lb), nc + 5), device=x.device)
+ v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
v[:, :4] = lb[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls
@@ -848,16 +859,17 @@ def non_max_suppression(prediction,
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
- # Box (center x, center y, width, height) to (x1, y1, x2, y2)
- box = xywh2xyxy(x[:, :4])
+ # Box/Mask
+ box = xywh2xyxy(x[:, :4]) # (center_x, center_y, width, height) to (x1, y1, x2, y2)
+ mask = x[:, mi:] # zero columns if no masks
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
- i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
- x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+ i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
+ x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
else: # best class only
- conf, j = x[:, 5:].max(1, keepdim=True)
- x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+ conf, j = x[:, 5:mi].max(1, keepdim=True)
+ x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
@@ -873,6 +885,8 @@ def non_max_suppression(prediction,
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
+ else:
+ x = x[x[:, 4].argsort(descending=True)] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
diff --git a/yolov5/utils/loggers/__init__.py b/yolov5/utils/loggers/__init__.py
index a881afc..72b4dc1 100644
--- a/yolov5/utils/loggers/__init__.py
+++ b/yolov5/utils/loggers/__init__.py
@@ -10,13 +10,14 @@
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
-from yolov5.utils.general import colorstr, cv2, emojis
+from yolov5.utils.general import colorstr, cv2
+from yolov5.utils.loggers.clearml.clearml_utils import ClearmlLogger
from yolov5.utils.loggers.neptune.neptune_utils import NeptuneLogger
from yolov5.utils.loggers.wandb.wandb_utils import WandbLogger
-from yolov5.utils.plots import plot_images, plot_results
+from yolov5.utils.plots import plot_images, plot_labels, plot_results
from yolov5.utils.torch_utils import de_parallel
-LOGGERS = ('csv', 'tb', 'wandb', 'neptune') # text-file, TensorBoard, Weights & Biases
+LOGGERS = ('csv', 'tb', 'wandb', 'neptune', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, Neptune, ClearML, Comet
RANK = int(os.getenv('RANK', -1))
try:
@@ -38,6 +39,28 @@
except ImportError:
neptune = None
+try:
+ import clearml
+
+ assert hasattr(clearml, '__version__') # verify package import not local dir
+except (ImportError, AssertionError):
+ clearml = None
+
+
+try:
+ if RANK not in [0, -1]:
+ comet_ml = None
+ else:
+ import comet_ml
+
+ assert hasattr(comet_ml, '__version__') # verify package import not local dir
+ from yolov5.utils.loggers.comet import CometLogger
+
+except (ModuleNotFoundError, ImportError, AssertionError):
+ comet_ml = None
+
+
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS, mmdet_keys=False, class_names=None):
@@ -45,6 +68,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None,
self.weights = weights
self.opt = opt
self.hyp = hyp
+ self.plots = not opt.noplots # plot results
self.logger = logger # for printing results to console
self.include = include
if not mmdet_keys:
@@ -88,17 +112,11 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None,
self.class_name_keys = ['val/' + name + '_mAP' for name in class_names] + ['val/' + name + '_mAP_50' for name in class_names]
self.s3_weight_folder = None if not opt.s3_upload_dir else "s3://" + str(Path(opt.s3_upload_dir.replace("s3://","")) / save_dir.name / "weights").replace(os.sep, '/')
- # Message
- if not wandb:
- prefix = colorstr('Weights & Biases: ')
- s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
- self.logger.info(emojis(s))
-
+ # Messages
if not neptune:
prefix = colorstr('Neptune AI: ')
s = f"{prefix}run 'pip install neptune-client' to automatically track and visualize YOLOv5 🚀 runs"
- self.logger.info(emojis(s))
-
+ self.logger.info(s)
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
@@ -114,9 +132,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None,
self.wandb = WandbLogger(self.opt, run_id)
# temp warn. because nested artifacts not supported after 0.12.10
if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'):
- self.logger.warning(
- "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected."
- )
+ s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected."
+ self.logger.warning(s)
else:
self.wandb = None
@@ -126,30 +143,76 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None,
else:
self.neptune = None
+ # ClearML
+ if clearml and 'clearml' in self.include:
+ self.clearml = ClearmlLogger(self.opt, self.hyp)
+ else:
+ self.clearml = None
+
+ # Comet
+ if comet_ml and 'comet' in self.include:
+ if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"):
+ run_id = self.opt.resume.split("/")[-1]
+ self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id)
+
+ else:
+ self.comet_logger = CometLogger(self.opt, self.hyp)
+
+ else:
+ self.comet_logger = None
+
+ @property
+ def remote_dataset(self):
+ # Get data_dict if custom dataset artifact link is provided
+ data_dict = None
+ if self.clearml:
+ data_dict = self.clearml.data_dict
+ if self.wandb:
+ data_dict = self.wandb.data_dict
+ if self.comet_logger:
+ data_dict = self.comet_logger.data_dict
+
+ return data_dict
+
def on_train_start(self):
- # Callback runs on train start
- pass
+ if self.comet_logger:
+ self.comet_logger.on_train_start()
+
+ def on_pretrain_routine_start(self):
+ if self.comet_logger:
+ self.comet_logger.on_pretrain_routine_start()
- def on_pretrain_routine_end(self):
+ def on_pretrain_routine_end(self, labels, names):
# Callback runs on pre-train routine end
- paths = self.save_dir.glob('*labels*.jpg') # training labels
- if self.wandb:
- self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
+ if self.plots:
+ plot_labels(labels, names, self.save_dir)
+ paths = self.save_dir.glob('*labels*.jpg') # training labels
+ if self.wandb:
+ self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
+ # if self.clearml:
+ # pass # ClearML saves these images automatically using hooks
+ if self.comet_logger:
+ self.comet_logger.on_pretrain_routine_end(paths)
- def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
+ def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
+ log_dict = dict(zip(self.keys[0:3], vals))
# Callback runs on train batch end
- if plots:
- if ni == 0:
- if self.tb and not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754
- with warnings.catch_warnings():
- warnings.simplefilter('ignore') # suppress jit trace warning
- self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
+ # ni: number integrated batches (since train start)
+ if self.plots:
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
plot_images(imgs, targets, paths, f)
- if self.wandb and ni == 10:
+ if ni == 0 and self.tb and not self.opt.sync_bn:
+ log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz))
+ if ni == 10 and (self.wandb or self.clearml):
files = sorted(self.save_dir.glob('train*.jpg'))
- self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
+ if self.wandb:
+ self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
+ if self.clearml:
+ self.clearml.log_debug_samples(files, title='Mosaics')
+
+ if self.comet_logger:
+ self.comet_logger.on_train_batch_end(log_dict, step=ni)
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
@@ -157,17 +220,35 @@ def on_train_epoch_end(self, epoch):
self.wandb.current_epoch = epoch + 1
if self.neptune and self.neptune.neptune_run:
self.neptune.current_epoch = epoch + 1
+ if self.comet_logger:
+ self.comet_logger.on_train_epoch_end(epoch)
+
+ def on_val_start(self):
+ if self.comet_logger:
+ self.comet_logger.on_val_start()
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
+ if self.clearml:
+ self.clearml.log_image_with_boxes(path, pred, names, im)
- def on_val_end(self):
+ def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
+ if self.comet_logger:
+ self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)
+
+ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
# Callback runs on val end
- if self.wandb:
+ if self.wandb or self.clearml:
files = sorted(self.save_dir.glob('val*.jpg'))
- self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
+ if self.wandb:
+ self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
+ if self.clearml:
+ self.clearml.log_debug_samples(files, title='Validation')
+
+ if self.comet_logger:
+ self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
@@ -182,6 +263,10 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
+ elif self.clearml: # log to ClearML if TensorBoard not used
+ for k, v in x.items():
+ title, series = k.split('/')
+ self.clearml.task.get_logger().report_scalar(title, series, v, epoch)
if self.wandb:
if best_fitness == fi:
@@ -195,24 +280,39 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
self.neptune.log(x)
self.neptune.end_epoch()
+ if self.clearml:
+ self.clearml.current_epoch_logged_images = set() # reset epoch image limit
+ self.clearml.current_epoch += 1
+
+ if self.comet_logger:
+ self.comet_logger.on_fit_epoch_end(x, epoch=epoch)
+
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
- if self.wandb:
- if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
+ if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:
+ if self.wandb:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
if self.neptune and self.neptune.neptune_run and self.s3_weight_folder is not None:
if not final_epoch and best_fitness == fi:
self.neptune.neptune_run["weights"].track_files(self.s3_weight_folder)
- def on_train_end(self, last, best, plots, epoch, results):
- # Callback runs on training end
- if plots:
+ if self.clearml:
+ self.clearml.task.update_output_model(model_path=str(last),
+ model_name='Latest Model',
+ auto_delete_file=False)
+
+ if self.comet_logger:
+ self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)
+
+ def on_train_end(self, last, best, epoch, results):
+ # Callback runs on training end, i.e. saving best model
+ if self.plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
- files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')], "results.html"]
+ files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')), "results.html"]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}")
- if self.tb:
+ if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles
for f in files:
if f.suffix != ".html":
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
@@ -246,8 +346,138 @@ def on_train_end(self, last, best, plots, epoch, results):
self.neptune.finish_run()
- def on_params_update(self, params):
+ if self.clearml and not self.opt.evolve:
+ self.clearml.task.update_output_model(model_path=str(best if best.exists() else last),
+ name='Best Model',
+ auto_delete_file=False)
+
+ if self.comet_logger:
+ final_results = dict(zip(self.keys[3:10], results))
+ self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)
+
+ def on_params_update(self, params: dict):
# Update hyperparams or configs of the experiment
- # params: A dict containing {param: value} pairs
if self.wandb:
self.wandb.wandb_run.config.update(params, allow_val_change=True)
+ if self.comet_logger:
+ self.comet_logger.on_params_update(params)
+
+
+class GenericLogger:
+ """
+ YOLOv5 General purpose logger for non-task specific logging
+ Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...)
+ Arguments
+ opt: Run arguments
+ console_logger: Console logger
+ include: loggers to include
+ """
+
+ def __init__(self, opt, console_logger, include=('tb', 'wandb', 'neptune')):
+ # init default loggers
+ self.save_dir = Path(opt.save_dir)
+ self.include = include
+ self.console_logger = console_logger
+ self.csv = self.save_dir / 'results.csv' # CSV logger
+ if 'tb' in self.include:
+ prefix = colorstr('TensorBoard: ')
+ self.console_logger.info(
+ f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/")
+ self.tb = SummaryWriter(str(self.save_dir))
+
+ if wandb and 'wandb' in self.include:
+ self.wandb = wandb.init(project=web_project_name(str(opt.project)),
+ name=None if opt.name == "exp" else opt.name,
+ config=opt)
+ else:
+ self.wandb = None
+
+ if neptune and 'neptune' in self.include and opt.neptune_token is not None:
+ self.neptune = neptune.init(api_token=opt.neptune_token,
+ project=opt.neptune_project,
+ name=Path(opt.save_dir).stem)
+ else:
+ self.neptune = None
+
+ def log_metrics(self, metrics, epoch):
+ # Log metrics dictionary to all loggers
+ if self.csv:
+ keys, vals = list(metrics.keys()), list(metrics.values())
+ n = len(metrics) + 1 # number of cols
+ s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header
+ with open(self.csv, 'a') as f:
+ f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
+
+ if self.tb:
+ for k, v in metrics.items():
+ self.tb.add_scalar(k, v, epoch)
+
+ if self.wandb:
+ self.wandb.log(metrics, step=epoch)
+
+ if self.neptune:
+ for key, value in metrics.items():
+ self.neptune[key].log(value)
+
+ def log_images(self, files, name='Images', epoch=0):
+ # Log images to all loggers
+ files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path
+ files = [f for f in files if f.exists()] # filter by exists
+
+ if self.tb:
+ for f in files:
+ if f.suffix != ".html":
+ self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
+
+ if self.wandb:
+ results_files = []
+ for f in files: # collect every result file for the W&B log
+ if f.suffix == ".html":
+ results_files.append(wandb.Html(str(f)))
+ else:
+ results_files.append(wandb.Image(str(f), caption=f.name))
+ self.wandb.log({"Results": results_files}, step=epoch)
+
+ if self.neptune:
+ for f in files:
+ if f.suffix == ".html":
+ self.neptune['Results/{}'.format(f)].upload(neptune.types.File(str(f)))
+ else:
+ self.neptune['Results/{}'.format(f)].log(neptune.types.File(str(f)))
+
+ def log_graph(self, model, imgsz=(640, 640)):
+ # Log model graph to all loggers
+ if self.tb:
+ log_tensorboard_graph(self.tb, model, imgsz)
+
+ def log_model(self, model_path, epoch=0, metadata={}):
+ # Log model to all loggers
+ if self.wandb:
+ art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata)
+ art.add_file(str(model_path))
+ wandb.log_artifact(art)
+
+ def update_params(self, params):
+ # Update the parameters logged
+ if self.wandb:
+ wandb.run.config.update(params, allow_val_change=True)
+
+
+def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
+ # Log model graph to TensorBoard
+ try:
+ p = next(model.parameters()) # for device, type
+ imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand
+ im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty)
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore') # suppress jit trace warning
+ tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])
+ except Exception as e:
+ print(f'WARNING: TensorBoard graph visualization failure {e}')
+
+
+def web_project_name(project):
+ # Convert local project name to web project name
+ if not project.startswith('runs/train'):
+ return project
+ suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else ''
+ return f'YOLOv5{suffix}'
diff --git a/yolov5/utils/loggers/clearml/README.md b/yolov5/utils/loggers/clearml/README.md
new file mode 100644
index 0000000..64eef6b
--- /dev/null
+++ b/yolov5/utils/loggers/clearml/README.md
@@ -0,0 +1,222 @@
+# ClearML Integration
+
+
+
+## About ClearML
+
+[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️.
+
+🔨 Track every YOLOv5 training run in the experiment manager
+
+🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool
+
+🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent
+
+🔬 Get the very best mAP using ClearML Hyperparameter Optimization
+
+🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving
+
+
+And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline!
+
+
+
+![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif)
+
+
+
+
+
+## 🦾 Setting Things Up
+
+To keep track of your experiments and/or data, ClearML needs to communicate with a server. You have two options to get one:
+
+Either sign up for free for the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml), or set up your own server as described [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). The server itself is open-source, so even if you're dealing with sensitive data, you should be good to go!
+
+1. Install the `clearml` python package:
+
+ ```bash
+ pip install clearml
+ ```
+
+1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (in the top right, go to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:
+
+ ```bash
+ clearml-init
+ ```
+
+That's it! You're done 😎
+
+
+
+## 🚀 Training YOLOv5 With ClearML
+
+To enable ClearML experiment tracking, simply install the ClearML pip package.
+
+```bash
+pip install clearml
+```
+
+This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py`
+
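+For reference, the relevant call inside that file looks roughly like the sketch below (trimmed from the `ClearmlLogger` shipped in this repository); changing `project_name` and `task_name` here is all that is needed:
+
+```python
+from clearml import Task
+
+# Inside ClearmlLogger.__init__ (utils/loggers/clearml/clearml_utils.py)
+task = Task.init(
+    project_name='YOLOv5',   # <-- change this to log to a different ClearML project
+    task_name='training',    # <-- change this to rename the task
+    tags=['YOLOv5'],
+    output_uri=True,
+    auto_connect_frameworks={'pytorch': False},  # model checkpoints are logged manually instead
+)
+```
+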
+```bash
+python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
+```
+
+This will capture:
+- Source code + uncommitted changes
+- Installed packages
+- (Hyper)parameters
+- Model files (use `--save-period n` to save a checkpoint every n epochs)
+- Console output
+- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...)
+- General info such as machine details, runtime, creation date etc.
+- All produced plots such as label correlogram and confusion matrix
+- Images with bounding boxes per epoch
+- Mosaic per epoch
+- Validation images per epoch
+- ...
+
+That's a lot right? 🤯
+Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!
+
+There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
+
+
+
+## 🔗 Dataset Version Management
+
+Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. On top of that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
+
+![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif)
+
+### Prepare Your Dataset
+
+The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default, datasets are downloaded to the `../datasets` folder relative to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure:
+
+```
+..
+|_ yolov5
+|_ datasets
+ |_ coco128
+ |_ images
+ |_ labels
+ |_ LICENSE
+ |_ README.txt
+```
+But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure.
+
+Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls.
+
+Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names` (a quick sanity-check sketch follows the folder layout below).
+
+```
+..
+|_ yolov5
+|_ datasets
+ |_ coco128
+ |_ images
+ |_ labels
+ |_ coco128.yaml # <---- HERE!
+ |_ LICENSE
+ |_ README.txt
+```
+
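+A quick way to sanity-check the copied yaml before uploading is to mirror the check the ClearML logger itself performs (a minimal sketch based on `construct_dataset()` in `utils/loggers/clearml/clearml_utils.py`):
+
+```python
+import yaml
+
+# The yaml file copied into the dataset root folder
+with open('coco128.yaml') as f:
+    dataset_definition = yaml.safe_load(f)
+
+# construct_dataset() asserts that at least these keys are present
+required = {'train', 'test', 'val', 'nc', 'names'}
+missing = required - set(dataset_definition.keys())
+assert not missing, f'Dataset yaml is missing keys: {missing}'
+```
+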
+### Upload Your Dataset
+
+To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command:
+```bash
+cd coco128
+clearml-data sync --project YOLOv5 --name coco128 --folder .
+```
+
+The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other:
+```bash
+# Optionally add --parent if you want to base
+# this version on another dataset version, so no duplicate files are uploaded!
+clearml-data create --name coco128 --project YOLOv5
+clearml-data add --files .
+clearml-data close
+```
+
+### Run Training Using A ClearML Dataset
+
+Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models!
+
+```bash
+python train.py --img 640 --batch 16 --epochs 3 --data clearml://<your_dataset_id> --weights yolov5s.pt --cache
+```
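+
+Under the hood, the logger resolves that `clearml://` string roughly as follows (a trimmed sketch of `construct_dataset()` from `utils/loggers/clearml/clearml_utils.py`):
+
+```python
+from pathlib import Path
+from clearml import Dataset
+
+dataset_id = 'clearml://<your_dataset_id>'.replace('clearml://', '')
+dataset = Dataset.get(dataset_id=dataset_id)        # fetch this dataset version from ClearML
+dataset_root_path = Path(dataset.get_local_copy())  # download (or reuse) a local copy
+# The yaml file in the dataset root is then parsed to build the train/val/test paths
+```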
+
+
+
+## 👀 Hyperparameter Optimization
+
+Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!
+
+Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically; this is basically what HPO does!
+
+To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager; we will essentially clone it and change its hyperparameters.
+
+You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead.
+
+```bash
+# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch
+pip install optuna
+python utils/loggers/clearml/hpo.py
+```
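+
+Trimmed down, the optimization script boils down to the sketch below (the template task ID is left empty, exactly as in the shipped file, and only one parameter range is shown):
+
+```python
+from clearml import Task
+from clearml.automation import HyperParameterOptimizer, UniformParameterRange
+from clearml.automation.optuna import OptimizerOptuna
+
+task = Task.init(project_name='Hyper-Parameter Optimization', task_name='YOLOv5',
+                 task_type=Task.TaskTypes.optimizer, reuse_last_task_id=False)
+
+optimizer = HyperParameterOptimizer(
+    base_task_id='',  # <-- fill in the ID of your template training task
+    hyper_parameters=[UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1)],
+    objective_metric_title='metrics',
+    objective_metric_series='mAP_0.5',
+    objective_metric_sign='max',
+    optimizer_class=OptimizerOptuna,
+    total_max_jobs=20,
+)
+
+optimizer.start_locally()  # run the cloned experiments on this machine
+optimizer.wait()
+optimizer.stop()
+```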
+
+![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png)
+
+## 🤯 Remote Execution (advanced)
+
+Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs.
+This is where the ClearML Agent comes into play. Check out what the agent can do here:
+
+- [YouTube video](https://youtu.be/MX3BrXnaULs)
+- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent)
+
+In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager.
+
+You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running:
+```bash
+clearml-agent daemon --queue <queues_to_listen_to> [--docker]
+```
+
+### Cloning, Editing And Enqueuing
+
+With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too!
+
+🪄 Clone the experiment by right clicking it
+
+🎯 Edit the hyperparameters to what you wish them to be
+
+⏳ Enqueue the task to any of the queues by right clicking it
+
+![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif)
+
+### Executing A Task Remotely
+
+Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on!
+
+To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the ClearML logger has been instantiated:
+```python
+# ...
+# Loggers
+data_dict = None
+if RANK in {-1, 0}:
+ loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
+ if loggers.clearml:
+ loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE
+        # data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML
+ data_dict = loggers.clearml.data_dict
+# ...
+```
+When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead!
+
+### Autoscaling workers
+
+ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying!
+
+Check out the autoscalers getting started video below.
+
+[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E)
diff --git a/yolov5/utils/loggers/clearml/__init__.py b/yolov5/utils/loggers/clearml/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/yolov5/utils/loggers/clearml/clearml_utils.py b/yolov5/utils/loggers/clearml/clearml_utils.py
new file mode 100644
index 0000000..1cc8e42
--- /dev/null
+++ b/yolov5/utils/loggers/clearml/clearml_utils.py
@@ -0,0 +1,155 @@
+"""Main Logger class for ClearML experiment tracking."""
+import glob
+import re
+from pathlib import Path
+
+import numpy as np
+import yaml
+from yolov5.utils.plots import Annotator, colors
+
+try:
+ import clearml
+ from clearml import Dataset, Task
+ assert hasattr(clearml, '__version__') # verify package import not local dir
+except (ImportError, AssertionError):
+ clearml = None
+
+
+def construct_dataset(clearml_info_string):
+ """Load in a clearml dataset and fill the internal data_dict with its contents.
+ """
+ dataset_id = clearml_info_string.replace('clearml://', '')
+ dataset = Dataset.get(dataset_id=dataset_id)
+ dataset_root_path = Path(dataset.get_local_copy())
+
+ # We'll search for the yaml file definition in the dataset
+ yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml")))
+ if len(yaml_filenames) > 1:
+ raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '
+ 'the dataset definition this way.')
+ elif len(yaml_filenames) == 0:
+ raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '
+ 'inside the dataset root path.')
+ with open(yaml_filenames[0]) as f:
+ dataset_definition = yaml.safe_load(f)
+
+ assert set(dataset_definition.keys()).issuperset(
+ {'train', 'test', 'val', 'nc', 'names'}
+ ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
+
+ data_dict = dict()
+ data_dict['train'] = str(
+ (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None
+ data_dict['test'] = str(
+ (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None
+ data_dict['val'] = str(
+ (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None
+ data_dict['nc'] = dataset_definition['nc']
+ data_dict['names'] = dataset_definition['names']
+
+ return data_dict
+
+
+class ClearmlLogger:
+ """Log training runs, datasets, models, and predictions to ClearML.
+
+ This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,
+ this information includes hyperparameters, system configuration and metrics, model metrics, code information and
+ basic data metrics and analyses.
+
+ By providing additional command line arguments to train.py, datasets,
+ models and predictions can also be logged.
+ """
+
+ def __init__(self, opt, hyp):
+ """
+ - Initialize ClearML Task, this object will capture the experiment
+ - Upload dataset version to ClearML Data if opt.upload_dataset is True
+
+ arguments:
+ opt (namespace) -- Commandline arguments for this run
+ hyp (dict) -- Hyperparameters for this run
+
+ """
+ self.current_epoch = 0
+        # Keep track of the number of logged images to enforce a limit
+ self.current_epoch_logged_images = set()
+ # Maximum number of images to log to clearML per epoch
+ self.max_imgs_to_log_per_epoch = 16
+ # Get the interval of epochs when bounding box images should be logged
+ self.bbox_interval = opt.bbox_interval
+ self.clearml = clearml
+ self.task = None
+ self.data_dict = None
+ if self.clearml:
+ self.task = Task.init(
+ project_name='YOLOv5',
+ task_name='training',
+ tags=['YOLOv5'],
+ output_uri=True,
+ auto_connect_frameworks={'pytorch': False}
+ # We disconnect pytorch auto-detection, because we added manual model save points in the code
+ )
+ # ClearML's hooks will already grab all general parameters
+ # Only the hyperparameters coming from the yaml config file
+ # will have to be added manually!
+ self.task.connect(hyp, name='Hyperparameters')
+
+ # Get ClearML Dataset Version if requested
+ if opt.data.startswith('clearml://'):
+ # data_dict should have the following keys:
+ # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
+ self.data_dict = construct_dataset(opt.data)
+ # Set data to data_dict because wandb will crash without this information and opt is the best way
+ # to give it to them
+ opt.data = self.data_dict
+
+ def log_debug_samples(self, files, title='Debug Samples'):
+ """
+ Log files (images) as debug samples in the ClearML task.
+
+ arguments:
+ files (List(PosixPath)) a list of file paths in PosixPath format
+ title (str) A title that groups together images with the same values
+ """
+ for f in files:
+ if f.exists():
+ it = re.search(r'_batch(\d+)', f.name)
+ iteration = int(it.groups()[0]) if it else 0
+ self.task.get_logger().report_image(title=title,
+ series=f.name.replace(it.group(), ''),
+ local_path=str(f),
+ iteration=iteration)
+
+ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
+ """
+ Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
+
+ arguments:
+        image_path (PosixPath) the path to the original image file
+ boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
+ class_names (dict): dict containing mapping of class int to class name
+ image (Tensor): A torch tensor containing the actual image data
+ """
+ if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
+            # Log every bbox_interval times and deduplicate for any intermittent extra eval runs
+ if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
+ im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
+ annotator = Annotator(im=im, pil=True)
+ for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
+ color = colors(i)
+
+ class_name = class_names[int(class_nr)]
+ confidence_percentage = round(float(conf) * 100, 2)
+ label = f"{class_name}: {confidence_percentage}%"
+
+ if conf > conf_threshold:
+ annotator.rectangle(box.cpu().numpy(), outline=color)
+ annotator.box_label(box.cpu().numpy(), label=label, color=color)
+
+ annotated_image = annotator.result()
+ self.task.get_logger().report_image(title='Bounding Boxes',
+ series=image_path.name,
+ iteration=self.current_epoch,
+ image=annotated_image)
+ self.current_epoch_logged_images.add(image_path)
diff --git a/yolov5/utils/loggers/clearml/hpo.py b/yolov5/utils/loggers/clearml/hpo.py
new file mode 100644
index 0000000..ee518b0
--- /dev/null
+++ b/yolov5/utils/loggers/clearml/hpo.py
@@ -0,0 +1,84 @@
+from clearml import Task
+# Connecting ClearML with the current process,
+# from here on everything is logged automatically
+from clearml.automation import HyperParameterOptimizer, UniformParameterRange
+from clearml.automation.optuna import OptimizerOptuna
+
+task = Task.init(project_name='Hyper-Parameter Optimization',
+ task_name='YOLOv5',
+ task_type=Task.TaskTypes.optimizer,
+ reuse_last_task_id=False)
+
+# Example use case:
+optimizer = HyperParameterOptimizer(
+ # This is the experiment we want to optimize
+ base_task_id='',
+ # here we define the hyper-parameters to optimize
+ # Notice: The parameter name should exactly match what you see in the UI: /
+ # For Example, here we see in the base experiment a section Named: "General"
+ # under it a parameter named "batch_size", this becomes "General/batch_size"
+ # If you have `argparse` for example, then arguments will appear under the "Args" section,
+ # and you should instead pass "Args/batch_size"
+ hyper_parameters=[
+ UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
+ UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
+ UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
+ UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
+ UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
+ UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
+ UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
+ UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
+ UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
+ UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
+ UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
+ UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
+ UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
+ UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
+ UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
+ UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
+ UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
+ UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
+ UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
+ UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
+ UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
+ UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
+ UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
+ UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
+ UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
+ UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
+ UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
+ UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
+ # this is the objective metric we want to maximize/minimize
+ objective_metric_title='metrics',
+ objective_metric_series='mAP_0.5',
+ # now we decide if we want to maximize it or minimize it (accuracy we maximize)
+ objective_metric_sign='max',
+ # let us limit the number of concurrent experiments,
+    # this in turn will make sure we do not bombard the scheduler with experiments.
+    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
+ max_number_of_concurrent_tasks=1,
+ # this is the optimizer class (actually doing the optimization)
+ # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
+ optimizer_class=OptimizerOptuna,
+ # If specified only the top K performing Tasks will be kept, the others will be automatically archived
+    save_top_k_tasks_only=5,
+ compute_time_limit=None,
+ total_max_jobs=20,
+ min_iteration_per_job=None,
+ max_iteration_per_job=None,
+)
+
+# report every 10 seconds, this is way too often, but we are testing here
+optimizer.set_report_period(10 / 60)
+# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent
+# an_optimizer.start_locally(job_complete_callback=job_complete_callback)
+# set the time limit for the optimization process (2 hours)
+optimizer.set_time_limit(in_minutes=120.0)
+# Start the optimization process in the local environment
+optimizer.start_locally()
+# wait until process is done (notice we are controlling the optimization process in the background)
+optimizer.wait()
+# make sure background optimization stopped
+optimizer.stop()
+
+print('We are done, good bye')
diff --git a/yolov5/utils/loggers/comet/README.md b/yolov5/utils/loggers/comet/README.md
new file mode 100644
index 0000000..7b0b8e0
--- /dev/null
+++ b/yolov5/utils/loggers/comet/README.md
@@ -0,0 +1,256 @@
+
+
+# YOLOv5 with Comet
+
+This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration).
+
+# About Comet
+
+Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models.
+
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)!
+Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
+
+# Getting Started
+
+## Install Comet
+
+```shell
+pip install comet_ml
+```
+
+## Configure Comet Credentials
+
+There are two ways to configure Comet with YOLOv5.
+
+You can either set your credentials through environment variables
+
+**Environment Variables**
+
+```shell
+export COMET_API_KEY=
+export COMET_PROJECT_NAME= # This will default to 'yolov5'
+```
+
+Or create a `.comet.config` file in your working directory and set your credentials there.
+
+**Comet Configuration File**
+
+```
+[comet]
+api_key=
+project_name= # This will default to 'yolov5'
+```
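+
+If you prefer to configure things from Python (e.g. in a notebook), `comet_ml.init()` can be used instead; this is a hedged sketch, assuming the standard `comet_ml` behaviour of prompting for an API key when none is configured yet:
+
+```python
+import comet_ml
+
+# Prompts for an API key if one is not already configured and stores it for later runs
+comet_ml.init(project_name="yolov5")
+```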
+
+## Run the Training Script
+
+```shell
+# Train YOLOv5s on COCO128 for 5 epochs
+python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
+```
+
+That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI.
+
+
+
+# Try out an Example!
+Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+Or better yet, try it out yourself in this Colab Notebook
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
+
+# Log automatically
+
+By default, Comet will log the following items:
+
+## Metrics
+- Box Loss, Object Loss, Classification Loss for the training and validation data
+- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
+- Precision and Recall for the validation data
+
+## Parameters
+
+- Model Hyperparameters
+- All parameters passed through the command line options
+
+## Visualizations
+
+- Confusion Matrix of the model predictions on the validation data
+- Plots for the PR and F1 curves across all classes
+- Correlogram of the Class Labels
+
+# Configure Comet Logging
+
+Comet can be configured to log additional data either through command line flags passed to the training script
+or through environment variables.
+
+```shell
+export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
+export COMET_MODEL_NAME= #Set the name for the saved model. Defaults to yolov5
+export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
+export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100.
+export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
+export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
+export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
+export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
+```
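+
+The same settings can also be applied from Python before training starts, since the logger reads them with `os.getenv` when it is imported (a minimal sketch; the values shown are just examples):
+
+```python
+import os
+
+# Must be set before train.py imports the Comet logger
+os.environ["COMET_MODE"] = "online"
+os.environ["COMET_MAX_IMAGE_UPLOADS"] = "200"
+os.environ["COMET_LOG_PER_CLASS_METRICS"] = "true"
+```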
+
+## Logging Checkpoints with Comet
+
+Logging Models to Comet is disabled by default. To enable it, pass the `--save-period` argument to the training script. This will save the
+logged checkpoints to Comet based on the interval value provided by `--save-period`.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--save-period 1
+```
+
+## Logging Model Predictions
+
+By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
+
+You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
+
+**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
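+
+As a rough worked example (the numbers are illustrative; the logger keeps a batch when `(batch_index + 1)` is a multiple of `bbox_interval`), with 128 validation images and the default batch size of 32 you get 4 validation batches per epoch, so `--bbox_interval 2` logs predictions for 2 of them:
+
+```python
+num_val_images = 128   # e.g. the COCO128 validation split
+val_batch_size = 32    # YOLOv5 validation dataloader default
+bbox_interval = 2      # --bbox_interval 2
+
+batches_per_epoch = -(-num_val_images // val_batch_size)  # ceiling division -> 4
+logged = [b for b in range(batches_per_epoch) if (b + 1) % bbox_interval == 0]
+print(f"{len(logged)} of {batches_per_epoch} validation batches logged per epoch")  # 2 of 4
+```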
+
+Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 2
+```
+
+### Controlling the number of Prediction Images logged to Comet
+
+When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
+
+```shell
+env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 1
+```
+
+### Logging Class Level Metrics
+
+Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class.
+
+```shell
+env COMET_LOG_PER_CLASS_METRICS=true python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt
+```
+
+## Uploading a Dataset to Comet Artifacts
+
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag.
+
+The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--upload_dataset
+```
+
+You can find the uploaded dataset in the Artifacts tab in your Comet Workspace
+
+
+You can preview the data directly in the Comet UI.
+
+
+Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file
+
+
+### Using a saved Artifact
+
+If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
+
+```
+# contents of artifact.yaml file
+path: "comet:///:"
+```
+Then pass this file to your training script in the following way
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data artifact.yaml \
+--weights yolov5s.pt
+```
+
+Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
+
+
+## Resuming a Training Run
+
+If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
+
+The Run Path has the following format: `comet://<your workspace name>/<your project name>/<experiment id>`.
+
+This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments, and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI.
+
+```shell
+python train.py \
+--resume "comet://"
+```
+
+## Hyperparameter Search with the Comet Optimizer
+
+YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.
+
+### Configuring an Optimizer Sweep
+
+To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`
+
+```shell
+python utils/loggers/comet/hpo.py \
+ --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
+```
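+
+For reference, `hpo.py` simply loads that JSON file, hands it to `comet_ml.Optimizer`, and then runs one training per parameter set suggested by the sweep (a trimmed sketch of the script included in this repository):
+
+```python
+import json
+
+import comet_ml
+
+with open("utils/loggers/comet/optimizer_config.json") as f:
+    optimizer_config = json.load(f)
+optimizer = comet_ml.Optimizer(optimizer_config)
+
+status = optimizer.status()
+objective = status["spec"]["objective"]  # 'minimize' or 'maximize'
+metric = status["spec"]["metric"]        # the metric being optimized
+
+for experiment in optimizer.get_parameters():
+    parameters = experiment["parameters"]  # one YOLOv5 training run is launched per set
+```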
+
+The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after
+the script.
+
+```shell
+python utils/loggers/comet/hpo.py \
+ --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
+ --save-period 1 \
+ --bbox_interval 1
+```
+
+### Running a Sweep in Parallel
+
+```shell
+comet optimizer -j <number of workers> utils/loggers/comet/hpo.py \
+  utils/loggers/comet/optimizer_config.json
+```
+
+### Visualizing Results
+
+Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+
\ No newline at end of file
diff --git a/yolov5/utils/loggers/comet/__init__.py b/yolov5/utils/loggers/comet/__init__.py
new file mode 100644
index 0000000..0170d0a
--- /dev/null
+++ b/yolov5/utils/loggers/comet/__init__.py
@@ -0,0 +1,493 @@
+import glob
+import json
+import logging
+import os
+import sys
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[3] # YOLOv5 root directory
+
+try:
+ import comet_ml
+
+ # Project Configuration
+ config = comet_ml.config.get_config()
+ COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
+except (ModuleNotFoundError, ImportError):
+ comet_ml = None
+ COMET_PROJECT_NAME = None
+
+import torch
+import torchvision.transforms as T
+import yaml
+from yolov5.utils.dataloaders import img2label_paths
+from yolov5.utils.general import check_dataset, scale_coords, xywh2xyxy
+from yolov5.utils.metrics import box_iou
+
+COMET_PREFIX = "comet://"
+
+COMET_MODE = os.getenv("COMET_MODE", "online")
+
+# Model Saving Settings
+COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
+
+# Dataset Artifact Settings
+COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
+
+# Evaluation Settings
+COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
+COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
+COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
+
+# Confusion Matrix Settings
+CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
+IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
+
+# Batch Logging Settings
+COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
+COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
+COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1)
+COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
+
+RANK = int(os.getenv("RANK", -1))
+
+to_pil = T.ToPILImage()
+
+
+class CometLogger:
+ """Log metrics, parameters, source code, models and much more
+ with Comet
+ """
+
+ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
+ self.job_type = job_type
+ self.opt = opt
+ self.hyp = hyp
+
+ # Comet Flags
+ self.comet_mode = COMET_MODE
+
+ self.save_model = opt.save_period > -1
+ self.model_name = COMET_MODEL_NAME
+
+ # Batch Logging Settings
+ self.log_batch_metrics = COMET_LOG_BATCH_METRICS
+ self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
+
+ # Dataset Artifact Settings
+ self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET
+ self.resume = self.opt.resume
+
+ # Default parameters to pass to Experiment objects
+ self.default_experiment_kwargs = {
+ "log_code": False,
+ "log_env_gpu": True,
+ "log_env_cpu": True,
+ "project_name": COMET_PROJECT_NAME,}
+ self.default_experiment_kwargs.update(experiment_kwargs)
+ self.experiment = self._get_experiment(self.comet_mode, run_id)
+
+ self.data_dict = self.check_dataset(self.opt.data)
+ self.class_names = self.data_dict["names"]
+ self.num_classes = self.data_dict["nc"]
+
+ self.logged_images_count = 0
+ self.max_images = COMET_MAX_IMAGE_UPLOADS
+
+ if run_id is None:
+ self.experiment.log_other("Created from", "YOLOv5")
+ if not isinstance(self.experiment, comet_ml.OfflineExperiment):
+ workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:]
+ self.experiment.log_other(
+ "Run Path",
+ f"{workspace}/{project_name}/{experiment_id}",
+ )
+ self.log_parameters(vars(opt))
+ self.log_parameters(self.opt.hyp)
+ self.log_asset_data(
+ self.opt.hyp,
+ name="hyperparameters.json",
+ metadata={"type": "hyp-config-file"},
+ )
+ self.log_asset(
+ f"{self.opt.save_dir}/opt.yaml",
+ metadata={"type": "opt-config-file"},
+ )
+
+ self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
+
+ if hasattr(self.opt, "conf_thres"):
+ self.conf_thres = self.opt.conf_thres
+ else:
+ self.conf_thres = CONF_THRES
+ if hasattr(self.opt, "iou_thres"):
+ self.iou_thres = self.opt.iou_thres
+ else:
+ self.iou_thres = IOU_THRES
+
+ self.comet_log_predictions = COMET_LOG_PREDICTIONS
+ if self.opt.bbox_interval == -1:
+ self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
+ else:
+ self.comet_log_prediction_interval = self.opt.bbox_interval
+
+ if self.comet_log_predictions:
+ self.metadata_dict = {}
+
+ self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
+
+ self.experiment.log_others({
+ "comet_mode": COMET_MODE,
+ "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
+ "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
+ "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
+ "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
+ "comet_model_name": COMET_MODEL_NAME,})
+
+ # Check if running the Experiment with the Comet Optimizer
+ if hasattr(self.opt, "comet_optimizer_id"):
+ self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id)
+ self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective)
+ self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric)
+ self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp))
+
+ def _get_experiment(self, mode, experiment_id=None):
+ if mode == "offline":
+ if experiment_id is not None:
+ return comet_ml.ExistingOfflineExperiment(
+ previous_experiment=experiment_id,
+ **self.default_experiment_kwargs,
+ )
+
+ return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,)
+
+ else:
+ try:
+ if experiment_id is not None:
+ return comet_ml.ExistingExperiment(
+ previous_experiment=experiment_id,
+ **self.default_experiment_kwargs,
+ )
+
+ return comet_ml.Experiment(**self.default_experiment_kwargs)
+
+ except ValueError:
+ logger.warning("COMET WARNING: "
+ "Comet credentials have not been set. "
+ "Comet will default to offline logging. "
+ "Please set your credentials to enable online logging.")
+ return self._get_experiment("offline", experiment_id)
+
+ return
+
+ def log_metrics(self, log_dict, **kwargs):
+ self.experiment.log_metrics(log_dict, **kwargs)
+
+ def log_parameters(self, log_dict, **kwargs):
+ self.experiment.log_parameters(log_dict, **kwargs)
+
+ def log_asset(self, asset_path, **kwargs):
+ self.experiment.log_asset(asset_path, **kwargs)
+
+ def log_asset_data(self, asset, **kwargs):
+ self.experiment.log_asset_data(asset, **kwargs)
+
+ def log_image(self, img, **kwargs):
+ self.experiment.log_image(img, **kwargs)
+
+ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+ if not self.save_model:
+ return
+
+ model_metadata = {
+ "fitness_score": fitness_score[-1],
+ "epochs_trained": epoch + 1,
+ "save_period": opt.save_period,
+ "total_epochs": opt.epochs,}
+
+ model_files = glob.glob(f"{path}/*.pt")
+ for model_path in model_files:
+ name = Path(model_path).name
+
+ self.experiment.log_model(
+ self.model_name,
+ file_or_folder=model_path,
+ file_name=name,
+ metadata=model_metadata,
+ overwrite=True,
+ )
+
+ def check_dataset(self, data_file):
+ with open(data_file) as f:
+ data_config = yaml.safe_load(f)
+
+ if data_config['path'].startswith(COMET_PREFIX):
+ path = data_config['path'].replace(COMET_PREFIX, "")
+ data_dict = self.download_dataset_artifact(path)
+
+ return data_dict
+
+ self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
+
+ return check_dataset(data_file)
+
+ def log_predictions(self, image, labelsn, path, shape, predn):
+ if self.logged_images_count >= self.max_images:
+ return
+ detections = predn[predn[:, 4] > self.conf_thres]
+ iou = box_iou(labelsn[:, 1:], detections[:, :4])
+ mask, _ = torch.where(iou > self.iou_thres)
+ if len(mask) == 0:
+ return
+
+ filtered_detections = detections[mask]
+ filtered_labels = labelsn[mask]
+
+ processed_image = (image * 255).to(torch.uint8)
+
+ image_id = path.split("/")[-1].split(".")[0]
+ image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
+ self.log_image(to_pil(processed_image), name=image_name)
+
+ metadata = []
+ for cls, *xyxy in filtered_labels.tolist():
+ metadata.append({
+ "label": f"{self.class_names[int(cls)]}-gt",
+ "score": 100,
+ "box": {
+ "x": xyxy[0],
+ "y": xyxy[1],
+ "x2": xyxy[2],
+ "y2": xyxy[3]},})
+ for *xyxy, conf, cls in filtered_detections.tolist():
+ metadata.append({
+ "label": f"{self.class_names[int(cls)]}",
+ "score": conf * 100,
+ "box": {
+ "x": xyxy[0],
+ "y": xyxy[1],
+ "x2": xyxy[2],
+ "y2": xyxy[3]},})
+
+ self.metadata_dict[image_name] = metadata
+ self.logged_images_count += 1
+
+ return
+
+ def preprocess_prediction(self, image, labels, shape, pred):
+ nl, _ = labels.shape[0], pred.shape[0]
+
+ # Predictions
+ if self.opt.single_cls:
+ pred[:, 5] = 0
+
+ predn = pred.clone()
+ scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1])
+
+ labelsn = None
+ if nl:
+ tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
+ scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels
+ labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
+ scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred
+
+ return predn, labelsn
+
+ def add_assets_to_artifact(self, artifact, path, asset_path, split):
+ img_paths = sorted(glob.glob(f"{asset_path}/*"))
+ label_paths = img2label_paths(img_paths)
+
+ for image_file, label_file in zip(img_paths, label_paths):
+ image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
+
+ try:
+ artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split})
+ artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split})
+ except ValueError as e:
+ logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
+ logger.error(f"COMET ERROR: {e}")
+ continue
+
+ return artifact
+
+ def upload_dataset_artifact(self):
+ dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
+ path = str((ROOT / Path(self.data_dict["path"])).resolve())
+
+ metadata = self.data_dict.copy()
+ for key in ["train", "val", "test"]:
+ split_path = metadata.get(key)
+ if split_path is not None:
+ metadata[key] = split_path.replace(path, "")
+
+ artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata)
+ for key in metadata.keys():
+ if key in ["train", "val", "test"]:
+ if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):
+ continue
+
+ asset_path = self.data_dict.get(key)
+ if asset_path is not None:
+ artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)
+
+ self.experiment.log_artifact(artifact)
+
+ return
+
+ def download_dataset_artifact(self, artifact_path):
+ logged_artifact = self.experiment.get_artifact(artifact_path)
+ artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
+ logged_artifact.download(artifact_save_dir)
+
+ metadata = logged_artifact.metadata
+ data_dict = metadata.copy()
+ data_dict["path"] = artifact_save_dir
+ data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
+
+ data_dict = self.update_data_paths(data_dict)
+ return data_dict
+
+ def update_data_paths(self, data_dict):
+ path = data_dict.get("path", "")
+
+ for split in ["train", "val", "test"]:
+ if data_dict.get(split):
+ split_path = data_dict.get(split)
+                data_dict[split] = (f"{path}/{split_path}" if isinstance(split_path, str) else [
+                    f"{path}/{x}" for x in split_path])
+
+ return data_dict
+
+ def on_pretrain_routine_end(self, paths):
+ if self.opt.resume:
+ return
+
+ for path in paths:
+ self.log_asset(str(path))
+
+ if self.upload_dataset:
+ if not self.resume:
+ self.upload_dataset_artifact()
+
+ return
+
+ def on_train_start(self):
+ self.log_parameters(self.hyp)
+
+ def on_train_epoch_start(self):
+ return
+
+ def on_train_epoch_end(self, epoch):
+ self.experiment.curr_epoch = epoch
+
+ return
+
+ def on_train_batch_start(self):
+ return
+
+ def on_train_batch_end(self, log_dict, step):
+ self.experiment.curr_step = step
+ if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
+ self.log_metrics(log_dict, step=step)
+
+ return
+
+ def on_train_end(self, files, save_dir, last, best, epoch, results):
+ if self.comet_log_predictions:
+ curr_epoch = self.experiment.curr_epoch
+ self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch)
+
+ for f in files:
+ self.log_asset(f, metadata={"epoch": epoch})
+ self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
+
+ if not self.opt.evolve:
+ model_path = str(best if best.exists() else last)
+ name = Path(model_path).name
+ if self.save_model:
+ self.experiment.log_model(
+ self.model_name,
+ file_or_folder=model_path,
+ file_name=name,
+ overwrite=True,
+ )
+
+ # Check if running Experiment with Comet Optimizer
+ if hasattr(self.opt, 'comet_optimizer_id'):
+ metric = results.get(self.opt.comet_optimizer_metric)
+ self.experiment.log_other('optimizer_metric_value', metric)
+
+ self.finish_run()
+
+ def on_val_start(self):
+ return
+
+ def on_val_batch_start(self):
+ return
+
+ def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
+ if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
+ return
+
+ for si, pred in enumerate(outputs):
+ if len(pred) == 0:
+ continue
+
+ image = images[si]
+ labels = targets[targets[:, 0] == si, 1:]
+ shape = shapes[si]
+ path = paths[si]
+ predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)
+ if labelsn is not None:
+ self.log_predictions(image, labelsn, path, shape, predn)
+
+ return
+
+ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
+ if self.comet_log_per_class_metrics:
+ if self.num_classes > 1:
+ for i, c in enumerate(ap_class):
+ class_name = self.class_names[c]
+ self.experiment.log_metrics(
+ {
+ 'mAP@.5': ap50[i],
+ 'mAP@.5:.95': ap[i],
+ 'precision': p[i],
+ 'recall': r[i],
+ 'f1': f1[i],
+ 'true_positives': tp[i],
+ 'false_positives': fp[i],
+ 'support': nt[c]},
+ prefix=class_name)
+
+ if self.comet_log_confusion_matrix:
+ epoch = self.experiment.curr_epoch
+ class_names = list(self.class_names.values())
+ class_names.append("background")
+ num_classes = len(class_names)
+
+ self.experiment.log_confusion_matrix(
+ matrix=confusion_matrix.matrix,
+ max_categories=num_classes,
+ labels=class_names,
+ epoch=epoch,
+ column_label='Actual Category',
+ row_label='Predicted Category',
+ file_name=f"confusion-matrix-epoch-{epoch}.json",
+ )
+
+ def on_fit_epoch_end(self, result, epoch):
+ self.log_metrics(result, epoch=epoch)
+
+ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
+ if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
+ self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
+
+ def on_params_update(self, params):
+ self.log_parameters(params)
+
+ def finish_run(self):
+ self.experiment.end()
diff --git a/yolov5/utils/loggers/comet/comet_utils.py b/yolov5/utils/loggers/comet/comet_utils.py
new file mode 100644
index 0000000..3cbd451
--- /dev/null
+++ b/yolov5/utils/loggers/comet/comet_utils.py
@@ -0,0 +1,150 @@
+import logging
+import os
+from urllib.parse import urlparse
+
+try:
+ import comet_ml
+except (ModuleNotFoundError, ImportError):
+ comet_ml = None
+
+import yaml
+
+logger = logging.getLogger(__name__)
+
+COMET_PREFIX = "comet://"
+COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
+COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt")
+
+
+def download_model_checkpoint(opt, experiment):
+ model_dir = f"{opt.project}/{experiment.name}"
+ os.makedirs(model_dir, exist_ok=True)
+
+ model_name = COMET_MODEL_NAME
+ model_asset_list = experiment.get_model_asset_list(model_name)
+
+ if len(model_asset_list) == 0:
+ logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}")
+ return
+
+ model_asset_list = sorted(
+ model_asset_list,
+ key=lambda x: x["step"],
+ reverse=True,
+ )
+ logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
+
+ resource_url = urlparse(opt.weights)
+ checkpoint_filename = resource_url.query
+
+ if checkpoint_filename:
+ asset_id = logged_checkpoint_map.get(checkpoint_filename)
+ else:
+ asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
+ checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
+
+ if asset_id is None:
+ logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
+ return
+
+ try:
+ logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
+ asset_filename = checkpoint_filename
+
+ model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
+ model_download_path = f"{model_dir}/{asset_filename}"
+ with open(model_download_path, "wb") as f:
+ f.write(model_binary)
+
+ opt.weights = model_download_path
+
+ except Exception as e:
+ logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
+ logger.exception(e)
+
+
+def set_opt_parameters(opt, experiment):
+ """Update the opts Namespace with parameters
+ from Comet's ExistingExperiment when resuming a run
+
+ Args:
+ opt (argparse.Namespace): Namespace of command line options
+ experiment (comet_ml.APIExperiment): Comet API Experiment object
+ """
+ asset_list = experiment.get_asset_list()
+ resume_string = opt.resume
+
+ for asset in asset_list:
+ if asset["fileName"] == "opt.yaml":
+ asset_id = asset["assetId"]
+ asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
+ opt_dict = yaml.safe_load(asset_binary)
+ for key, value in opt_dict.items():
+ setattr(opt, key, value)
+ opt.resume = resume_string
+
+ # Save hyperparameters to YAML file
+ # Necessary to pass checks in training script
+ save_dir = f"{opt.project}/{experiment.name}"
+ os.makedirs(save_dir, exist_ok=True)
+
+ hyp_yaml_path = f"{save_dir}/hyp.yaml"
+ with open(hyp_yaml_path, "w") as f:
+ yaml.dump(opt.hyp, f)
+ opt.hyp = hyp_yaml_path
+
+
+def check_comet_weights(opt):
+ """Downloads model weights from Comet and updates the
+ weights path to point to saved weights location
+
+ Args:
+ opt (argparse.Namespace): Command Line arguments passed
+ to YOLOv5 training script
+
+ Returns:
+ None/bool: Return True if weights are successfully downloaded
+ else return None
+ """
+ if comet_ml is None:
+ return
+
+ if isinstance(opt.weights, str):
+ if opt.weights.startswith(COMET_PREFIX):
+ api = comet_ml.API()
+ resource = urlparse(opt.weights)
+ experiment_path = f"{resource.netloc}{resource.path}"
+ experiment = api.get(experiment_path)
+ download_model_checkpoint(opt, experiment)
+ return True
+
+ return None
+
+
+def check_comet_resume(opt):
+ """Restores run parameters to its original state based on the model checkpoint
+ and logged Experiment parameters.
+
+ Args:
+ opt (argparse.Namespace): Command Line arguments passed
+ to YOLOv5 training script
+
+ Returns:
+ None/bool: Return True if the run is restored successfully
+ else return None
+ """
+ if comet_ml is None:
+ return
+
+ if isinstance(opt.resume, str):
+ if opt.resume.startswith(COMET_PREFIX):
+ api = comet_ml.API()
+ resource = urlparse(opt.resume)
+ experiment_path = f"{resource.netloc}{resource.path}"
+ experiment = api.get(experiment_path)
+ set_opt_parameters(opt, experiment)
+ download_model_checkpoint(opt, experiment)
+
+ return True
+
+ return None
diff --git a/yolov5/utils/loggers/comet/hpo.py b/yolov5/utils/loggers/comet/hpo.py
new file mode 100644
index 0000000..0d9bbb8
--- /dev/null
+++ b/yolov5/utils/loggers/comet/hpo.py
@@ -0,0 +1,116 @@
+import argparse
+import json
+import logging
+import os
+import sys
+from pathlib import Path
+
+import comet_ml
+
+logger = logging.getLogger(__name__)
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[3] # YOLOv5 root directory
+
+from yolov5.train import parse_opt, train
+from yolov5.utils.callbacks import Callbacks
+from yolov5.utils.general import increment_path
+from yolov5.utils.torch_utils import select_device
+
+# Project Configuration
+config = comet_ml.config.get_config()
+COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
+
+
+def get_args(known=False):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
+ parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
+ parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+ parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+ parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+ parser.add_argument('--noplots', action='store_true', help='save no plot files')
+ parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+ parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+ parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+ parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+ # Weights & Biases arguments
+ parser.add_argument('--entity', default=None, help='W&B: Entity')
+ parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
+ parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
+ parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
+
+ # Comet Arguments
+ parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.")
+ parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.")
+ parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.")
+ parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.")
+ parser.add_argument("--comet_optimizer_workers",
+ type=int,
+ default=1,
+ help="Comet: Number of Parallel Workers to use with the Comet Optimizer.")
+
+ return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def run(parameters, opt):
+ hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]}
+
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
+ opt.batch_size = parameters.get("batch_size")
+ opt.epochs = parameters.get("epochs")
+
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ train(hyp_dict, opt, device, callbacks=Callbacks())
+
+
+if __name__ == "__main__":
+ opt = get_args(known=True)
+
+ opt.weights = str(opt.weights)
+ opt.cfg = str(opt.cfg)
+ opt.data = str(opt.data)
+ opt.project = str(opt.project)
+
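+    # If COMET_OPTIMIZER_ID is already set in the environment (typically by the `comet optimize` CLI),
+    # attach to that existing sweep; otherwise create a new sweep from the --comet_optimizer_config JSON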
+ optimizer_id = os.getenv("COMET_OPTIMIZER_ID")
+ if optimizer_id is None:
+ with open(opt.comet_optimizer_config) as f:
+ optimizer_config = json.load(f)
+ optimizer = comet_ml.Optimizer(optimizer_config)
+ else:
+ optimizer = comet_ml.Optimizer(optimizer_id)
+
+ opt.comet_optimizer_id = optimizer.id
+ status = optimizer.status()
+
+ opt.comet_optimizer_objective = status["spec"]["objective"]
+ opt.comet_optimizer_metric = status["spec"]["metric"]
+
+ logger.info("COMET INFO: Starting Hyperparameter Sweep")
+ for parameter in optimizer.get_parameters():
+ run(parameter["parameters"], opt)
diff --git a/yolov5/utils/metrics.py b/yolov5/utils/metrics.py
index 9bf084c..7a8fb56 100644
--- a/yolov5/utils/metrics.py
+++ b/yolov5/utils/metrics.py
@@ -10,6 +10,7 @@
import matplotlib.pyplot as plt
import numpy as np
import torch
+from yolov5.utils import TryExcept, threaded
def fitness(x):
@@ -26,7 +27,7 @@ def smooth(y, f=0.05):
return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed
-def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
@@ -81,10 +82,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names
names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
names = dict(enumerate(names)) # to dict
if plot:
- plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
- plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
- plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
- plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
+ plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names)
+ plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1')
+ plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision')
+ plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall')
i = smooth(f1.mean(0), 0.1).argmax() # max F1 index
p, r, f1 = p[:, i], r[:, i], f1[:, i]
@@ -141,7 +142,7 @@ def process_batch(self, detections, labels):
"""
if detections is None:
gt_classes = labels.int()
- for i, gc in enumerate(gt_classes):
+ for gc in gt_classes:
self.matrix[self.nc, gc] += 1 # background FN
return
@@ -168,12 +169,12 @@ def process_batch(self, detections, labels):
if n and sum(j) == 1:
self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
- self.matrix[self.nc, gc] += 1 # background FP
+ self.matrix[self.nc, gc] += 1 # true background
if n:
for i, dc in enumerate(detection_classes):
if not any(m1 == i):
- self.matrix[dc, self.nc] += 1 # background FN
+ self.matrix[dc, self.nc] += 1 # predicted background
def matrix(self):
return self.matrix
@@ -184,35 +185,36 @@ def tp_fp(self):
# fn = self.matrix.sum(0) - tp # false negatives (missed detections)
return tp[:-1], fp[:-1] # remove background class
+ @TryExcept('WARNING: ConfusionMatrix plot failure: ')
def plot(self, normalize=True, save_dir='', names=()):
- try:
- import seaborn as sn
-
- array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns
- array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
-
- fig = plt.figure(figsize=(12, 9), tight_layout=True)
- nc, nn = self.nc, len(names) # number of classes, names
- sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
- labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
- with warnings.catch_warnings():
- warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
- sn.heatmap(array,
- annot=nc < 30,
- annot_kws={
- "size": 8},
- cmap='Blues',
- fmt='.2f',
- square=True,
- vmin=0.0,
- xticklabels=names + ['background FP'] if labels else "auto",
- yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
- fig.axes[0].set_xlabel('True')
- fig.axes[0].set_ylabel('Predicted')
- fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
- plt.close()
- except Exception as e:
- print(f'WARNING: ConfusionMatrix plot failure: {e}')
+ import seaborn as sn
+
+ array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns
+ array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
+
+ fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
+ nc, nn = self.nc, len(names) # number of classes, names
+ sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
+ labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
+ ticklabels = (names + ['background']) if labels else "auto"
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
+ sn.heatmap(array,
+ ax=ax,
+ annot=nc < 30,
+ annot_kws={
+ "size": 8},
+ cmap='Blues',
+ fmt='.2f',
+ square=True,
+ vmin=0.0,
+ xticklabels=ticklabels,
+ yticklabels=ticklabels).set_facecolor((1, 1, 1))
+        ax.set_xlabel('True')
+ ax.set_ylabel('Predicted')
+ ax.set_title('Confusion Matrix')
+ fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+ plt.close(fig)
def print(self):
for i in range(self.nc + 1):
@@ -319,6 +321,7 @@ def wh_iou(wh1, wh2, eps=1e-7):
# Plots ----------------------------------------------------------------------------------------------------------------
+@threaded
def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
# Precision-recall curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -335,11 +338,13 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
ax.set_ylabel('Precision')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
- plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.set_title('Precision-Recall Curve')
fig.savefig(save_dir, dpi=250)
- plt.close()
+ plt.close(fig)
+@threaded
def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
# Metric-confidence curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -356,6 +361,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi
ax.set_ylabel(ylabel)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
- plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.set_title(f'{ylabel}-Confidence Curve')
fig.savefig(save_dir, dpi=250)
- plt.close()
+ plt.close(fig)
diff --git a/yolov5/utils/plots.py b/yolov5/utils/plots.py
index 6bfad2b..f3188f2 100644
--- a/yolov5/utils/plots.py
+++ b/yolov5/utils/plots.py
@@ -3,6 +3,7 @@
Plotting utils
"""
+import contextlib
import math
import os
from copy import copy
@@ -17,11 +18,13 @@
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont
-from yolov5.utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout,
- check_font, check_requirements, clip_coords,
- increment_path, is_ascii, threaded,
- try_except, xywh2xyxy, xyxy2xywh)
+from yolov5.utils import TryExcept, threaded
+from yolov5.utils.general import (CONFIG_DIR, FONT, LOGGER, check_font,
+ check_requirements, clip_coords,
+ increment_path, is_ascii, xywh2xyxy,
+ xyxy2xywh)
from yolov5.utils.metrics import fitness
+from yolov5.utils.segment.general import scale_image
# Settings
RANK = int(os.getenv('RANK', -1))
@@ -112,14 +115,67 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2
thickness=tf,
lineType=cv2.LINE_AA)
+ def masks(self, masks, colors, im_gpu=None, alpha=0.5):
+ """Plot masks at once.
+ Args:
+ masks (tensor): predicted masks on cuda, shape: [n, h, w]
+ colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
+            im_gpu (tensor): image tensor on CUDA, shape: [3, h, w], range: [0, 1]
+ alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
+ """
+ if self.pil:
+ # convert to numpy first
+ self.im = np.asarray(self.im).copy()
+ if im_gpu is None:
+ # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...)
+ if len(masks) == 0:
+ return
+ if isinstance(masks, torch.Tensor):
+ masks = torch.as_tensor(masks, dtype=torch.uint8)
+ masks = masks.permute(1, 2, 0).contiguous()
+ masks = masks.cpu().numpy()
+ # masks = np.ascontiguousarray(masks.transpose(1, 2, 0))
+ masks = scale_image(masks.shape[:2], masks, self.im.shape)
+ masks = np.asarray(masks, dtype=np.float32)
+ colors = np.asarray(colors, dtype=np.float32) # shape(n,3)
+ s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together
+ masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3)
+ self.im[:] = masks * alpha + self.im * (1 - s * alpha)
+ else:
+ if len(masks) == 0:
+ self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
+ colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
+ colors = colors[:, None, None] # shape(n,1,1,3)
+ masks = masks.unsqueeze(3) # shape(n,h,w,1)
+ masks_color = masks * (colors * alpha) # shape(n,h,w,3)
+
+ inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
+            mcs = (masks_color * inv_alph_masks).sum(0) * 2  # mask color summand shape(h,w,3)
+
+ im_gpu = im_gpu.flip(dims=[0]) # flip channel
+ im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
+ im_gpu = im_gpu * inv_alph_masks[-1] + mcs
+ im_mask = (im_gpu * 255).byte().cpu().numpy()
+ self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape)
+ if self.pil:
+ # convert im back to PIL and update draw
+ self.fromarray(self.im)
+
def rectangle(self, xy, fill=None, outline=None, width=1):
# Add rectangle to image (PIL-only)
self.draw.rectangle(xy, fill, outline, width)
- def text(self, xy, text, txt_color=(255, 255, 255)):
+ def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
# Add text to image (PIL-only)
- w, h = self.font.getsize(text) # text width, height
- self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
+ if anchor == 'bottom': # start y from font bottom
+ w, h = self.font.getsize(text) # text width, height
+ xy[1] += 1 - h
+ self.draw.text(xy, text, fill=txt_color, font=self.font)
+
+ def fromarray(self, im):
+ # Update self.im from a numpy array
+ self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
+ self.draw = ImageDraw.Draw(self.im)
def result(self):
# Return annotated image as array
@@ -149,6 +205,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec
ax[i].axis('off')
LOGGER.info(f'Saving {f}... ({n}/{channels})')
+ plt.title('Features')
plt.savefig(f, dpi=300, bbox_inches='tight')
plt.close()
np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save
@@ -176,27 +233,31 @@ def butter_lowpass(cutoff, fs, order):
return filtfilt(b, a, data) # forward-backward filter
-def output_to_target(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
+def output_to_target(output, max_det=300):
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting
targets = []
for i, o in enumerate(output):
- for *box, conf, cls in o.cpu().numpy():
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
- return np.array(targets)
+ box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
+ j = torch.full((conf.shape[0], 1), i)
+ targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
+ return torch.cat(targets, 0).numpy()
@threaded
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
+def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
- if np.max(images[0]) <= 1:
- images *= 255 # de-normalise (optional)
+
+ max_size = 1920 # max image size
+ max_subplots = 16 # max image subplots, i.e. 4x4
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
+ if np.max(images[0]) <= 1:
+ images *= 255 # de-normalise (optional)
# Build Image
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
@@ -221,7 +282,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths:
- annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
+ annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0:
ti = targets[targets[:, 0] == i] # image targets
boxes = xywh2xyxy(ti[:, 2:6]).T
@@ -339,8 +400,7 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_
plt.savefig(f, dpi=300)
-@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395
-@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611
+@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
def plot_labels(labels, names=(), save_dir=Path('')):
# plot dataset labels
LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
@@ -357,14 +417,12 @@ def plot_labels(labels, names=(), save_dir=Path('')):
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- try: # color histogram bars by class
+ with contextlib.suppress(Exception): # color histogram bars by class
[y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195
- except Exception:
- pass
ax[0].set_ylabel('instances')
if 0 < len(names) < 30:
ax[0].set_xticks(range(len(names)))
- ax[0].set_xticklabels(names, rotation=90, fontsize=10)
+ ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
else:
ax[0].set_xlabel('classes')
sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
@@ -388,6 +446,35 @@ def plot_labels(labels, names=(), save_dir=Path('')):
plt.close()
+def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):
+ # Show classification image grid with labels (optional) and predictions (optional)
+ from yolov5.utils.augmentations import denormalize
+
+ names = names or [f'class{i}' for i in range(1000)]
+    blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im),
+                         dim=0)  # split the batch into single-image blocks
+ n = min(len(blocks), nmax) # number of plots
+ m = min(8, round(n ** 0.5)) # 8 x 8 default
+    fig, ax = plt.subplots(math.ceil(n / m), m)  # ceil(n/m) rows x m columns
+ ax = ax.ravel() if m > 1 else [ax]
+ # plt.subplots_adjust(wspace=0.05, hspace=0.05)
+ for i in range(n):
+ ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))
+ ax[i].axis('off')
+ if labels is not None:
+ s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '')
+ ax[i].set_title(s, fontsize=8, verticalalignment='top')
+ plt.savefig(f, dpi=300, bbox_inches='tight')
+ plt.close()
+ if verbose:
+ LOGGER.info(f"Saving {f}")
+ if labels is not None:
+ LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax]))
+ if pred is not None:
+ LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax]))
+ return f
+
+
def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
# Plot evolve.csv hyp evolution results
evolve_csv = Path(evolve_csv)
diff --git a/yolov5/utils/segment/__init__.py b/yolov5/utils/segment/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/yolov5/utils/segment/augmentations.py b/yolov5/utils/segment/augmentations.py
new file mode 100644
index 0000000..169adde
--- /dev/null
+++ b/yolov5/utils/segment/augmentations.py
@@ -0,0 +1,104 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Image augmentation functions
+"""
+
+import math
+import random
+
+import cv2
+import numpy as np
+
+from ..augmentations import box_candidates
+from ..general import resample_segments, segment2box
+
+
+def mixup(im, labels, segments, im2, labels2, segments2):
+ # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
+ r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
+ im = (im * r + im2 * (1 - r)).astype(np.uint8)
+ labels = np.concatenate((labels, labels2), 0)
+ segments = np.concatenate((segments, segments2), 0)
+ return im, labels, segments
+
+
+def random_perspective(im,
+ targets=(),
+ segments=(),
+ degrees=10,
+ translate=.1,
+ scale=.1,
+ shear=10,
+ perspective=0.0,
+ border=(0, 0)):
+ # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
+ # targets = [cls, xyxy]
+
+ height = im.shape[0] + border[0] * 2 # shape(h,w,c)
+ width = im.shape[1] + border[1] * 2
+
+ # Center
+ C = np.eye(3)
+ C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
+ C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
+
+ # Perspective
+ P = np.eye(3)
+ P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
+ P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
+
+ # Rotation and Scale
+ R = np.eye(3)
+ a = random.uniform(-degrees, degrees)
+ # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
+ s = random.uniform(1 - scale, 1 + scale)
+ # s = 2 ** random.uniform(-scale, scale)
+ R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
+
+ # Shear
+ S = np.eye(3)
+ S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
+ S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
+
+ # Translation
+ T = np.eye(3)
+ T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)
+ T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)
+
+ # Combined rotation matrix
+ M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
+ if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
+ if perspective:
+ im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
+ else: # affine
+ im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
+
+ # Visualize
+ # import matplotlib.pyplot as plt
+ # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
+ # ax[0].imshow(im[:, :, ::-1]) # base
+ # ax[1].imshow(im2[:, :, ::-1]) # warped
+
+ # Transform label coordinates
+ n = len(targets)
+ new_segments = []
+ if n:
+ new = np.zeros((n, 4))
+ segments = resample_segments(segments) # upsample
+ for i, segment in enumerate(segments):
+ xy = np.ones((len(segment), 3))
+ xy[:, :2] = segment
+ xy = xy @ M.T # transform
+ xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine
+
+ # clip
+ new[i] = segment2box(xy, width, height)
+ new_segments.append(xy)
+
+ # filter candidates
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
+ targets = targets[i]
+ targets[:, 1:5] = new[i]
+ new_segments = np.array(new_segments)[i]
+
+ return im, targets, new_segments
diff --git a/yolov5/utils/segment/dataloaders.py b/yolov5/utils/segment/dataloaders.py
new file mode 100644
index 0000000..f6fe642
--- /dev/null
+++ b/yolov5/utils/segment/dataloaders.py
@@ -0,0 +1,330 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Dataloaders
+"""
+
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, distributed
+
+from ..augmentations import augment_hsv, copy_paste, letterbox
+from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker
+from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
+from ..torch_utils import torch_distributed_zero_first
+from .augmentations import mixup, random_perspective
+
+
+def create_dataloader(path,
+ imgsz,
+ batch_size,
+ stride,
+ single_cls=False,
+ hyp=None,
+ augment=False,
+ cache=False,
+ pad=0.0,
+ rect=False,
+ rank=-1,
+ workers=8,
+ image_weights=False,
+ quad=False,
+ prefix='',
+ shuffle=False,
+ mask_downsample_ratio=1,
+ overlap_mask=False):
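+    # mask_downsample_ratio shrinks returned masks relative to the input image (e.g. ratio 4 -> 160x160
+    # masks for a 640x640 image); overlap_mask packs all instances of an image into one index-encoded mask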
+ if rect and shuffle:
+ LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
+ shuffle = False
+ with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
+ dataset = LoadImagesAndLabelsAndMasks(
+ path,
+ imgsz,
+ batch_size,
+ augment=augment, # augmentation
+ hyp=hyp, # hyperparameters
+ rect=rect, # rectangular batches
+ cache_images=cache,
+ single_cls=single_cls,
+ stride=int(stride),
+ pad=pad,
+ image_weights=image_weights,
+ prefix=prefix,
+ downsample_ratio=mask_downsample_ratio,
+ overlap=overlap_mask)
+
+ batch_size = min(batch_size, len(dataset))
+ nd = torch.cuda.device_count() # number of CUDA devices
+ nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
+ sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
+ loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
+ # generator = torch.Generator()
+ # generator.manual_seed(0)
+ return loader(
+ dataset,
+ batch_size=batch_size,
+ shuffle=shuffle and sampler is None,
+ num_workers=nw,
+ sampler=sampler,
+ pin_memory=True,
+ collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,
+ worker_init_fn=seed_worker,
+ # generator=generator,
+ ), dataset
+
+
+class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing
+
+ def __init__(
+ self,
+ path,
+ img_size=640,
+ batch_size=16,
+ augment=False,
+ hyp=None,
+ rect=False,
+ image_weights=False,
+ cache_images=False,
+ single_cls=False,
+ stride=32,
+ pad=0,
+ prefix="",
+ downsample_ratio=1,
+ overlap=False,
+ ):
+ super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls,
+ stride, pad, prefix)
+ self.downsample_ratio = downsample_ratio
+ self.overlap = overlap
+
+ def __getitem__(self, index):
+ index = self.indices[index] # linear, shuffled, or image_weights
+
+ hyp = self.hyp
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
+ masks = []
+ if mosaic:
+ # Load mosaic
+ img, labels, segments = self.load_mosaic(index)
+ shapes = None
+
+ # MixUp augmentation
+ if random.random() < hyp["mixup"]:
+ img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1)))
+
+ else:
+ # Load image
+ img, (h0, w0), (h, w) = self.load_image(index)
+
+ # Letterbox
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
+
+ labels = self.labels[index].copy()
+ # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy
+ segments = self.segments[index].copy()
+ if len(segments):
+ for i_s in range(len(segments)):
+ segments[i_s] = xyn2xy(
+ segments[i_s],
+ ratio[0] * w,
+ ratio[1] * h,
+ padw=pad[0],
+ padh=pad[1],
+ )
+ if labels.size: # normalized xywh to pixel xyxy format
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
+
+ if self.augment:
+ img, labels, segments = random_perspective(
+ img,
+ labels,
+ segments=segments,
+ degrees=hyp["degrees"],
+ translate=hyp["translate"],
+ scale=hyp["scale"],
+ shear=hyp["shear"],
+ perspective=hyp["perspective"],
+ )
+
+ nl = len(labels) # number of labels
+ if nl:
+ labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3)
+ if self.overlap:
+ masks, sorted_idx = polygons2masks_overlap(img.shape[:2],
+ segments,
+ downsample_ratio=self.downsample_ratio)
+ masks = masks[None] # (640, 640) -> (1, 640, 640)
+ labels = labels[sorted_idx]
+ else:
+ masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio)
+
+ masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] //
+ self.downsample_ratio, img.shape[1] //
+ self.downsample_ratio))
+ # TODO: albumentations support
+ if self.augment:
+ # Albumentations
+            # some augmentations do not change boxes or masks, so leave them unchanged for now
+ img, labels = self.albumentations(img, labels)
+ nl = len(labels) # update after albumentations
+
+ # HSV color-space
+ augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"])
+
+ # Flip up-down
+ if random.random() < hyp["flipud"]:
+ img = np.flipud(img)
+ if nl:
+ labels[:, 2] = 1 - labels[:, 2]
+ masks = torch.flip(masks, dims=[1])
+
+ # Flip left-right
+ if random.random() < hyp["fliplr"]:
+ img = np.fliplr(img)
+ if nl:
+ labels[:, 1] = 1 - labels[:, 1]
+ masks = torch.flip(masks, dims=[2])
+
+ # Cutouts # labels = cutout(img, labels, p=0.5)
+
+ labels_out = torch.zeros((nl, 6))
+ if nl:
+ labels_out[:, 1:] = torch.from_numpy(labels)
+
+ # Convert
+ img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
+ img = np.ascontiguousarray(img)
+
+ return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks)
+
+ def load_mosaic(self, index):
+ # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
+ labels4, segments4 = [], []
+ s = self.img_size
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
+
+ # 3 additional image indices
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
+ for i, index in enumerate(indices):
+ # Load image
+ img, _, (h, w) = self.load_image(index)
+
+ # place img in img4
+ if i == 0: # top left
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
+ elif i == 1: # top right
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+ elif i == 2: # bottom left
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+ elif i == 3: # bottom right
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
+ padw = x1a - x1b
+ padh = y1a - y1b
+
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
+
+ if labels.size:
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
+ labels4.append(labels)
+ segments4.extend(segments)
+
+ # Concat/clip labels
+ labels4 = np.concatenate(labels4, 0)
+ for x in (labels4[:, 1:], *segments4):
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
+ # img4, labels4 = replicate(img4, labels4) # replicate
+
+ # Augment
+ img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"])
+ img4, labels4, segments4 = random_perspective(img4,
+ labels4,
+ segments4,
+ degrees=self.hyp["degrees"],
+ translate=self.hyp["translate"],
+ scale=self.hyp["scale"],
+ shear=self.hyp["shear"],
+ perspective=self.hyp["perspective"],
+ border=self.mosaic_border) # border to remove
+ return img4, labels4, segments4
+
+ @staticmethod
+ def collate_fn(batch):
+ img, label, path, shapes, masks = zip(*batch) # transposed
+ batched_masks = torch.cat(masks, 0)
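+        # torch.cat, not torch.stack: with overlap=True each image contributes a single (1, h/r, w/r)
+        # index-encoded mask, with overlap=False one binary mask per label, so counts differ per image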
+ for i, l in enumerate(label):
+ l[:, 0] = i # add target image index for build_targets()
+ return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks
+
+
+def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
+ """
+ Args:
+ img_size (tuple): The image size.
+        polygons (np.ndarray): [N, M], N is the number of polygons,
+            M is the number of points (each polygon is flattened, so M is divisible by 2).
+ """
+ mask = np.zeros(img_size, dtype=np.uint8)
+ polygons = np.asarray(polygons)
+ polygons = polygons.astype(np.int32)
+ shape = polygons.shape
+ polygons = polygons.reshape(shape[0], -1, 2)
+ cv2.fillPoly(mask, polygons, color=color)
+ nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
+    # NOTE: fillPoly first and resize afterwards keeps the loss calculation consistent with mask-ratio=1
+ mask = cv2.resize(mask, (nw, nh))
+ return mask
+
+
+def polygons2masks(img_size, polygons, color, downsample_ratio=1):
+ """
+ Args:
+ img_size (tuple): The image size.
+        polygons (list[np.ndarray]): each element is an [N, M] array, where N is the number of
+            polygons and M is the number of points (divisible by 2).
+ """
+ masks = []
+ for si in range(len(polygons)):
+ mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
+ masks.append(mask)
+ return np.array(masks)
+
+
+def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
+ """Return a (640, 640) overlap mask."""
+ masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8)
+ areas = []
+ ms = []
+ for si in range(len(segments)):
+ mask = polygon2mask(
+ img_size,
+ [segments[si].reshape(-1)],
+ downsample_ratio=downsample_ratio,
+ color=1,
+ )
+ ms.append(mask)
+ areas.append(mask.sum())
+ areas = np.asarray(areas)
+ index = np.argsort(-areas)
+ ms = np.array(ms)[index]
+ for i in range(len(segments)):
+ mask = ms[i] * (i + 1)
+ masks = masks + mask
+ masks = np.clip(masks, a_min=0, a_max=i + 1)
+ return masks, index
diff --git a/yolov5/utils/segment/general.py b/yolov5/utils/segment/general.py
new file mode 100644
index 0000000..36547ed
--- /dev/null
+++ b/yolov5/utils/segment/general.py
@@ -0,0 +1,120 @@
+import cv2
+import torch
+import torch.nn.functional as F
+
+
+def crop_mask(masks, boxes):
+ """
+ "Crop" predicted masks by zeroing out everything not in the predicted bbox.
+ Vectorized by Chong (thanks Chong).
+
+ Args:
+        - masks should be a size [n, h, w] tensor of masks
+ - boxes should be a size [n, 4] tensor of bbox coords in relative point form
+ """
+
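+    # Broadcasting each box's (n,1,1) coordinates against (1,1,w) column and (1,h,1) row index grids
+    # builds a per-instance boolean box mask without any Python loop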
+ n, h, w = masks.shape
+    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
+    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # cols shape(1,1,w)
+    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # rows shape(1,h,1)
+
+ return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+
+
+def process_mask_upsample(protos, masks_in, bboxes, shape):
+ """
+ Crop after upsample.
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is number of masks after nms
+    bboxes: [n, 4], n is number of masks after nms
+    shape: input image size, (h, w)
+
+ return: h, w, n
+ """
+
+ c, mh, mw = protos.shape # CHW
+ masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
+ masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW
+ masks = crop_mask(masks, bboxes) # CHW
+ return masks.gt_(0.5)
+
+
+def process_mask(protos, masks_in, bboxes, shape, upsample=False):
+ """
+ Crop before upsample.
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is number of masks after nms
+    bboxes: [n, 4], n is number of masks after nms
+    shape: input image size, (h, w)
+
+ return: h, w, n
+ """
+
+ c, mh, mw = protos.shape # CHW
+ ih, iw = shape
+ masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW
+
+ downsampled_bboxes = bboxes.clone()
+ downsampled_bboxes[:, 0] *= mw / iw
+ downsampled_bboxes[:, 2] *= mw / iw
+ downsampled_bboxes[:, 3] *= mh / ih
+ downsampled_bboxes[:, 1] *= mh / ih
+
+ masks = crop_mask(masks, downsampled_bboxes) # CHW
+ if upsample:
+ masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW
+ return masks.gt_(0.5)
+
+
+def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
+ """
+    im1_shape: model input shape, [h, w]
+    im0_shape: original image shape, [h, w, 3]
+ masks: [h, w, num]
+ """
+ # Rescale coordinates (xyxy) from im1_shape to im0_shape
+ if ratio_pad is None: # calculate from im0_shape
+ gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
+ pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
+ else:
+ pad = ratio_pad[1]
+ top, left = int(pad[1]), int(pad[0]) # y, x
+ bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
+
+ if len(masks.shape) < 2:
+        raise ValueError(f'masks should have 2 or 3 dimensions, but got {len(masks.shape)}')
+ masks = masks[top:bottom, left:right]
+ # masks = masks.permute(2, 0, 1).contiguous()
+ # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
+ # masks = masks.permute(1, 2, 0).contiguous()
+ masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
+
+ if len(masks.shape) == 2:
+ masks = masks[:, :, None]
+ return masks
+
+
+def mask_iou(mask1, mask2, eps=1e-7):
+ """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [M, n], M is the number of gt objects
+ Note: n means image_w x image_h
+
+ return: masks iou, [N, M]
+ """
+ intersection = torch.matmul(mask1, mask2.t()).clamp(0)
+ union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection
+ return intersection / (union + eps)
+
+
+def masks_iou(mask1, mask2, eps=1e-7):
+ """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [N, n], N is the number of gt objects
+ Note: n means image_w x image_h
+
+ return: masks iou, (N, )
+ """
+ intersection = (mask1 * mask2).sum(1).clamp(0) # (N, )
+ union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection
+ return intersection / (union + eps)
diff --git a/yolov5/utils/segment/loss.py b/yolov5/utils/segment/loss.py
new file mode 100644
index 0000000..b45b2c2
--- /dev/null
+++ b/yolov5/utils/segment/loss.py
@@ -0,0 +1,186 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..general import xywh2xyxy
+from ..loss import FocalLoss, smooth_BCE
+from ..metrics import bbox_iou
+from ..torch_utils import de_parallel
+from .general import crop_mask
+
+
+class ComputeLoss:
+ # Compute losses
+ def __init__(self, model, autobalance=False, overlap=False):
+ self.sort_obj_iou = False
+ self.overlap = overlap
+ device = next(model.parameters()).device # get model device
+ h = model.hyp # hyperparameters
+ self.device = device
+
+ # Define criteria
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+ self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
+
+ # Focal loss
+ g = h['fl_gamma'] # focal loss gamma
+ if g > 0:
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+ m = de_parallel(model).model[-1] # Detect() module
+ self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
+ self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
+ self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
+ self.na = m.na # number of anchors
+ self.nc = m.nc # number of classes
+ self.nl = m.nl # number of layers
+ self.nm = m.nm # number of masks
+ self.anchors = m.anchors
+ self.device = device
+
+ def __call__(self, preds, targets, masks): # predictions, targets, model
+ p, proto = preds
+ bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width
+ lcls = torch.zeros(1, device=self.device)
+ lbox = torch.zeros(1, device=self.device)
+ lobj = torch.zeros(1, device=self.device)
+ lseg = torch.zeros(1, device=self.device)
+ tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets
+
+ # Losses
+ for i, pi in enumerate(p): # layer index, layer predictions
+ b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
+ tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
+
+ n = b.shape[0] # number of targets
+ if n:
+ pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions
+
+ # Box regression
+ pxy = pxy.sigmoid() * 2 - 0.5
+ pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
+ pbox = torch.cat((pxy, pwh), 1) # predicted box
+ iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target)
+ lbox += (1.0 - iou).mean() # iou loss
+
+ # Objectness
+ iou = iou.detach().clamp(0).type(tobj.dtype)
+ if self.sort_obj_iou:
+ j = iou.argsort()
+ b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
+ if self.gr < 1:
+ iou = (1.0 - self.gr) + self.gr * iou
+ tobj[b, a, gj, gi] = iou # iou ratio
+
+ # Classification
+ if self.nc > 1: # cls loss (only if multiple classes)
+ t = torch.full_like(pcls, self.cn, device=self.device) # targets
+ t[range(n), tcls[i]] = self.cp
+ lcls += self.BCEcls(pcls, t) # BCE
+
+ # Mask regression
+ if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample
+ masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
+ marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized
+ mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))
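+                # xywhn boxes are normalized; scale them to mask-space pixels for cropping and keep the
+                # normalized area (marea) for per-instance normalization in single_mask_loss()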
+ for bi in b.unique():
+ j = b == bi # matching index
+ if self.overlap:
+ mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)
+ else:
+ mask_gti = masks[tidxs[i]][j]
+ lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])
+
+ obji = self.BCEobj(pi[..., 4], tobj)
+ lobj += obji * self.balance[i] # obj loss
+ if self.autobalance:
+ self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+ if self.autobalance:
+ self.balance = [x / self.balance[self.ssi] for x in self.balance]
+ lbox *= self.hyp["box"]
+ lobj *= self.hyp["obj"]
+ lcls *= self.hyp["cls"]
+ lseg *= self.hyp["box"] / bs
+
+ loss = lbox + lobj + lcls + lseg
+ return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()
+
+ def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
+ # Mask loss for one image
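+        # Per-pixel BCE is zeroed outside each instance's box by crop_mask(), then normalized by the
+        # box's relative area so small instances are not swamped by large ones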
+ pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80)
+ loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
+ return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()
+
+ def build_targets(self, p, targets):
+ # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+ na, nt = self.na, targets.shape[0] # number of anchors, targets
+ tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], []
+ gain = torch.ones(8, device=self.device) # normalized to gridspace gain
+ ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
+ if self.overlap:
+ batch = p[0].shape[0]
+ ti = []
+ for i in range(batch):
+ num = (targets[:, 0] == i).sum() # find number of targets of each image
+ ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num)
+ ti = torch.cat(ti, 1) # (na, nt)
+ else:
+ ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1)
+ targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices
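+        # ti appends a per-target index: 1-based per image when overlap=True (matching ids stored in the
+        # overlap-encoded mask) or a global 0-based index into the concatenated mask tensor otherwise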
+
+ g = 0.5 # bias
+ off = torch.tensor(
+ [
+ [0, 0],
+ [1, 0],
+ [0, 1],
+ [-1, 0],
+ [0, -1], # j,k,l,m
+ # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
+ ],
+ device=self.device).float() * g # offsets
+
+ for i in range(self.nl):
+ anchors, shape = self.anchors[i], p[i].shape
+ gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain
+
+ # Match targets to anchors
+ t = targets * gain # shape(3,n,7)
+ if nt:
+ # Matches
+ r = t[..., 4:6] / anchors[:, None] # wh ratio
+ j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare
+ # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+ t = t[j] # filter
+
+ # Offsets
+ gxy = t[:, 2:4] # grid xy
+ gxi = gain[[2, 3]] - gxy # inverse
+ j, k = ((gxy % 1 < g) & (gxy > 1)).T
+ l, m = ((gxi % 1 < g) & (gxi > 1)).T
+ j = torch.stack((torch.ones_like(j), j, k, l, m))
+ t = t.repeat((5, 1, 1))[j]
+ offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+ else:
+ t = targets[0]
+ offsets = 0
+
+ # Define
+            bc, gxy, gwh, at = t.chunk(4, 1)  # (image, class), grid xy, grid wh, (anchor, target index)
+ (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class
+ gij = (gxy - offsets).long()
+ gi, gj = gij.T # grid indices
+
+ # Append
+ indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid
+ tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
+ anch.append(anchors[a]) # anchors
+ tcls.append(c) # class
+ tidxs.append(tidx)
+ xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized
+
+ return tcls, tbox, indices, anch, tidxs, xywhn
diff --git a/yolov5/utils/segment/metrics.py b/yolov5/utils/segment/metrics.py
new file mode 100644
index 0000000..b09ce23
--- /dev/null
+++ b/yolov5/utils/segment/metrics.py
@@ -0,0 +1,210 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Model validation metrics
+"""
+
+import numpy as np
+
+from ..metrics import ap_per_class
+
+
+def fitness(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
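+    # columns: [P(B), R(B), mAP@0.5(B), mAP@0.5:0.95(B), P(M), R(M), mAP@0.5(M), mAP@0.5:0.95(M)]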
+ return (x[:, :8] * w).sum(1)
+
+
+def ap_per_class_box_and_mask(
+ tp_m,
+ tp_b,
+ conf,
+ pred_cls,
+ target_cls,
+ plot=False,
+ save_dir=".",
+ names=(),
+):
+ """
+ Args:
+ tp_b: tp of boxes.
+ tp_m: tp of masks.
+ other arguments see `func: ap_per_class`.
+ """
+ results_boxes = ap_per_class(tp_b,
+ conf,
+ pred_cls,
+ target_cls,
+ plot=plot,
+ save_dir=save_dir,
+ names=names,
+ prefix="Box")[2:]
+ results_masks = ap_per_class(tp_m,
+ conf,
+ pred_cls,
+ target_cls,
+ plot=plot,
+ save_dir=save_dir,
+ names=names,
+ prefix="Mask")[2:]
+
+ results = {
+ "boxes": {
+ "p": results_boxes[0],
+ "r": results_boxes[1],
+ "ap": results_boxes[3],
+ "f1": results_boxes[2],
+ "ap_class": results_boxes[4]},
+ "masks": {
+ "p": results_masks[0],
+ "r": results_masks[1],
+ "ap": results_masks[3],
+ "f1": results_masks[2],
+ "ap_class": results_masks[4]}}
+ return results
+
+
+class Metric:
+
+ def __init__(self) -> None:
+ self.p = [] # (nc, )
+ self.r = [] # (nc, )
+ self.f1 = [] # (nc, )
+ self.all_ap = [] # (nc, 10)
+ self.ap_class_index = [] # (nc, )
+
+ @property
+ def ap50(self):
+ """AP@0.5 of all classes.
+ Return:
+ (nc, ) or [].
+ """
+ return self.all_ap[:, 0] if len(self.all_ap) else []
+
+ @property
+ def ap(self):
+ """AP@0.5:0.95
+ Return:
+ (nc, ) or [].
+ """
+ return self.all_ap.mean(1) if len(self.all_ap) else []
+
+ @property
+ def mp(self):
+ """mean precision of all classes.
+ Return:
+ float.
+ """
+ return self.p.mean() if len(self.p) else 0.0
+
+ @property
+ def mr(self):
+ """mean recall of all classes.
+ Return:
+ float.
+ """
+ return self.r.mean() if len(self.r) else 0.0
+
+ @property
+ def map50(self):
+ """Mean AP@0.5 of all classes.
+ Return:
+ float.
+ """
+ return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
+
+ @property
+ def map(self):
+ """Mean AP@0.5:0.95 of all classes.
+ Return:
+ float.
+ """
+ return self.all_ap.mean() if len(self.all_ap) else 0.0
+
+ def mean_results(self):
+ """Mean of results, return mp, mr, map50, map"""
+ return (self.mp, self.mr, self.map50, self.map)
+
+ def class_result(self, i):
+ """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
+ return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
+
+ def get_maps(self, nc):
+ maps = np.zeros(nc) + self.map
+ for i, c in enumerate(self.ap_class_index):
+ maps[c] = self.ap[i]
+ return maps
+
+ def update(self, results):
+ """
+ Args:
+ results: tuple(p, r, ap, f1, ap_class)
+ """
+ p, r, all_ap, f1, ap_class_index = results
+ self.p = p
+ self.r = r
+ self.all_ap = all_ap
+ self.f1 = f1
+ self.ap_class_index = ap_class_index
+
+
+class Metrics:
+ """Metric for boxes and masks."""
+
+ def __init__(self) -> None:
+ self.metric_box = Metric()
+ self.metric_mask = Metric()
+
+ def update(self, results):
+ """
+ Args:
+ results: Dict{'boxes': Dict{}, 'masks': Dict{}}
+ """
+ self.metric_box.update(list(results["boxes"].values()))
+ self.metric_mask.update(list(results["masks"].values()))
+
+ def mean_results(self):
+ return self.metric_box.mean_results() + self.metric_mask.mean_results()
+
+ def class_result(self, i):
+ return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
+
+ def get_maps(self, nc):
+ return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
+
+ @property
+ def ap_class_index(self):
+ # boxes and masks have the same ap_class_index
+ return self.metric_box.ap_class_index
+
+
+KEYS = [
+ "train/box_loss",
+ "train/seg_loss", # train loss
+ "train/obj_loss",
+ "train/cls_loss",
+ "metrics/precision(B)",
+ "metrics/recall(B)",
+ "metrics/mAP_0.5(B)",
+ "metrics/mAP_0.5:0.95(B)", # metrics
+ "metrics/precision(M)",
+ "metrics/recall(M)",
+ "metrics/mAP_0.5(M)",
+ "metrics/mAP_0.5:0.95(M)", # metrics
+ "val/box_loss",
+ "val/seg_loss", # val loss
+ "val/obj_loss",
+ "val/cls_loss",
+ "x/lr0",
+ "x/lr1",
+ "x/lr2",]
+
+BEST_KEYS = [
+ "best/epoch",
+ "best/precision(B)",
+ "best/recall(B)",
+ "best/mAP_0.5(B)",
+ "best/mAP_0.5:0.95(B)",
+ "best/precision(M)",
+ "best/recall(M)",
+ "best/mAP_0.5(M)",
+ "best/mAP_0.5:0.95(M)",]
diff --git a/yolov5/utils/segment/plots.py b/yolov5/utils/segment/plots.py
new file mode 100644
index 0000000..e882c14
--- /dev/null
+++ b/yolov5/utils/segment/plots.py
@@ -0,0 +1,143 @@
+import contextlib
+import math
+from pathlib import Path
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import torch
+
+from .. import threaded
+from ..general import xywh2xyxy
+from ..plots import Annotator, colors
+
+
+@threaded
+def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):
+ # Plot image grid with labels
+ if isinstance(images, torch.Tensor):
+ images = images.cpu().float().numpy()
+ if isinstance(targets, torch.Tensor):
+ targets = targets.cpu().numpy()
+ if isinstance(masks, torch.Tensor):
+ masks = masks.cpu().numpy().astype(int)
+
+ max_size = 1920 # max image size
+ max_subplots = 16 # max image subplots, i.e. 4x4
+ bs, _, h, w = images.shape # batch size, _, height, width
+ bs = min(bs, max_subplots) # limit plot images
+ ns = np.ceil(bs ** 0.5) # number of subplots (square)
+ if np.max(images[0]) <= 1:
+ images *= 255 # de-normalise (optional)
+
+ # Build Image
+ mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
+ for i, im in enumerate(images):
+ if i == max_subplots: # if last batch has fewer images than we expect
+ break
+ x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
+ im = im.transpose(1, 2, 0)
+ mosaic[y:y + h, x:x + w, :] = im
+
+ # Resize (optional)
+ scale = max_size / ns / max(h, w)
+ if scale < 1:
+ h = math.ceil(scale * h)
+ w = math.ceil(scale * w)
+ mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
+
+ # Annotate
+ fs = int((h + w) * ns * 0.01) # font size
+ annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
+ for i in range(i + 1):
+ x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
+ annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
+ if paths:
+ annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
+ if len(targets) > 0:
+ idx = targets[:, 0] == i
+ ti = targets[idx] # image targets
+
+ boxes = xywh2xyxy(ti[:, 2:6]).T
+ classes = ti[:, 1].astype('int')
+ labels = ti.shape[1] == 6 # labels if no conf column
+ conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
+
+ if boxes.shape[1]:
+ if boxes.max() <= 1.01: # if normalized with tolerance 0.01
+ boxes[[0, 2]] *= w # scale to pixels
+ boxes[[1, 3]] *= h
+ elif scale < 1: # absolute coords need scale if image scales
+ boxes *= scale
+ boxes[[0, 2]] += x
+ boxes[[1, 3]] += y
+ for j, box in enumerate(boxes.T.tolist()):
+ cls = classes[j]
+ color = colors(cls)
+ cls = names[cls] if names else cls
+ if labels or conf[j] > 0.25: # 0.25 conf thresh
+ label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
+ annotator.box_label(box, label, color=color)
+
+ # Plot masks
+ if len(masks):
+                if masks.max() > 1.0:  # masks are overlap-encoded (pixel values are instance indices)
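+                    # Decode the overlap encoding: compare each pixel's stored instance id against 1..nl
+                    # to recover one binary mask per label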
+ image_masks = masks[[i]] # (1, 640, 640)
+ nl = len(ti)
+ index = np.arange(nl).reshape(nl, 1, 1) + 1
+ image_masks = np.repeat(image_masks, nl, axis=0)
+ image_masks = np.where(image_masks == index, 1.0, 0.0)
+ else:
+ image_masks = masks[idx]
+
+ im = np.asarray(annotator.im).copy()
+ for j, box in enumerate(boxes.T.tolist()):
+ if labels or conf[j] > 0.25: # 0.25 conf thresh
+ color = colors(classes[j])
+ mh, mw = image_masks[j].shape
+ if mh != h or mw != w:
+ mask = image_masks[j].astype(np.uint8)
+ mask = cv2.resize(mask, (w, h))
+                        mask = mask.astype(bool)
+ else:
+                        mask = image_masks[j].astype(bool)
+ with contextlib.suppress(Exception):
+ im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6
+ annotator.fromarray(im)
+ annotator.im.save(fname) # save
+
+
+def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):
+ # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
+ save_dir = Path(file).parent if file else Path(dir)
+ fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
+ ax = ax.ravel()
+ files = list(save_dir.glob("results*.csv"))
+ assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
+ for f in files:
+ try:
+ data = pd.read_csv(f)
+ index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] +
+ 0.1 * data.values[:, 11])
+ s = [x.strip() for x in data.columns]
+ x = data.values[:, 0]
+ for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]):
+ y = data.values[:, j]
+ # y[y == 0] = np.nan # don't show zero values
+ ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2)
+ if best:
+ # best
+ ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3)
+ ax[i].set_title(s[j] + f"\n{round(y[index], 5)}")
+ else:
+ # last
+ ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3)
+ ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}")
+ # if j in [8, 9, 10]: # share train and val loss y axes
+ # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+ except Exception as e:
+ print(f"Warning: Plotting error for {f}: {e}")
+ ax[1].legend()
+ fig.savefig(save_dir / "results.png", dpi=200)
+ plt.close()
diff --git a/yolov5/utils/torch_utils.py b/yolov5/utils/torch_utils.py
index db822a4..f392d44 100644
--- a/yolov5/utils/torch_utils.py
+++ b/yolov5/utils/torch_utils.py
@@ -18,8 +18,8 @@
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
-from yolov5.utils.general import (LOGGER, check_version, colorstr, file_date,
- git_describe)
+
+from yolov5.utils.general import LOGGER, check_version, colorstr, file_date, git_describe
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
@@ -34,6 +34,23 @@
warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling')
+def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):
+ # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator
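+    # Typical usage (note the call, since this is a decorator factory): @smart_inference_mode() above
+    # an inference or validation entry point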
+ def decorate(fn):
+ return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)
+
+ return decorate
+
+
+def smartCrossEntropyLoss(label_smoothing=0.0):
+ # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0
+ if check_version(torch.__version__, '1.10.0'):
+ return nn.CrossEntropyLoss(label_smoothing=label_smoothing)
+ if label_smoothing > 0:
+ LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0')
+ return nn.CrossEntropyLoss()
+
+
def smart_DDP(model):
# Model DDP creation with checks
assert not check_version(torch.__version__, '1.12.0', pinned=True), \
@@ -45,6 +62,28 @@ def smart_DDP(model):
return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
+def reshape_classifier_output(model, n=1000):
+ # Update a TorchVision classification model to class count 'n' if required
+    from yolov5.models.common import Classify  # local import to avoid a circular import
+ name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module
+ if isinstance(m, Classify): # YOLOv5 Classify() head
+ if m.linear.out_features != n:
+ m.linear = nn.Linear(m.linear.in_features, n)
+ elif isinstance(m, nn.Linear): # ResNet, EfficientNet
+ if m.out_features != n:
+ setattr(model, name, nn.Linear(m.in_features, n))
+ elif isinstance(m, nn.Sequential):
+ types = [type(x) for x in m]
+ if nn.Linear in types:
+ i = types.index(nn.Linear) # nn.Linear index
+ if m[i].out_features != n:
+ m[i] = nn.Linear(m[i].in_features, n)
+ elif nn.Conv2d in types:
+ i = types.index(nn.Conv2d) # nn.Conv2d index
+ if m[i].out_channels != n:
+                m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)
+
+
@contextmanager
def torch_distributed_zero_first(local_rank: int):
# Decorator to make all processes in distributed training wait for each local_master to do something
@@ -78,7 +117,7 @@ def select_device(device='', batch_size=0, newline=True):
assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
- if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available
+ if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available
devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size > 0: # check batch_size is divisible by device_count
@@ -97,7 +136,7 @@ def select_device(device='', batch_size=0, newline=True):
if not newline:
s = s.rstrip()
- LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
+ LOGGER.info(s)
return torch.device(arg)
@@ -109,14 +148,13 @@ def time_sync():
def profile(input, ops, n=10, device=None):
- # YOLOv5 speed/memory/FLOPs profiler
- #
- # Usage:
- # input = torch.randn(16, 3, 640, 640)
- # m1 = lambda x: x * torch.sigmoid(x)
- # m2 = nn.SiLU()
- # profile(input, [m1, m2], n=100) # profile over 100 iterations
-
+ """ YOLOv5 speed/memory/FLOPs profiler
+ Usage:
+ input = torch.randn(16, 3, 640, 640)
+ m1 = lambda x: x * torch.sigmoid(x)
+ m2 = nn.SiLU()
+ profile(input, [m1, m2], n=100) # profile over 100 iterations
+ """
results = []
if not isinstance(device, torch.device):
device = select_device(device)
@@ -199,12 +237,11 @@ def sparsity(model):
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
- print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
- print(' %.3g global sparsity' % sparsity(model))
+ LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity')
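A small sketch of the prune() helper on a toy module, now logging instead of printing (import path assumed):

    import torch.nn as nn
    from yolov5.utils.torch_utils import prune  # assumed import path

    m = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
    prune(m, amount=0.3)  # zeroes ~30% of each Conv2d's weights by L1 magnitude, then logs global sparsity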
def fuse_conv_and_bn(conv, bn):
@@ -214,6 +251,7 @@ def fuse_conv_and_bn(conv, bn):
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
+ dilation=conv.dilation,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
@@ -230,7 +268,7 @@ def fuse_conv_and_bn(conv, bn):
return fusedconv
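A quick equivalence check for the fusion above, now that dilation is carried over to the fused layer (import path assumed; eval mode so BatchNorm uses its running stats):

    import torch
    import torch.nn as nn
    from yolov5.utils.torch_utils import fuse_conv_and_bn  # assumed import path

    conv = nn.Conv2d(3, 8, 3, padding=2, dilation=2, bias=False).eval()
    bn = nn.BatchNorm2d(8).eval()
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        assert torch.allclose(fused(x), bn(conv(x)), atol=1e-4)  # same output, one layer fewer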
-def model_info(model, verbose=False, img_size=640):
+def model_info(model, verbose=False, imgsz=640):
# Model information. imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
@@ -242,12 +280,12 @@ def model_info(model, verbose=False, img_size=640):
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPs
- from thop import profile
- stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
- img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
- flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
- img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
- fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
+ p = next(model.parameters())
+ stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride
+ im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format
+ flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
+ imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float
+ fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs
except Exception:
fs = ''
@@ -276,7 +314,7 @@ def copy_attr(a, b, include=(), exclude=()):
setattr(a, k, v)
-def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5):
+def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
# YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay
g = [], [], [] # optimizer parameter groups
bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d()
@@ -299,13 +337,25 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-
else:
raise NotImplementedError(f'Optimizer {name} not implemented.')
- optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay}) # add g0 with weight_decay
+ optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay
optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights)
- LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
- f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
+ LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups "
+ f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias")
return optimizer
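A brief sketch of the renamed decay argument and the three parameter groups on a toy model (import path assumed):

    import torch.nn as nn
    from yolov5.utils.torch_utils import smart_optimizer  # assumed import path

    model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.Conv2d(16, 32, 3))
    optimizer = smart_optimizer(model, name='SGD', lr=0.01, momentum=0.937, decay=5e-4)
    # biases and BatchNorm weights get weight_decay=0.0; the remaining weights get weight_decay=5e-4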
+def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs):
+ # YOLOv5 torch.hub.load() wrapper with smart error/issue handling
+ if check_version(torch.__version__, '1.9.1'):
+ kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors
+ if check_version(torch.__version__, '1.12.0'):
+        kwargs['trust_repo'] = True  # argument required starting in torch 1.12
+ try:
+ return torch.hub.load(repo, model, **kwargs)
+ except Exception:
+ return torch.hub.load(repo, model, force_reload=True, **kwargs)
+
+
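A minimal sketch of the hub wrapper (the pretrained kwarg is the standard YOLOv5 hubconf argument; import path assumed):

    from yolov5.utils.torch_utils import smart_hub_load  # assumed import path

    model = smart_hub_load('ultralytics/yolov5', 'yolov5s', pretrained=True)  # retries with force_reload on failure
    results = model('https://ultralytics.com/images/zidane.jpg')
    results.print()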
def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True):
# Resume training from a partially trained checkpoint
best_fitness = 0.0
@@ -358,8 +408,6 @@ class ModelEMA:
def __init__(self, model, decay=0.9999, tau=2000, updates=0):
# Create EMA
self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA
- # if next(model.parameters()).device.type != 'cpu':
- # self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
@@ -367,15 +415,15 @@ def __init__(self, model, decay=0.9999, tau=2000, updates=0):
def update(self, model):
# Update EMA parameters
- with torch.no_grad():
- self.updates += 1
- d = self.decay(self.updates)
-
- msd = de_parallel(model).state_dict() # model state_dict
- for k, v in self.ema.state_dict().items():
- if v.dtype.is_floating_point:
- v *= d
- v += (1 - d) * msd[k].detach()
+ self.updates += 1
+ d = self.decay(self.updates)
+
+ msd = de_parallel(model).state_dict() # model state_dict
+ for k, v in self.ema.state_dict().items():
+ if v.dtype.is_floating_point: # true for FP16 and FP32
+ v *= d
+ v += (1 - d) * msd[k].detach()
+ # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32'
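A toy training-loop sketch showing where ModelEMA.update() sits relative to the optimizer step (import path assumed):

    import torch
    import torch.nn as nn
    from yolov5.utils.torch_utils import ModelEMA  # assumed import path

    model = nn.Linear(10, 2)
    ema = ModelEMA(model, decay=0.9999, tau=2000)
    opt = torch.optim.SGD(model.parameters(), lr=0.01)
    for _ in range(3):
        x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
        loss = nn.functional.cross_entropy(model(x), y)
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.update(model)  # EMA copy lags the live weights per the decay schedule
    # ema.ema holds the smoothed weights used for validation/checkpointing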
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
diff --git a/yolov5/val.py b/yolov5/val.py
index 6fdc4cd..810161e 100644
--- a/yolov5/val.py
+++ b/yolov5/val.py
@@ -1,21 +1,22 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
-Validate a trained YOLOv5 model accuracy on a custom dataset
+Validate a trained YOLOv5 detection model on a detection dataset
Usage:
$ yolov5 val --weights yolov5s.pt --data coco128.yaml --img 640
Usage - formats:
- $ yolov5 val --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s.xml # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (macOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ $ yolov5 val --weights yolov5s.pt # PyTorch
+ yolov5s.torchscript # TorchScript
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s.xml # OpenVINO
+ yolov5s.engine # TensorRT
+ yolov5s.mlmodel # CoreML (macOS-only)
+ yolov5s_saved_model # TensorFlow SavedModel
+ yolov5s.pb # TensorFlow GraphDef
+ yolov5s.tflite # TensorFlow Lite
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s_paddle_model # PaddlePaddle
"""
import argparse
@@ -35,15 +36,15 @@
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.callbacks import Callbacks
from yolov5.utils.dataloaders import create_dataloader
-from yolov5.utils.general import (LOGGER, check_dataset, check_img_size,
- check_requirements, check_yaml,
- coco80_to_coco91_class, colorstr, emojis,
+from yolov5.utils.general import (LOGGER, Profile, check_dataset,
+ check_img_size, check_requirements,
+ check_yaml, coco80_to_coco91_class, colorstr,
increment_path, non_max_suppression,
print_args, scale_coords, xywh2xyxy,
xyxy2xywh)
from yolov5.utils.metrics import ConfusionMatrix, ap_per_class, box_iou
from yolov5.utils.plots import output_to_target, plot_images, plot_val_study
-from yolov5.utils.torch_utils import select_device, time_sync
+from yolov5.utils.torch_utils import select_device, smart_inference_mode
def save_one_txt(predn, save_conf, shape, file):
@@ -71,12 +72,12 @@ def save_one_json(predn, jdict, path, class_map):
def process_batch(detections, labels, iouv):
"""
- Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
+ Return correct prediction matrix
Arguments:
- detections (Array[N, 6]), x1, y1, x2, y2, conf, class
- labels (Array[M, 5]), class, x1, y1, x2, y2
+ detections (array[N, 6]), x1, y1, x2, y2, conf, class
+ labels (array[M, 5]), class, x1, y1, x2, y2
Returns:
- correct (Array[N, 10]), for 10 IoU levels
+ correct (array[N, 10]), for 10 IoU levels
"""
correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
iou = box_iou(labels[:, 1:], detections[:, :4])
@@ -94,7 +95,7 @@ def process_batch(detections, labels, iouv):
return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
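A tiny worked example of the correctness matrix for a single perfectly overlapping prediction (assuming process_batch is importable from yolov5.val):

    import torch
    from yolov5.val import process_batch  # assumed import path

    iouv = torch.linspace(0.5, 0.95, 10)                      # the 10 mAP IoU thresholds
    detections = torch.tensor([[0., 0., 10., 10., 0.9, 1.]])  # x1, y1, x2, y2, conf, class
    labels = torch.tensor([[1., 0., 0., 10., 10.]])           # class, x1, y1, x2, y2
    correct = process_batch(detections, labels, iouv)         # shape (1, 10), all True since IoU = 1.0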
-@torch.no_grad()
+@smart_inference_mode()
def run(
data,
weights=None, # model.pt path(s)
@@ -104,6 +105,7 @@ def run(
img=None, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
+ max_det=300, # maximum detections per image
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
workers=8, # max dataloader workers (per RANK in DDP mode)
@@ -144,6 +146,7 @@ def run(
model.half() if half else model.float()
else: # called directly
device = select_device(device, batch_size=batch_size)
+ half &= device.type != 'cpu' # half precision only supported on CUDA
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
@@ -167,7 +170,7 @@ def run(
# Configure
model.eval()
- cuda = device.type != 'cpu'
+ half = cuda = device.type != 'cpu'
is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95
@@ -195,43 +198,49 @@ def run(
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
- names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names))
+ names = model.names if hasattr(model, 'names') else model.module.names # get class names
+ if isinstance(names, (list, tuple)): # old format
+ names = dict(enumerate(names))
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
- s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
- dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
+ s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
+ tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
+ dt = Profile(), Profile(), Profile() # profiling times
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
callbacks.run('on_val_start')
pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
callbacks.run('on_val_batch_start')
- t1 = time_sync()
- if cuda:
- im = im.to(device, non_blocking=True)
- targets = targets.to(device)
- im = im.half() if half else im.float() # uint8 to fp16/32
- im /= 255 # 0 - 255 to 0.0 - 1.0
- nb, _, height, width = im.shape # batch size, channels, height, width
- t2 = time_sync()
- dt[0] += t2 - t1
+ with dt[0]:
+ if cuda:
+ im = im.to(device, non_blocking=True)
+ targets = targets.to(device)
+ im = im.half() if half else im.float() # uint8 to fp16/32
+ im /= 255 # 0 - 255 to 0.0 - 1.0
+ nb, _, height, width = im.shape # batch size, channels, height, width
# Inference
- out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs
- dt[1] += time_sync() - t2
+ with dt[1]:
+ preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)
# Loss
if compute_loss:
- loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
+ loss += compute_loss(train_out, targets)[1] # box, obj, cls
# NMS
targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
- t3 = time_sync()
- out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
- dt[2] += time_sync() - t3
+ with dt[2]:
+ preds = non_max_suppression(preds,
+ conf_thres,
+ iou_thres,
+ labels=lb,
+ multi_label=True,
+ agnostic=single_cls,
+ max_det=max_det)
# Metrics
- for si, pred in enumerate(out):
+ for si, pred in enumerate(preds):
labels = targets[targets[:, 0] == si, 1:]
nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
path, shape = Path(paths[si]), shapes[si][0]
@@ -271,9 +280,9 @@ def run(
# Plot images
if plots and batch_i < 3:
plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels
- plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
+ plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
- callbacks.run('on_val_batch_end')
+ callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)
# Compute metrics
stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
@@ -284,10 +293,10 @@ def run(
nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class
# Print results
- pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
+ pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format
LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
if nt.sum() == 0:
- LOGGER.warning(emojis(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️'))
+        LOGGER.warning(f'WARNING: no labels found in {task} set, cannot compute metrics without labels ⚠️')
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
@@ -308,7 +317,7 @@ def run(
text_file.close()
# Print speeds
- t = tuple(x / seen * 1E3 for x in dt) # speeds per image
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
@@ -316,7 +325,7 @@ def run(
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
- callbacks.run('on_val_end')
+ callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
# Save JSON
if save_json and len(jdict):
@@ -328,7 +337,7 @@ def run(
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
- check_requirements(['pycocotools'])
+ check_requirements('pycocotools')
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
@@ -361,11 +370,12 @@ def run(
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
- parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
+ parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
@@ -391,11 +401,13 @@ def parse_opt():
def main():
opt = parse_opt()
- #check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+ #check_requirements(exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
- LOGGER.info(emojis(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️'))
+ LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️')
+ if opt.save_hybrid:
+ LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️')
run(**vars(opt))
else: